diff -u --recursive --new-file v2.4.0-prerelease/linux/Documentation/Changes linux/Documentation/Changes
--- v2.4.0-prerelease/linux/Documentation/Changes	Mon Jan  1 09:38:34 2001
+++ linux/Documentation/Changes	Mon Jan  1 10:00:04 2001
@@ -52,7 +52,7 @@
 o  Gnu make               3.77                    # make --version
 o  binutils               2.9.1.0.25              # ld -v
 o  util-linux             2.10o                   # fdformat --version
-o  modutils               2.3.21                  # insmod -V
+o  modutils               2.4.0                   # insmod -V
 o  e2fsprogs              1.19                    # tune2fs --version
 o  pcmcia-cs              3.1.21                  # cardmgr -V
 o  PPP                    2.4.0                   # pppd --version
@@ -153,7 +153,7 @@
 Ksymoops
 --------
 
-If the unthinkable happens and your kernel oopses, you'll need a 2.3
+If the unthinkable happens and your kernel oopses, you'll need a 2.4
 version of ksymoops to decode the report; see REPORTING-BUGS in the
 root of the Linux source for more information.
 
@@ -287,11 +287,11 @@
 
 Ksymoops
 --------
-o  <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.3>
+o  <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4>
 
 Modutils
 --------
-o  <ftp://ftp.kernel.org/pub/linux/utils/kernel/modutils/v2.3/modutils-2.3.21.tar.bz2>
+o  <ftp://ftp.kernel.org/pub/linux/utils/kernel/modutils/v2.4/>
 
 Mkinitrd
 --------
diff -u --recursive --new-file v2.4.0-prerelease/linux/Documentation/Configure.help linux/Documentation/Configure.help
--- v2.4.0-prerelease/linux/Documentation/Configure.help	Mon Jan  1 09:38:34 2001
+++ linux/Documentation/Configure.help	Thu Jan  4 13:00:55 2001
@@ -15512,14 +15512,15 @@
   want). The module is called ariadne.o. If you want to compile it as
   a module, say M here and read Documentation/modules.txt.
 
-Ariadne II support
+Ariadne II and X-Surf support
 CONFIG_ARIADNE2
-  If you have a Village Tronic Ariadne II Ethernet adapter, say Y.
+  This driver is for the Village Tronic Ariadne II and the Individual
+  Computers X-Surf Ethernet cards. If you have such a card, say Y.
   Otherwise, say N.
 
   This driver is also available as a module ( = code which can be
-  inserted in and removed from the running kernel whenever you
-  want). The module is called ariadne2.o. If you want to compile it as
+  inserted in and removed from the running kernel whenever you want).
+  The module will be called ariadne2.o. If you want to compile it as
   a module, say M here and read Documentation/modules.txt.
 
 A2065 support
@@ -16993,12 +16994,6 @@
   Select this option to build a kernel for an Itanium prototype system
   with an A-step CPU.  You have an A-step CPU if the "revision" field in
   /proc/cpuinfo is 0.
-
-Enable Itanium A1-step specific code
-CONFIG_ITANIUM_A1_SPECIFIC
-  Select this option to build a kernel for an Itanium prototype system
-  with an A1-step CPU.  If you don't know whether you have an A1-step CPU,
-  you probably don't and you can answer "no" here.
 
 Enable Itanium B-step specific code
 CONFIG_ITANIUM_BSTEP_SPECIFIC
diff -u --recursive --new-file v2.4.0-prerelease/linux/Documentation/DocBook/mousedrivers.tmpl linux/Documentation/DocBook/mousedrivers.tmpl
--- v2.4.0-prerelease/linux/Documentation/DocBook/mousedrivers.tmpl	Mon Jun 19 12:56:07 2000
+++ linux/Documentation/DocBook/mousedrivers.tmpl	Thu Jan  4 12:50:12 2001
@@ -335,7 +335,7 @@
   <para>
     We count off a user and provided that there are still other users need 
     take no further action. The last person closing the mouse causes us to 
-    free up the interrupt. This stopps interrupts from the mouse from using 
+    free up the interrupt. This stops interrupts from the mouse from using 
     our CPU time, and lets us use <function>MOD_DEC_USE_COUNT</function> so 
     that the mouse can now be unloaded.
   </para>
@@ -404,7 +404,7 @@
     play with them.
   </para>
   <para>
-    If a change has occured we also need to wake sleeping processes, so we 
+    If a change has occurred we also need to wake sleeping processes, so we 
     add a wakeup call and a <structname>wait_queue</structname> to use when 
     we wish to await a mouse event.
   </para>
@@ -426,7 +426,7 @@
   <para>
     This is fairly standard poll code. First we add the wait queue to the 
     list of queues we want to monitor for an event. Secondly we check if an 
-    event has occured. We only have one kind of event - the 
+    event has occurred. We only have one kind of event - the 
     <varname>mouse_event</varname> flag tells us that something happened. 
     We know that this something can only be mouse data. We return the flags 
     indicating input and normal reading will succeed.
@@ -476,7 +476,7 @@
   </para>
   <para>
     Next we wait for an event to occur. The loop is fairly standard event
-    waiting in Linux. Having checked that the event has not yet occured, we
+    waiting in Linux. Having checked that the event has not yet occurred, we
     then check if an event is pending and if not we need to sleep. 
   </para>
   <para>
@@ -488,7 +488,7 @@
     Next we sleep until the mouse or a signal awakens us. A signal will 
     awaken us as we have used <function>wakeup_interruptible</function>. 
     This is important as it means a user can kill processes waiting for 
-    the mouse - clearly a desireable property. If we are interrupted we 
+    the mouse - clearly a desirable property. If we are interrupted we 
     exit the call and the kernel will then process signals and maybe 
     restart the call again - from the beginning.
   </para>
diff -u --recursive --new-file v2.4.0-prerelease/linux/Documentation/DocBook/videobook.tmpl linux/Documentation/DocBook/videobook.tmpl
--- v2.4.0-prerelease/linux/Documentation/DocBook/videobook.tmpl	Mon Dec 11 17:59:43 2000
+++ linux/Documentation/DocBook/videobook.tmpl	Thu Jan  4 12:50:12 2001
@@ -486,7 +486,7 @@
   <para>
         We copy the user supplied structure into kernel memory so we can examine it. 
         If the user has selected a tuner other than zero we reject the request. If 
-        they wanted tuner 0 then, suprisingly enough, that is the current tuner already.
+        they wanted tuner 0 then, surprisingly enough, that is the current tuner already.
   </para>
   <para>
         The next two ioctls we need to provide are to get and set the frequency of
@@ -652,7 +652,7 @@
   </para>
   <para>
         The VIDIOCSAUDIO ioctl allows the user to set the audio parameters in the
-        video_audio stucture. The driver does its best to honour the request.
+        video_audio structure. The driver does its best to honour the request.
   </para>
   <programlisting>
 
@@ -812,7 +812,7 @@
         Chroma keying is a technique used by cards to get around this. It is an old
         television mixing trick where you mark all the areas you wish to replace
         with a single clear colour that isn't used in the image - TV people use an
-        incredibly bright blue while computing people often use a paticularly
+        incredibly bright blue while computing people often use a particularly
         virulent purple. Bright blue occurs on the desktop. Anyone with virulent
         purple windows has another problem besides their TV overlay.
   </para>
@@ -1259,7 +1259,7 @@
    </row><row>
         <entry>VIDEO_MODE_NTSC</><>NTSC (US) encoded Television</entry>
    </row><row>
-        <entry>VIDEO_MODE_SECAM</><>SECAM (French) Televison </entry>
+        <entry>VIDEO_MODE_SECAM</><>SECAM (French) Television </entry>
    </row><row>
         <entry>VIDEO_MODE_AUTO</><>Automatic switching, or format does not
                                 matter</entry>
@@ -1269,7 +1269,7 @@
     </table>
     <para>
         The corresponding VIDIOCSCHAN ioctl allows a user to change channel and to
-        request the norm is changed - for exaple to switch between a PAL or an NTSC
+        request the norm is changed - for example to switch between a PAL or an NTSC
         format camera.
   </para>
   <programlisting>
@@ -1332,7 +1332,7 @@
         it make a best effort attempt.
   </para>
   <para>
-        Our depth is 24, as this is in bits. We will be returing RGB24 format. This
+        Our depth is 24, as this is in bits. We will be returning RGB24 format. This
         has one byte of red, then one of green, then one of blue. This then repeats
         for every other pixel in the image. The other common formats the interface 
         defines are
diff -u --recursive --new-file v2.4.0-prerelease/linux/Documentation/DocBook/z8530book.tmpl linux/Documentation/DocBook/z8530book.tmpl
--- v2.4.0-prerelease/linux/Documentation/DocBook/z8530book.tmpl	Sun Mar 12 19:39:47 2000
+++ linux/Documentation/DocBook/z8530book.tmpl	Thu Jan  4 12:50:12 2001
@@ -57,7 +57,7 @@
       <title>Introduction</title>
   <para>
 	The Z85x30 family synchronous/asynchronous controller chips are
-	used on a larg number of cheap network interface cards. The
+	used on a large number of cheap network interface cards. The
 	kernel provides a core interface layer that is designed to make
 	it easy to provide WAN services using this chip.
   </para>
@@ -124,7 +124,7 @@
 	for allocating the interrupt line. The interrupt handler should be
 	set to <function>z8530_interrupt</function>. The device id should
 	be set to the z8530_dev structure pointer. Whether the interrupt can
-	be shared or not is board dependant, and up to you to initialise.
+	be shared or not is board dependent, and up to you to initialise.
   </para>
   <para>
 	The structure holds two channel structures. 
@@ -143,19 +143,19 @@
   </para>
   <para>
 	Repeat the same operation with the B channel if your chip has
-	both channels wired to something useful. This isnt always the
+	both channels wired to something useful. This isn't always the
 	case. If it is not wired then the I/O values do not matter, but
 	you must initialise chanB.dev.
   </para>
   <para>
 	If your board has DMA facilities then initialise the txdma and
 	rxdma fields for the relevant channels. You must also allocate the
-	ISA DMA channels and do any neccessary board level initialisation
+	ISA DMA channels and do any necessary board level initialisation
 	to configure them. The low level driver will do the Z8530 and
 	DMA controller programming but not board specific magic.
   </para>
   <para>
-	Having intialised the device you can then call
+	Having initialised the device you can then call
 	<function>z8530_init</function>. This will probe the chip and 
 	reset it into a known state. An identification sequence is then
 	run to identify the chip type. If the checks fail to pass the
@@ -167,7 +167,7 @@
   <para>
 	Once you have called z8530_init you can also make use of the utility
 	function <function>z8530_describe</function>. This provides a 
-	consistant reporting format for the Z8530 devices, and allows all
+	consistent reporting format for the Z8530 devices, and allows all
 	the drivers to provide consistent reporting.
   </para>
   </chapter>
@@ -191,7 +191,7 @@
 	to the syncppp structures.
   </para>
   <para>
-	The way most drivers approach this paticular problem is to
+	The way most drivers approach this particular problem is to
 	create a structure holding the Z8530 device definition and
 	put that and the syncppp pointer into the private field of
 	the network device. The network device fields of the channels
@@ -330,7 +330,7 @@
   <para>
 	The Z8530 driver is written to be portable. In DMA mode it makes
 	assumptions about the use of ISA DMA. These are probably warranted
-	in most cases as the Z85230 in paticular was designed to glue to PC
+	in most cases as the Z85230 in particular was designed to glue to PC
 	type machines. The PIO mode makes no real assumptions.
   </para>
   <para>
diff -u --recursive --new-file v2.4.0-prerelease/linux/Documentation/IO-mapping.txt linux/Documentation/IO-mapping.txt
--- v2.4.0-prerelease/linux/Documentation/IO-mapping.txt	Tue Oct 31 12:42:25 2000
+++ linux/Documentation/IO-mapping.txt	Thu Jan  4 13:00:15 2001
@@ -1,3 +1,9 @@
+[ NOTE: The virt_to_bus() and bus_to_virt() functions have been
+	superseded by the functionality provided by the PCI DMA
+	interface (see Documentation/DMA-mapping.txt).  They continue
+	to be documented below for historical purposes, but new code
+	must not use them. --davidm 00/12/12 ]
+
 [ This is a mail message in response to a query on IO mapping, thus the
   strange format for a "document" ]
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/Documentation/rtc.txt linux/Documentation/rtc.txt
--- v2.4.0-prerelease/linux/Documentation/rtc.txt	Tue Jan 25 14:13:47 2000
+++ linux/Documentation/rtc.txt	Thu Jan  4 12:50:17 2001
@@ -56,7 +56,7 @@
 exclusive access to the device for your applications.
 
 The alarm and/or interrupt frequency are programmed into the RTC via
-various ioctl(2) calls as listed in ./include/linux/mc146818rtc.h
+various ioctl(2) calls as listed in ./include/linux/rtc.h
 Rather than write 50 pages describing the ioctl() and so on, it is
 perhaps more useful to include a small test program that demonstrates
 how to use them, and demonstrates the features of the driver. This is
@@ -81,7 +81,7 @@
  */
 
 #include <stdio.h>
-#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
 #include <sys/ioctl.h>
 #include <sys/time.h>
 #include <sys/types.h>
diff -u --recursive --new-file v2.4.0-prerelease/linux/Makefile linux/Makefile
--- v2.4.0-prerelease/linux/Makefile	Mon Jan  1 09:38:34 2001
+++ linux/Makefile	Thu Jan  4 13:48:13 2001
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -prerelease
+EXTRAVERSION =
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
@@ -145,7 +145,7 @@
 DRIVERS-$(CONFIG_ATM) += drivers/atm/atm.o
 DRIVERS-$(CONFIG_IDE) += drivers/ide/idedriver.o
 DRIVERS-$(CONFIG_SCSI) += drivers/scsi/scsidrv.o
-DRIVERS-$(CONFIG_IEEE1394) += drivers/ieee1394/ieee1394.a
+DRIVERS-$(CONFIG_IEEE1394) += drivers/ieee1394/ieee1394drv.o
 
 ifneq ($(CONFIG_CD_NO_IDESCSI)$(CONFIG_BLK_DEV_IDECD)$(CONFIG_BLK_DEV_SR)$(CONFIG_PARIDE_PCD),)
 DRIVERS-y += drivers/cdrom/driver.o
@@ -188,6 +188,7 @@
 	.tmp* \
 	drivers/char/consolemap_deftbl.c drivers/video/promcon_tbl.c \
 	drivers/char/conmakehash \
+	drivers/char/drm/*-mod.c \
 	drivers/pci/devlist.h drivers/pci/classlist.h drivers/pci/gen-devlist \
 	drivers/zorro/devlist.h drivers/zorro/gen-devlist \
 	drivers/sound/bin2hex drivers/sound/hex2hex \
diff -u --recursive --new-file v2.4.0-prerelease/linux/README linux/README
--- v2.4.0-prerelease/linux/README	Tue Oct 31 12:42:25 2000
+++ linux/README	Tue Jan  2 16:55:26 2001
@@ -1,26 +1,9 @@
-	Linux kernel release 2.3.xx
+	Linux kernel release 2.4.xx
 
-These are the release notes for Linux version 2.3.  Read them carefully,
+These are the release notes for Linux version 2.4.  Read them carefully,
 as they tell you what this is all about, explain how to install the
 kernel, and what to do if something goes wrong. 
 
-Linux version 2.3 is a DEVELOPMENT kernel, and not intended for general
-public use.  Different releases may have various and sometimes severe
-bugs.  It is *strongly* recommended that you back up the previous kernel
-before installing any new 2.3.xx release.
-
-If you need to use a proven and stable Linux kernel, please use 2.0.38
-or 2.2.xx.  All features which will be in the 2.3.xx releases will be
-contained in 2.4.xx when the code base has stabilized again. 
-
-If you decide to use 2.3, it is recommended that you join the kernel mailing
-list.  To do this, e-mail majordomo@vger.kernel.org, and put in the body
-of the message "subscribe linux-kernel" or "subscribe linux-kernel-digest"
-for a daily digest of the mailing list (it is a high-traffic list.)
-
-However, please make sure you don't ask questions which are already answered
-in various files in the Documentation directory.  See DOCUMENTATION below.
-
 WHAT IS LINUX?
 
   Linux is a Unix clone written from scratch by Linus Torvalds with
@@ -63,7 +46,7 @@
    directory where you have permissions (eg. your home directory) and
    unpack it:
 
-		gzip -cd linux-2.3.XX.tar.gz | tar xvf -
+		gzip -cd linux-2.4.XX.tar.gz | tar xvf -
 
    Replace "XX" with the version number of the latest kernel.
 
@@ -72,7 +55,7 @@
    files.  They should match the library, and not get messed up by
    whatever the kernel-du-jour happens to be.
 
- - You can also upgrade between 2.3.xx releases by patching.  Patches are
+ - You can also upgrade between 2.4.xx releases by patching.  Patches are
    distributed in the traditional gzip and the new bzip2 format.  To
    install by patching, get all the newer patch files, enter the
    directory in which you unpacked the kernel source and execute:
@@ -107,7 +90,7 @@
 
 SOFTWARE REQUIREMENTS
 
-   Compiling and running the 2.3.xx kernels requires up-to-date
+   Compiling and running the 2.4.xx kernels requires up-to-date
    versions of various software packages.  Consult
    ./Documentation/Changes for the minimum version numbers required
    and how to get updates for these packages.  Beware that using
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/alpha/kernel/setup.c linux/arch/alpha/kernel/setup.c
--- v2.4.0-prerelease/linux/arch/alpha/kernel/setup.c	Tue Oct 31 12:42:25 2000
+++ linux/arch/alpha/kernel/setup.c	Tue Jan  2 16:45:37 2001
@@ -1089,7 +1089,8 @@
 		       hwrpb->pagesize,
 		       hwrpb->pa_bits,
 		       hwrpb->max_asn,
-		       loops_per_sec / 500000, (loops_per_sec / 5000) % 100,
+		       loops_per_jiffy / (500000/HZ),
+		       (loops_per_jiffy / (5000/HZ)) % 100,
 		       unaligned[0].count, unaligned[0].pc, unaligned[0].va,
 		       unaligned[1].count, unaligned[1].pc, unaligned[1].va,
 		       platform_string(), nr_processors);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/alpha/kernel/smp.c linux/arch/alpha/kernel/smp.c
--- v2.4.0-prerelease/linux/arch/alpha/kernel/smp.c	Mon Jan  1 09:38:34 2001
+++ linux/arch/alpha/kernel/smp.c	Tue Jan  2 16:45:37 2001
@@ -105,7 +105,7 @@
 static inline void __init
 smp_store_cpu_info(int cpuid)
 {
-	cpu_data[cpuid].loops_per_sec = loops_per_sec;
+	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
 	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
 	cpu_data[cpuid].need_new_asn = 0;
 	cpu_data[cpuid].asn_lock = 0;
@@ -601,12 +601,12 @@
 	bogosum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_present_mask & (1L << i))
-			bogosum += cpu_data[i].loops_per_sec;
+			bogosum += cpu_data[i].loops_per_jiffy;
 	}
 	printk(KERN_INFO "SMP: Total of %d processors activated "
 	       "(%lu.%02lu BogoMIPS).\n",
-	       cpu_count, (bogosum + 2500) / 500000,
-	       ((bogosum + 2500) / 5000) % 100);
+	       cpu_count, (bogosum + 2500) / (500000/HZ),
+	       ((bogosum + 2500) / (5000/HZ)) % 100);
 
 	smp_num_cpus = cpu_count;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/i386/kernel/pci-irq.c linux/arch/i386/kernel/pci-irq.c
--- v2.4.0-prerelease/linux/arch/i386/kernel/pci-irq.c	Mon Jan  1 09:38:34 2001
+++ linux/arch/i386/kernel/pci-irq.c	Wed Jan  3 20:45:26 2001
@@ -157,25 +157,7 @@
 {
 	static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
 
-	switch (pirq) {
-	case 0x00:
-		return 0;
-	default:
-		return irqmap[read_config_nybble(router, 0x48, pirq-1)];
-	case 0xfe:
-		return irqmap[read_config_nybble(router, 0x44, 0)];
-	case 0xff:
-		return irqmap[read_config_nybble(router, 0x75, 0)];
-	}
-}
-
-static void pirq_ali_ide_interrupt(struct pci_dev *router, unsigned reg, unsigned val, unsigned irq)
-{
-	u8 x;
-
-	pci_read_config_byte(router, reg, &x);
-	x = (x & 0xe0) | val;	/* clear the level->edge transform */
-	pci_write_config_byte(router, reg, x);
+	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
 }
 
 static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
@@ -184,17 +166,7 @@
 	unsigned int val = irqmap[irq];
 		
 	if (val) {
-		switch (pirq) {
-		default:
-			write_config_nybble(router, 0x48, pirq-1, val);
-			break;
-		case 0xfe:
-			pirq_ali_ide_interrupt(router, 0x44, val, irq);
-			break;
-		case 0xff:
-			pirq_ali_ide_interrupt(router, 0x75, val, irq);
-			break;
-		}
+		write_config_nybble(router, 0x48, pirq-1, val);
 		return 1;
 	}
 	return 0;
@@ -202,40 +174,25 @@
 
 /*
  * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
- * just a pointer to the config space. However, something
- * funny is going on with 0xfe/0xff, and apparently they
- * should handle IDE irq routing. Ignore them for now.
+ * just a pointer to the config space.
  */
 static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
 {
 	u8 x;
 
-	switch (pirq) {
-	case 0xfe:
-	case 0xff:
-		return 0;
-	default:
-		pci_read_config_byte(router, pirq, &x);
-		return (x < 16) ? x : 0;
-	}
+	pci_read_config_byte(router, pirq, &x);
+	return (x < 16) ? x : 0;
 }
 
 static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
 {
-	switch (pirq) {
-	case 0xfe:
-	case 0xff:
-		return 0;
-	default:
-		pci_write_config_byte(router, pirq, irq);
-		return 1;
-	}
+	pci_write_config_byte(router, pirq, irq);
+	return 1;
 }
 
 /*
  * The VIA pirq rules are nibble-based, like ALI,
- * but without the ugly irq number munging or the
- * strange special cases..
+ * but without the ugly irq number munging.
  */
 static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
 {
@@ -500,8 +457,16 @@
 	}
 	DBG(" -> newirq=%d", newirq);
 
-	/* Try to get current IRQ */
-	if (r->get && (irq = r->get(pirq_router_dev, dev, pirq))) {
+	/* Check if it is hardcoded */
+	if ((pirq & 0xf0) == 0xf0) {
+		irq = pirq & 0xf;
+		DBG(" -> hardcoded IRQ %d\n", irq);
+		msg = "Hardcoded";
+		if (dev->irq && dev->irq != irq) {
+			printk("IRQ routing conflict in pirq table! Try 'pci=autoirq'\n");
+			return 0;
+		}
+	} else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq))) {
 		DBG(" -> got IRQ %d\n", irq);
 		msg = "Found";
 		/* We refuse to override the dev->irq information. Give a warning! */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/i386/kernel/process.c linux/arch/i386/kernel/process.c
--- v2.4.0-prerelease/linux/arch/i386/kernel/process.c	Mon Jan  1 09:38:34 2001
+++ linux/arch/i386/kernel/process.c	Thu Jan  4 12:50:17 2001
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/init.h>
+#include <linux/mc146818rtc.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -257,6 +258,8 @@
  */
 void machine_real_restart(unsigned char *code, int length)
 {
+	unsigned long flags;
+
 	cli();
 
 	/* Write zero to CMOS register number 0x0f, which the BIOS POST
@@ -266,10 +269,12 @@
 	   disable NMIs by setting the top bit in the CMOS address register,
 	   as we're about to do peculiar things to the CPU.  I'm not sure if
 	   `outb_p' is needed instead of just `outb'.  Use it to be on the
-	   safe side. */
+	   safe side.  (Yes, CMOS_WRITE does outb_p's. -  Paul G.)
+	 */
 
-	outb_p (0x8f, 0x70);
-	outb_p (0x00, 0x71);
+	spin_lock_irqsave(&rtc_lock, flags);
+	CMOS_WRITE(0x00, 0x8f);
+	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	/* Remap the kernel at virtual address zero, as well as offset zero
 	   from the kernel segment.  This assumes the kernel segment starts at
@@ -379,13 +384,14 @@
 		pm_power_off();
 }
 
+extern void show_trace(unsigned long* esp);
 
 void show_regs(struct pt_regs * regs)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
 
 	printk("\n");
-	printk("EIP: %04x:[<%08lx>]",0xffff & regs->xcs,regs->eip);
+	printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs,regs->eip, smp_processor_id());
 	if (regs->xcs & 3)
 		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
 	printk(" EFLAGS: %08lx\n",regs->eflags);
@@ -407,6 +413,7 @@
 		".previous			\n"
 		: "=r" (cr4): "0" (0));
 	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
+	show_trace(&regs->esp);
 }
 
 /*
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
--- v2.4.0-prerelease/linux/arch/i386/kernel/traps.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/i386/kernel/traps.c	Wed Jan  3 20:45:26 2001
@@ -89,33 +89,18 @@
 
 /*
  * These constants are for searching for possible module text
- * segments. MODULE_RANGE is a guess of how much space is likely
- * to be vmalloced.
+ * segments.
  */
-#define MODULE_RANGE (8*1024*1024)
 
-void show_stack(unsigned long * esp)
+void show_trace(unsigned long * stack)
 {
-	unsigned long *stack, addr, module_start, module_end;
 	int i;
+	unsigned long addr, module_start, module_end;
 
-	// debugging aid: "show_stack(NULL);" prints the
-	// back trace for this cpu.
-
-	if(esp==NULL)
-		esp=(unsigned long*)&esp;
-
-	stack = esp;
-	for(i=0; i < kstack_depth_to_print; i++) {
-		if (((long) stack & (THREAD_SIZE-1)) == 0)
-			break;
-		if (i && ((i % 8) == 0))
-			printk("\n       ");
-		printk("%08lx ", *stack++);
-	}
+	if (!stack)
+		stack = (unsigned long*)&stack;
 
-	printk("\nCall Trace: ");
-	stack = esp;
+	printk("Call Trace: ");
 	i = 1;
 	module_start = VMALLOC_START;
 	module_end = VMALLOC_END;
@@ -138,6 +123,30 @@
 			i++;
 		}
 	}
+	printk("\n");
+}
+
+void show_stack(unsigned long * esp)
+{
+	unsigned long *stack;
+	int i;
+
+	// debugging aid: "show_stack(NULL);" prints the
+	// back trace for this cpu.
+
+	if(esp==NULL)
+		esp=(unsigned long*)&esp;
+
+	stack = esp;
+	for(i=0; i < kstack_depth_to_print; i++) {
+		if (((long) stack & (THREAD_SIZE-1)) == 0)
+			break;
+		if (i && ((i % 8) == 0))
+			printk("\n       ");
+		printk("%08lx ", *stack++);
+	}
+	printk("\n");
+	show_trace(esp);
 }
 
 static void show_registers(struct pt_regs *regs)
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/i386/vmlinux.lds linux/arch/i386/vmlinux.lds
--- v2.4.0-prerelease/linux/arch/i386/vmlinux.lds	Wed Jul  5 13:42:37 2000
+++ linux/arch/i386/vmlinux.lds	Wed Jan  3 20:45:26 2001
@@ -14,6 +14,9 @@
 	*(.gnu.warning)
 	} = 0x9090
   .text.lock : { *(.text.lock) }	/* out-of-line lock text */
+
+  _etext = .;			/* End of text section */
+
   .rodata : { *(.rodata) }
   .kstrtab : { *(.kstrtab) }
 
@@ -25,8 +28,6 @@
   __start___ksymtab = .;	/* Kernel symbol table */
   __ksymtab : { *(__ksymtab) }
   __stop___ksymtab = .;
-
-  _etext = .;			/* End of text section */
 
   .data : {			/* Data */
 	*(.data)
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/Makefile linux/arch/ia64/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/Makefile	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/Makefile	Thu Jan  4 12:50:17 2001
@@ -19,22 +19,28 @@
 EXTRA	=
 
 CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
-	  -funwind-tables
+	  -funwind-tables -falign-functions=32
+# -frename-registers
 CFLAGS_KERNEL := -mconstant-gp
 
 ifeq ($(CONFIG_ITANIUM_ASTEP_SPECIFIC),y)
 	CFLAGS += -ma-step
 endif
+ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
+	CFLAGS += -mb-step
+endif
 
 ifdef CONFIG_IA64_GENERIC
 	CORE_FILES      :=      arch/$(ARCH)/hp/hp.a	\
 				arch/$(ARCH)/sn/sn.a	\
 				arch/$(ARCH)/dig/dig.a	\
+				arch/$(ARCH)/sn/io/sgiio.o \
 				$(CORE_FILES)
 	SUBDIRS		:=	arch/$(ARCH)/hp		\
 				arch/$(ARCH)/sn/sn1	\
 				arch/$(ARCH)/sn		\
 				arch/$(ARCH)/dig	\
+				arch/$(ARCH)/sn/io	\
 				$(SUBDIRS)
 
 else # !GENERIC
@@ -47,10 +53,7 @@
 endif
 
 ifdef CONFIG_IA64_SGI_SN1
-CFLAGS := $(CFLAGS) -DSN -I. -DBRINGUP -DDIRECT_L1_CONSOLE \
-		-DNUMA_BASE -DSIMULATED_KLGRAPH -DNUMA_MIGR_CONTROL  \
-		-DLITTLE_ENDIAN -DREAL_HARDWARE -DLANGUAGE_C=1 	     \
-		-D_LANGUAGE_C=1
+CFLAGS += -DBRINGUP
         SUBDIRS         :=      arch/$(ARCH)/sn/sn1	\
 				arch/$(ARCH)/sn		\
 				arch/$(ARCH)/sn/io	\
@@ -96,7 +99,7 @@
 
 arch/$(ARCH)/vmlinux.lds: arch/$(ARCH)/vmlinux.lds.S FORCE
 	$(CPP) -D__ASSEMBLY__ -C -P -I$(HPATH) -I$(HPATH)/asm-$(ARCH) \
-		arch/$(ARCH)/vmlinux.lds.S > $@
+		-traditional arch/$(ARCH)/vmlinux.lds.S > $@
 
 FORCE: ;
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/boot/Makefile linux/arch/ia64/boot/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/boot/Makefile	Thu Jun 22 07:09:44 2000
+++ linux/arch/ia64/boot/Makefile	Thu Jan  4 12:50:17 2001
@@ -16,13 +16,11 @@
 	$(CC) $(AFLAGS) -traditional -c -o $*.o $<
 
 OBJECTS	= bootloader.o
-TARGETS =
 
-ifdef CONFIG_IA64_HP_SIM
- TARGETS += bootloader
-endif
+targets-$(CONFIG_IA64_HP_SIM) += bootloader
+targets-$(CONFIG_IA64_GENERIC) += bootloader
 
-all:	$(TARGETS)
+all:	$(targets-y)
 
 bootloader: $(OBJECTS)
 	$(LD) $(LINKFLAGS) $(OBJECTS) $(TOPDIR)/lib/lib.a $(TOPDIR)/arch/$(ARCH)/lib/lib.a \
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/config.in linux/arch/ia64/config.in
--- v2.4.0-prerelease/linux/arch/ia64/config.in	Sun Nov 19 18:44:03 2000
+++ linux/arch/ia64/config.in	Thu Jan  4 12:50:17 2001
@@ -18,7 +18,6 @@
 comment 'General setup'
 
 define_bool CONFIG_IA64 y
-define_bool CONFIG_SWIOTLB y	# for now...
 
 define_bool CONFIG_ISA n
 define_bool CONFIG_EISA n
@@ -41,20 +40,22 @@
 	define_bool CONFIG_ITANIUM y
 	define_bool CONFIG_IA64_BRL_EMU y
 	bool '  Enable Itanium A-step specific code' CONFIG_ITANIUM_ASTEP_SPECIFIC
-	if [ "$CONFIG_ITANIUM_ASTEP_SPECIFIC" = "y" ]; then
-	  bool '   Enable Itanium A1-step specific code' CONFIG_ITANIUM_A1_SPECIFIC
-	fi
 	bool '  Enable Itanium B-step specific code' CONFIG_ITANIUM_BSTEP_SPECIFIC
 	if [ "$CONFIG_ITANIUM_BSTEP_SPECIFIC" = "y" ]; then
 	  bool '   Enable Itanium B0-step specific code' CONFIG_ITANIUM_B0_SPECIFIC
+	  bool '   Enable Itanium B1-step specific code' CONFIG_ITANIUM_B1_SPECIFIC
+	  bool '   Enable Itanium B2-step specific code' CONFIG_ITANIUM_B2_SPECIFIC
+	fi
+	bool '  Enable Itanium C-step specific code' CONFIG_ITANIUM_CSTEP_SPECIFIC
+	if [ "$CONFIG_ITANIUM_CSTEP_SPECIFIC" = "y" ]; then
+	  bool '   Enable Itanium C0-step specific code' CONFIG_ITANIUM_C0_SPECIFIC
 	fi
 	bool '  Force interrupt redirection' CONFIG_IA64_HAVE_IRQREDIR
 	bool '  Enable use of global TLB purge instruction (ptc.g)' CONFIG_ITANIUM_PTCG
 	bool '  Enable SoftSDV hacks' CONFIG_IA64_SOFTSDV_HACKS
 	bool '  Enable AzusA hacks' CONFIG_IA64_AZUSA_HACKS
 	bool '  Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
-	bool '  Force socket buffers below 4GB?' CONFIG_SKB_BELOW_4GB
-
+	bool '  Enable ACPI 2.0 with errata 1.3' CONFIG_ACPI20
 	bool '  ACPI kernel configuration manager (EXPERIMENTAL)' CONFIG_ACPI_KERNEL_CONFIG
 	if [ "$CONFIG_ACPI_KERNEL_CONFIG" = "y" ]; then
 	  define_bool CONFIG_PM y
@@ -70,13 +71,16 @@
 	  bool '    Enable Itanium B0-step specific code' CONFIG_ITANIUM_B0_SPECIFIC
 	fi
 	bool '  Enable SGI Medusa Simulator Support' CONFIG_IA64_SGI_SN1_SIM n
-        bool '  Enable SGI hack for version 1.0 syngery bugs' CONFIG_IA64_SGI_SYNERGY_1_0_HACKS n
 	define_bool CONFIG_DEVFS_DEBUG y
 	define_bool CONFIG_DEVFS_FS y
 	define_bool CONFIG_IA64_BRL_EMU y
 	define_bool CONFIG_IA64_MCA y
-	define_bool CONFIG_IA64_SGI_IO y
 	define_bool CONFIG_ITANIUM y
+	define_bool CONFIG_SGI_IOC3_ETH y
+	define_bool CONFIG_PERCPU_IRQ y
+	define_int  CONFIG_CACHE_LINE_SHIFT 7
+	bool '  Enable DISCONTIGMEM support' CONFIG_DISCONTIGMEM y
+	bool '	Enable NUMA support' CONFIG_NUMA y
 fi
 
 define_bool CONFIG_KCORE_ELF y	# On IA-64, we always want an ELF /proc/kcore.
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/dig/Makefile linux/arch/ia64/dig/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/dig/Makefile	Fri Apr 21 15:21:23 2000
+++ linux/arch/ia64/dig/Makefile	Thu Jan  4 12:50:17 2001
@@ -12,12 +12,10 @@
 
 all: dig.a
 
-O_TARGET        = dig.a
-O_OBJS          = iosapic.o setup.o
+O_TARGET := dig.a
 
-ifdef CONFIG_IA64_GENERIC
-O_OBJS		+= machvec.o
-endif
+obj-y := setup.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o
 
 clean::
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/dig/iosapic.c linux/arch/ia64/dig/iosapic.c
--- v2.4.0-prerelease/linux/arch/ia64/dig/iosapic.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/dig/iosapic.c	Wed Dec 31 16:00:00 1969
@@ -1,409 +0,0 @@
-/*
- * Streamlined APIC support.
- *
- * Copyright (C) 1999 Intel Corp.
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 1999-2000 Hewlett-Packard Co.
- * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
- *
- * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O APIC code.
- *				In particular, we now have separate handlers for edge
- *				and level triggered interrupts.
- */
-#include <linux/config.h>
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/string.h>
-#include <linux/irq.h>
-
-#include <asm/acpi-ext.h>
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/iosapic.h>
-#include <asm/machvec.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-
-#ifdef	CONFIG_ACPI_KERNEL_CONFIG
-# include <asm/acpikcfg.h>
-#endif
-
-#undef DEBUG_IRQ_ROUTING
-
-static spinlock_t iosapic_lock = SPIN_LOCK_UNLOCKED;
-
-struct iosapic_vector iosapic_vector[NR_IRQS] = {
-	[0 ... NR_IRQS-1] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }
-};
-
-/*
- * find the IRQ in the IOSAPIC map for the PCI device on bus/slot/pin
- */
-int
-iosapic_get_PCI_irq_vector (int bus, int slot, int pci_pin)
-{
-	int i;
-
-	for (i = 0; i < NR_IRQS; i++) {
-		if ((iosapic_bustype(i) == BUS_PCI) &&
-		    (iosapic_bus(i) == bus) &&
-		    (iosapic_busdata(i) == ((slot << 16) | pci_pin))) {
-			return i;
-		}
-	}
-	return -1;
-}
-
-static void
-set_rte (unsigned long iosapic_addr, int entry, int pol, int trigger, int delivery,
-	 long dest, int vector)
-{
-	u32 low32;
-	u32 high32;
-
-	low32 = ((pol << IO_SAPIC_POLARITY_SHIFT) |
-		 (trigger << IO_SAPIC_TRIGGER_SHIFT) |
-		 (delivery << IO_SAPIC_DELIVERY_SHIFT) |
-		 vector);
-
-#ifdef CONFIG_IA64_AZUSA_HACKS
-	/* set Flush Disable bit */
-	if (iosapic_addr != 0xc0000000fec00000)
-		low32 |= (1 << 17);
-#endif
-
-	/* dest contains both id and eid */
-	high32 = (dest << IO_SAPIC_DEST_SHIFT);	
-
-	writel(IO_SAPIC_RTE_HIGH(entry), iosapic_addr + IO_SAPIC_REG_SELECT);
-	writel(high32, iosapic_addr + IO_SAPIC_WINDOW);
-	writel(IO_SAPIC_RTE_LOW(entry), iosapic_addr + IO_SAPIC_REG_SELECT);
-	writel(low32, iosapic_addr + IO_SAPIC_WINDOW);
-}
-
-static void
-nop (unsigned int irq)
-{
-	/* do nothing... */
-}
-
-static void 
-mask_irq (unsigned int irq)
-{
-	unsigned long flags, iosapic_addr = iosapic_addr(irq);
-	u32 low32;
-
-	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		writel(IO_SAPIC_RTE_LOW(iosapic_pin(irq)), iosapic_addr + IO_SAPIC_REG_SELECT);
-		low32 = readl(iosapic_addr + IO_SAPIC_WINDOW);
-
-		low32 |= (1 << IO_SAPIC_MASK_SHIFT);    /* Zero only the mask bit */
-		writel(low32, iosapic_addr + IO_SAPIC_WINDOW);
-	}
-	spin_unlock_irqrestore(&iosapic_lock, flags);
-}
-
-static void 
-unmask_irq (unsigned int irq)
-{
-	unsigned long flags, iosapic_addr = iosapic_addr(irq);
-	u32 low32;
-
-	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		writel(IO_SAPIC_RTE_LOW(iosapic_pin(irq)), iosapic_addr + IO_SAPIC_REG_SELECT);
-		low32 = readl(iosapic_addr + IO_SAPIC_WINDOW);
-
-		low32 &= ~(1 << IO_SAPIC_MASK_SHIFT);    /* Zero only the mask bit */
-		writel(low32, iosapic_addr + IO_SAPIC_WINDOW);
-	}
-	spin_unlock_irqrestore(&iosapic_lock, flags);
-}
-
-
-static void
-iosapic_set_affinity (unsigned int irq, unsigned long mask)
-{
-	printk("iosapic_set_affinity: not implemented yet\n");
-}
-
-/*
- * Handlers for level-triggered interrupts.
- */
-
-static unsigned int
-iosapic_startup_level_irq (unsigned int irq)
-{
-	unmask_irq(irq);
-	return 0;
-}
-
-static void
-iosapic_end_level_irq (unsigned int irq)
-{
-	writel(irq, iosapic_addr(irq) + IO_SAPIC_EOI);
-}
-
-#define iosapic_shutdown_level_irq	mask_irq
-#define iosapic_enable_level_irq	unmask_irq
-#define iosapic_disable_level_irq	mask_irq
-#define iosapic_ack_level_irq		nop
-
-struct hw_interrupt_type irq_type_iosapic_level = {
-	typename:	"IO-SAPIC-level",
-	startup:	iosapic_startup_level_irq,
-	shutdown:	iosapic_shutdown_level_irq,
-	enable:		iosapic_enable_level_irq,
-	disable:	iosapic_disable_level_irq,
-	ack:		iosapic_ack_level_irq,
-	end:		iosapic_end_level_irq,
-	set_affinity:	iosapic_set_affinity
-};
-
-/*
- * Handlers for edge-triggered interrupts.
- */
-
-static unsigned int
-iosapic_startup_edge_irq (unsigned int irq)
-{
-	unmask_irq(irq);
-	/*
-	 * IOSAPIC simply drops interrupts pended while the
-	 * corresponding pin was masked, so we can't know if an
-	 * interrupt is pending already.  Let's hope not...
-	 */
-	return 0;
-}
-
-static void
-iosapic_ack_edge_irq (unsigned int irq)
-{
-	/*
-	 * Once we have recorded IRQ_PENDING already, we can mask the
-	 * interrupt for real. This prevents IRQ storms from unhandled
-	 * devices.
-	 */
-	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_DISABLED))
-		mask_irq(irq);
-}
-
-#define iosapic_enable_edge_irq		unmask_irq
-#define iosapic_disable_edge_irq	nop
-#define iosapic_end_edge_irq		nop
-
-struct hw_interrupt_type irq_type_iosapic_edge = {
-	typename:	"IO-SAPIC-edge",
-	startup:	iosapic_startup_edge_irq,
-	shutdown:	iosapic_disable_edge_irq,
-	enable:		iosapic_enable_edge_irq,
-	disable:	iosapic_disable_edge_irq,
-	ack:		iosapic_ack_edge_irq,
-	end:		iosapic_end_edge_irq,
-	set_affinity:	iosapic_set_affinity
-};
-
-unsigned int
-iosapic_version (unsigned long base_addr) 
-{
-	/*
-	 * IOSAPIC Version Register return 32 bit structure like:
-	 * {
-	 *	unsigned int version   : 8;
-	 *	unsigned int reserved1 : 8;
-	 *	unsigned int pins      : 8;
-	 *	unsigned int reserved2 : 8;
-	 * }
-	 */
-	writel(IO_SAPIC_VERSION, base_addr + IO_SAPIC_REG_SELECT);
-	return readl(IO_SAPIC_WINDOW + base_addr);
-}
-
-void
-iosapic_init (unsigned long address, int irqbase)
-{
-	struct hw_interrupt_type *irq_type;
-	struct pci_vector_struct *vectors;
-	int i, irq, num_pci_vectors;
-
-	if (irqbase == 0)
-		/* 
-		 * Map the legacy ISA devices into the IOSAPIC data.
-		 * Some of these may get reprogrammed later on with
-		 * data from the ACPI Interrupt Source Override table.
-		 */
-		for (i = 0; i < 16; i++) {
-			irq = isa_irq_to_vector(i);
-			iosapic_pin(irq) = i; 
-			iosapic_bus(irq) = BUS_ISA;
-			iosapic_busdata(irq) = 0;
-			iosapic_dmode(irq) = IO_SAPIC_LOWEST_PRIORITY;
-			iosapic_trigger(irq)  = IO_SAPIC_EDGE;
-			iosapic_polarity(irq) = IO_SAPIC_POL_HIGH;
-#ifdef DEBUG_IRQ_ROUTING
-			printk("ISA: IRQ %02x -> Vector %02x IOSAPIC Pin %d\n",
-			       i, irq, iosapic_pin(irq));
-#endif
-		}
-
-#ifndef CONFIG_IA64_SOFTSDV_HACKS
-	/* 
-	 * Map the PCI Interrupt data into the ACPI IOSAPIC data using
-	 * the info that the bootstrap loader passed to us.
-	 */
-# ifdef CONFIG_ACPI_KERNEL_CONFIG
-	acpi_cf_get_pci_vectors(&vectors, &num_pci_vectors);
-# else
-	ia64_boot_param.pci_vectors = (__u64) __va(ia64_boot_param.pci_vectors);
-	vectors = (struct pci_vector_struct *) ia64_boot_param.pci_vectors;
-	num_pci_vectors = ia64_boot_param.num_pci_vectors;
-# endif
-	for (i = 0; i < num_pci_vectors; i++) {
-		irq = vectors[i].irq;
-		if (irq < 16)
-			irq = isa_irq_to_vector(irq);
-		if (iosapic_baseirq(irq) != irqbase)
-			continue;
-
-		iosapic_bustype(irq) = BUS_PCI;
-		iosapic_pin(irq) = irq - iosapic_baseirq(irq);
-		iosapic_bus(irq) = vectors[i].bus;
-		/*
-		 * Map the PCI slot and pin data into iosapic_busdata()
-		 */
-		iosapic_busdata(irq) = (vectors[i].pci_id & 0xffff0000) | vectors[i].pin;
-
-		/* Default settings for PCI */
-		iosapic_dmode(irq) = IO_SAPIC_LOWEST_PRIORITY;
-		iosapic_trigger(irq)  = IO_SAPIC_LEVEL;
-		iosapic_polarity(irq) = IO_SAPIC_POL_LOW;
-
-# ifdef DEBUG_IRQ_ROUTING
-		printk("PCI: BUS %d Slot %x Pin %x IRQ %02x --> Vector %02x IOSAPIC Pin %d\n", 
-		       vectors[i].bus, vectors[i].pci_id>>16, vectors[i].pin, vectors[i].irq, 
-		       irq, iosapic_pin(irq));
-# endif
-	}
-#endif /* CONFIG_IA64_SOFTSDV_HACKS */
-
-	for (i = 0; i < NR_IRQS; ++i) {
-		if (iosapic_baseirq(i) != irqbase)
-			continue;
-
-		if (iosapic_pin(i) != -1) {
-			if (iosapic_trigger(i) == IO_SAPIC_LEVEL)
-			  irq_type = &irq_type_iosapic_level;
-			else
-			  irq_type = &irq_type_iosapic_edge;
-			if (irq_desc[i].handler != &no_irq_type)
-				printk("dig_irq_init: warning: changing vector %d from %s to %s\n",
-				       i, irq_desc[i].handler->typename,
-				       irq_type->typename);
-			irq_desc[i].handler = irq_type;
-
-			/* program the IOSAPIC routing table: */
-			set_rte(iosapic_addr(i), iosapic_pin(i), iosapic_polarity(i),
-				iosapic_trigger(i), iosapic_dmode(i),
-				(ia64_get_lid() >> 16) & 0xffff, i);
-		}
-	}
-}
-
-void
-dig_irq_init (void)
-{
-	/*
-	 * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
-	 * enabled.
-	 */
-	outb(0xff, 0xA1);
-	outb(0xff, 0x21);
-}
-
-void
-dig_pci_fixup (void)
-{
-	struct	pci_dev	*dev;
-	int		irq;
-	unsigned char 	pin;
-
-	pci_for_each_dev(dev) {
-		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-		if (pin) {
-			pin--;          /* interrupt pins are numbered starting from 1 */
-			irq = iosapic_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn),
-							 pin);
-			if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-				struct pci_dev * bridge = dev->bus->self;
-
-				/* allow for multiple bridges on an adapter */
-				do {
-					/* do the bridge swizzle... */
-					pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-					irq = iosapic_get_PCI_irq_vector(bridge->bus->number,
-									 PCI_SLOT(bridge->devfn), pin);
-				} while (irq < 0 && (bridge = bridge->bus->self));
-				if (irq >= 0)
-					printk(KERN_WARNING
-					       "PCI: using PPB(B%d,I%d,P%d) to get irq %02x\n",
-					       bridge->bus->number, PCI_SLOT(bridge->devfn),
-					       pin, irq);
-				else
-					printk(KERN_WARNING
-					       "PCI: Couldn't map irq for B%d,I%d,P%d\n",
-					       bridge->bus->number, PCI_SLOT(bridge->devfn),
-					       pin);
-			}
-			if (irq >= 0) {
-				printk("PCI->APIC IRQ transform: (B%d,I%d,P%d) -> %02x\n",
-				       dev->bus->number, PCI_SLOT(dev->devfn), pin, irq);
-				dev->irq = irq;
-			}
-		}
-		/*
-		 * Nothing to fixup
-		 * Fix out-of-range IRQ numbers
-		 */
-		if (dev->irq >= NR_IRQS)
-			dev->irq = 15;	/* Spurious interrupts */
-	}
-}
-
-/*
- * Register an IOSAPIC discovered via ACPI.
- */
-void __init
-dig_register_iosapic (acpi_entry_iosapic_t *iosapic)
-{
-	unsigned int ver, v;
-	int l, max_pin;
-
-	ver = iosapic_version((unsigned long) ioremap(iosapic->address, 0));
-	max_pin = (ver >> 16) & 0xff;
-	
-	printk("IOSAPIC Version %x.%x: address 0x%lx IRQs 0x%x - 0x%x\n", 
-	       (ver & 0xf0) >> 4, (ver & 0x0f), iosapic->address, 
-	       iosapic->irq_base, iosapic->irq_base + max_pin);
-	
-	for (l = 0; l <= max_pin; l++) {
-		v = iosapic->irq_base + l;
-		if (v < 16)
-			v = isa_irq_to_vector(v);
-		if (v > IA64_MAX_VECTORED_IRQ) {
-			printk("    !!! bad IOSAPIC interrupt vector: %u\n", v);
-			continue;
-		}
-		/* XXX Check for IOSAPIC collisions */
-		iosapic_addr(v) = (unsigned long) ioremap(iosapic->address, 0);
-		iosapic_baseirq(v) = iosapic->irq_base;
-	}
-	iosapic_init(iosapic->address, iosapic->irq_base);
-}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/dig/setup.c linux/arch/ia64/dig/setup.c
--- v2.4.0-prerelease/linux/arch/ia64/dig/setup.c	Fri Aug 11 19:09:06 2000
+++ linux/arch/ia64/dig/setup.c	Thu Jan  4 12:50:17 2001
@@ -84,3 +84,14 @@
 	screen_info.orig_video_isVGA = 1;	/* XXX fake */
 	screen_info.orig_video_ega_bx = 3;	/* XXX fake */
 }
+
+void
+dig_irq_init (void)
+{
+	/*
+	 * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
+	 * enabled.
+	 */
+	outb(0xff, 0xA1);
+	outb(0xff, 0x21);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/hp/Makefile linux/arch/ia64/hp/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/hp/Makefile	Fri Apr 21 15:21:23 2000
+++ linux/arch/ia64/hp/Makefile	Thu Jan  4 12:50:17 2001
@@ -7,12 +7,10 @@
 
 all: hp.a
 
-O_TARGET	= hp.a
-O_OBJS		= hpsim_console.o hpsim_irq.o hpsim_setup.o
+O_TARGET := hp.a
 
-ifdef CONFIG_IA64_GENERIC
-O_OBJS		+= hpsim_machvec.o
-endif
+obj-y := hpsim_console.o hpsim_irq.o hpsim_setup.o
+obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
 
 clean::
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/hp/hpsim_setup.c linux/arch/ia64/hp/hpsim_setup.c
--- v2.4.0-prerelease/linux/arch/ia64/hp/hpsim_setup.c	Fri Jul 14 16:08:11 2000
+++ linux/arch/ia64/hp/hpsim_setup.c	Thu Jan  4 12:50:17 2001
@@ -63,12 +63,6 @@
 }
 
 void __init
-hpsim_pci_fixup (void)
-{
-}
-
-
-void __init
 hpsim_setup (char **cmdline_p)
 {
 	ROOT_DEV = to_kdev_t(0x0801);		/* default to first SCSI drive */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/ia32/Makefile linux/arch/ia64/ia32/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/ia32/Makefile	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/ia32/Makefile	Thu Jan  4 12:50:17 2001
@@ -10,7 +10,8 @@
 all: ia32.o
 
 O_TARGET := ia32.o
-O_OBJS	 := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o ia32_support.o ia32_traps.o binfmt_elf32.o
+
+obj-y := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o ia32_support.o ia32_traps.o binfmt_elf32.o
 
 clean::
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/ia32/binfmt_elf32.c linux/arch/ia64/ia32/binfmt_elf32.c
--- v2.4.0-prerelease/linux/arch/ia64/ia32/binfmt_elf32.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/ia32/binfmt_elf32.c	Thu Jan  4 12:50:17 2001
@@ -9,6 +9,7 @@
 
 #include <linux/types.h>
 
+#include <asm/param.h>
 #include <asm/signal.h>
 #include <asm/ia32.h>
 
@@ -31,6 +32,9 @@
 # define CONFIG_BINFMT_ELF_MODULE	CONFIG_BINFMT_ELF32_MODULE
 #endif
 
+#undef CLOCKS_PER_SEC
+#define CLOCKS_PER_SEC	IA32_CLOCKS_PER_SEC
+
 extern void ia64_elf32_init(struct pt_regs *regs);
 extern void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address);
 
@@ -89,8 +93,8 @@
 	
 	/* Do all the IA-32 setup here */
 
-	current->thread.map_base = 0x40000000;
-
+	current->thread.map_base  =  0x40000000;
+	current->thread.task_size =  0xc0000000;	/* use what Linux/x86 uses... */
  
 	/* setup ia32 state for ia32_load_state */
 
@@ -239,6 +243,12 @@
 	if (eppnt->p_memsz >= (1UL<<32) || addr > (1UL<<32) - eppnt->p_memsz)
 		return -EINVAL;
 
+	/*
+	 *  Make sure the elf interpreter doesn't get loaded at location 0
+	 *    so that NULL pointers correctly cause segfaults.
+	 */
+	if (addr == 0)
+		addr += PAGE_SIZE;
 #if 1
 	set_brk(ia32_mm_addr(addr), addr + eppnt->p_memsz);
 	memset((char *) addr + eppnt->p_filesz, 0, eppnt->p_memsz - eppnt->p_filesz);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/ia32/ia32_entry.S linux/arch/ia64/ia32/ia32_entry.S
--- v2.4.0-prerelease/linux/arch/ia64/ia32/ia32_entry.S	Tue Sep  5 13:50:01 2000
+++ linux/arch/ia64/ia32/ia32_entry.S	Thu Jan  4 12:50:17 2001
@@ -133,7 +133,7 @@
 	data8 sys32_ni_syscall /* sys_stime is not supported on IA64 */  /* 25 */
 	data8 sys32_ptrace
 	data8 sys32_alarm
-	data8 sys32_ni_syscall
+	data8 sys_pause
 	data8 sys32_ni_syscall
 	data8 ia32_utime	  /* 30 */
 	data8 sys32_ni_syscall	  /* old stty syscall holder */
@@ -291,11 +291,43 @@
 	data8 sys_getcwd
 	data8 sys_capget
 	data8 sys_capset	  /* 185 */
-	data8 sys_sigaltstack
+	data8 sys32_sigaltstack
 	data8 sys_sendfile
 	data8 sys32_ni_syscall		  /* streams1 */
 	data8 sys32_ni_syscall		  /* streams2 */
 	data8 sys32_vfork	  /* 190 */
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall	  /* 195 */
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall	  /* 200 */
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall	  /* 205 */
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall	  /* 210 */
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall	  /* 215 */
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall	  /* 220 */
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
 	/*
 	 *  CAUTION: If any system calls are added beyond this point
 	 *	then the check in `arch/ia64/kernel/ivt.S' will have
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/ia32/ia32_ioctl.c linux/arch/ia64/ia32/ia32_ioctl.c
--- v2.4.0-prerelease/linux/arch/ia64/ia32/ia32_ioctl.c	Fri Jul 14 16:08:11 2000
+++ linux/arch/ia64/ia32/ia32_ioctl.c	Thu Jan  4 12:50:17 2001
@@ -22,81 +22,193 @@
 #include <linux/if_ppp.h>
 #include <linux/ixjuser.h>
 #include <linux/i2o-dev.h>
+#include <../drivers/char/drm/drm.h>
+
+#define IOCTL_NR(a)	((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
+
+#define DO_IOCTL(fd, cmd, arg) ({			\
+	int _ret;					\
+	mm_segment_t _old_fs = get_fs();		\
+							\
+	set_fs(KERNEL_DS);				\
+	_ret = sys_ioctl(fd, cmd, (unsigned long)arg);	\
+	set_fs(_old_fs);				\
+	_ret;						\
+})
+
+#define P(i)	((void *)(long)(i))
+
 
 asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
 
 asmlinkage long ia32_ioctl(unsigned int fd, unsigned int cmd, unsigned int arg)
 {
+	long ret;
+
+	switch (IOCTL_NR(cmd)) {
+
+	case IOCTL_NR(DRM_IOCTL_VERSION):
+		{
+			drm_version_t ver;
+			struct {
+				int	version_major;
+				int	version_minor;
+				int	version_patchlevel;
+				unsigned int name_len;
+				unsigned int name; /* pointer */
+				unsigned int date_len;
+				unsigned int date; /* pointer */
+				unsigned int desc_len;
+				unsigned int desc; /* pointer */
+			} ver32;
+
+			if (copy_from_user(&ver32, P(arg), sizeof(ver32)))
+				return -EFAULT;
+			ver.name_len = ver32.name_len;
+			ver.name = P(ver32.name);
+			ver.date_len = ver32.date_len;
+			ver.date = P(ver32.date);
+			ver.desc_len = ver32.desc_len;
+			ver.desc = P(ver32.desc);
+			ret = DO_IOCTL(fd, cmd, &ver);
+			if (ret >= 0) {
+				ver32.version_major = ver.version_major;
+				ver32.version_minor = ver.version_minor;
+				ver32.version_patchlevel = ver.version_patchlevel;
+				ver32.name_len = ver.name_len;
+				ver32.date_len = ver.date_len;
+				ver32.desc_len = ver.desc_len;
+				if (copy_to_user(P(arg), &ver32, sizeof(ver32)))
+					return -EFAULT;
+			}
+			return(ret);
+		}
+
+	case IOCTL_NR(DRM_IOCTL_GET_UNIQUE):
+		{
+			drm_unique_t un;
+			struct {
+				unsigned int unique_len;
+				unsigned int unique;
+			} un32;
+
+			if (copy_from_user(&un32, P(arg), sizeof(un32)))
+				return -EFAULT;
+			un.unique_len = un32.unique_len;
+			un.unique = P(un32.unique);
+			ret = DO_IOCTL(fd, cmd, &un);
+			if (ret >= 0) {
+				un32.unique_len = un.unique_len;
+				if (copy_to_user(P(arg), &un32, sizeof(un32)))
+					return -EFAULT;
+			}
+			return(ret);
+		}
+	case IOCTL_NR(DRM_IOCTL_SET_UNIQUE):
+	case IOCTL_NR(DRM_IOCTL_ADD_MAP):
+	case IOCTL_NR(DRM_IOCTL_ADD_BUFS):
+	case IOCTL_NR(DRM_IOCTL_MARK_BUFS):
+	case IOCTL_NR(DRM_IOCTL_INFO_BUFS):
+	case IOCTL_NR(DRM_IOCTL_MAP_BUFS):
+	case IOCTL_NR(DRM_IOCTL_FREE_BUFS):
+	case IOCTL_NR(DRM_IOCTL_ADD_CTX):
+	case IOCTL_NR(DRM_IOCTL_RM_CTX):
+	case IOCTL_NR(DRM_IOCTL_MOD_CTX):
+	case IOCTL_NR(DRM_IOCTL_GET_CTX):
+	case IOCTL_NR(DRM_IOCTL_SWITCH_CTX):
+	case IOCTL_NR(DRM_IOCTL_NEW_CTX):
+	case IOCTL_NR(DRM_IOCTL_RES_CTX):
+
+	case IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE):
+	case IOCTL_NR(DRM_IOCTL_AGP_RELEASE):
+	case IOCTL_NR(DRM_IOCTL_AGP_ENABLE):
+	case IOCTL_NR(DRM_IOCTL_AGP_INFO):
+	case IOCTL_NR(DRM_IOCTL_AGP_ALLOC):
+	case IOCTL_NR(DRM_IOCTL_AGP_FREE):
+	case IOCTL_NR(DRM_IOCTL_AGP_BIND):
+	case IOCTL_NR(DRM_IOCTL_AGP_UNBIND):
+
+	/* Mga specific ioctls */
+
+	case IOCTL_NR(DRM_IOCTL_MGA_INIT):
+
+	/* I810 specific ioctls */
+
+	case IOCTL_NR(DRM_IOCTL_I810_GETBUF):
+	case IOCTL_NR(DRM_IOCTL_I810_COPY):
+
+	/* Rage 128 specific ioctls */
 
-	switch (cmd) {
+	case IOCTL_NR(DRM_IOCTL_R128_PACKET):
 
-	case VFAT_IOCTL_READDIR_BOTH:
-	case VFAT_IOCTL_READDIR_SHORT:
-	case MTIOCGET:
-	case MTIOCPOS:
-	case MTIOCGETCONFIG:
-	case MTIOCSETCONFIG:
-	case PPPIOCSCOMPRESS:
-	case PPPIOCGIDLE:
-	case NCP_IOC_GET_FS_INFO_V2:
-	case NCP_IOC_GETOBJECTNAME:
-	case NCP_IOC_SETOBJECTNAME:
-	case NCP_IOC_GETPRIVATEDATA:
-	case NCP_IOC_SETPRIVATEDATA:
-	case NCP_IOC_GETMOUNTUID2:
-	case CAPI_MANUFACTURER_CMD:
-	case VIDIOCGTUNER:
-	case VIDIOCSTUNER:
-	case VIDIOCGWIN:
-	case VIDIOCSWIN:
-	case VIDIOCGFBUF:
-	case VIDIOCSFBUF:
-	case MGSL_IOCSPARAMS:
-	case MGSL_IOCGPARAMS:
-	case ATM_GETNAMES:
-	case ATM_GETLINKRATE:
-	case ATM_GETTYPE:
-	case ATM_GETESI:
-	case ATM_GETADDR:
-	case ATM_RSTADDR:
-	case ATM_ADDADDR:
-	case ATM_DELADDR:
-	case ATM_GETCIRANGE:
-	case ATM_SETCIRANGE:
-	case ATM_SETESI:
-	case ATM_SETESIF:
-	case ATM_GETSTAT:
-	case ATM_GETSTATZ:
-	case ATM_GETLOOP:
-	case ATM_SETLOOP:
-	case ATM_QUERYLOOP:
-	case ENI_SETMULT:
-	case NS_GETPSTAT:
-	/* case NS_SETBUFLEV: This is a duplicate case with ZATM_GETPOOLZ */
-	case ZATM_GETPOOLZ:
-	case ZATM_GETPOOL:
-	case ZATM_SETPOOL:
-	case ZATM_GETTHIST:
-	case IDT77105_GETSTAT:
-	case IDT77105_GETSTATZ:
-	case IXJCTL_TONE_CADENCE:
-	case IXJCTL_FRAMES_READ:
-	case IXJCTL_FRAMES_WRITTEN:
-	case IXJCTL_READ_WAIT:
-	case IXJCTL_WRITE_WAIT:
-	case IXJCTL_DRYBUFFER_READ:
-	case I2OHRTGET:
-	case I2OLCTGET:
-	case I2OPARMSET:
-	case I2OPARMGET:
-	case I2OSWDL:
-	case I2OSWUL:
-	case I2OSWDEL:
-	case I2OHTML:
-		printk("%x:unimplemented IA32 ioctl system call\n", cmd);
-		return(-EINVAL);
+	case IOCTL_NR(VFAT_IOCTL_READDIR_BOTH):
+	case IOCTL_NR(VFAT_IOCTL_READDIR_SHORT):
+	case IOCTL_NR(MTIOCGET):
+	case IOCTL_NR(MTIOCPOS):
+	case IOCTL_NR(MTIOCGETCONFIG):
+	case IOCTL_NR(MTIOCSETCONFIG):
+	case IOCTL_NR(PPPIOCSCOMPRESS):
+	case IOCTL_NR(PPPIOCGIDLE):
+	case IOCTL_NR(NCP_IOC_GET_FS_INFO_V2):
+	case IOCTL_NR(NCP_IOC_GETOBJECTNAME):
+	case IOCTL_NR(NCP_IOC_SETOBJECTNAME):
+	case IOCTL_NR(NCP_IOC_GETPRIVATEDATA):
+	case IOCTL_NR(NCP_IOC_SETPRIVATEDATA):
+	case IOCTL_NR(NCP_IOC_GETMOUNTUID2):
+	case IOCTL_NR(CAPI_MANUFACTURER_CMD):
+	case IOCTL_NR(VIDIOCGTUNER):
+	case IOCTL_NR(VIDIOCSTUNER):
+	case IOCTL_NR(VIDIOCGWIN):
+	case IOCTL_NR(VIDIOCSWIN):
+	case IOCTL_NR(VIDIOCGFBUF):
+	case IOCTL_NR(VIDIOCSFBUF):
+	case IOCTL_NR(MGSL_IOCSPARAMS):
+	case IOCTL_NR(MGSL_IOCGPARAMS):
+	case IOCTL_NR(ATM_GETNAMES):
+	case IOCTL_NR(ATM_GETLINKRATE):
+	case IOCTL_NR(ATM_GETTYPE):
+	case IOCTL_NR(ATM_GETESI):
+	case IOCTL_NR(ATM_GETADDR):
+	case IOCTL_NR(ATM_RSTADDR):
+	case IOCTL_NR(ATM_ADDADDR):
+	case IOCTL_NR(ATM_DELADDR):
+	case IOCTL_NR(ATM_GETCIRANGE):
+	case IOCTL_NR(ATM_SETCIRANGE):
+	case IOCTL_NR(ATM_SETESI):
+	case IOCTL_NR(ATM_SETESIF):
+	case IOCTL_NR(ATM_GETSTAT):
+	case IOCTL_NR(ATM_GETSTATZ):
+	case IOCTL_NR(ATM_GETLOOP):
+	case IOCTL_NR(ATM_SETLOOP):
+	case IOCTL_NR(ATM_QUERYLOOP):
+	case IOCTL_NR(ENI_SETMULT):
+	case IOCTL_NR(NS_GETPSTAT):
+	/* case IOCTL_NR(NS_SETBUFLEV): This is a duplicate case with ZATM_GETPOOLZ */
+	case IOCTL_NR(ZATM_GETPOOLZ):
+	case IOCTL_NR(ZATM_GETPOOL):
+	case IOCTL_NR(ZATM_SETPOOL):
+	case IOCTL_NR(ZATM_GETTHIST):
+	case IOCTL_NR(IDT77105_GETSTAT):
+	case IOCTL_NR(IDT77105_GETSTATZ):
+	case IOCTL_NR(IXJCTL_TONE_CADENCE):
+	case IOCTL_NR(IXJCTL_FRAMES_READ):
+	case IOCTL_NR(IXJCTL_FRAMES_WRITTEN):
+	case IOCTL_NR(IXJCTL_READ_WAIT):
+	case IOCTL_NR(IXJCTL_WRITE_WAIT):
+	case IOCTL_NR(IXJCTL_DRYBUFFER_READ):
+	case IOCTL_NR(I2OHRTGET):
+	case IOCTL_NR(I2OLCTGET):
+	case IOCTL_NR(I2OPARMSET):
+	case IOCTL_NR(I2OPARMGET):
+	case IOCTL_NR(I2OSWDL):
+	case IOCTL_NR(I2OSWUL):
+	case IOCTL_NR(I2OSWDEL):
+	case IOCTL_NR(I2OHTML):
+		break;
 	default:
 		return(sys_ioctl(fd, cmd, (unsigned long)arg));
 
 	}
+	printk("%x:unimplemented IA32 ioctl system call\n", cmd);
+	return(-EINVAL);
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/ia32/ia32_traps.c linux/arch/ia64/ia32/ia32_traps.c
--- v2.4.0-prerelease/linux/arch/ia64/ia32/ia32_traps.c	Thu Jun 22 07:09:44 2000
+++ linux/arch/ia64/ia32/ia32_traps.c	Thu Jan  4 12:50:17 2001
@@ -119,6 +119,6 @@
 	      default:
 		return -1;
 	}
-	force_sig_info(SIGTRAP, &siginfo, current);
+	force_sig_info(siginfo.si_signo, &siginfo, current);
 	return 0;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/ia32/sys_ia32.c linux/arch/ia64/ia32/sys_ia32.c
--- v2.4.0-prerelease/linux/arch/ia64/ia32/sys_ia32.c	Sun Nov 19 18:44:03 2000
+++ linux/arch/ia64/ia32/sys_ia32.c	Thu Jan  4 12:50:17 2001
@@ -236,8 +236,6 @@
 
 	if (OFFSET4K(addr) || OFFSET4K(off))
 		return -EINVAL;
-	if (prot & PROT_WRITE)
-		prot |= PROT_EXEC;
 	prot |= PROT_WRITE;
 	front = NULL;
 	back = NULL;
@@ -287,23 +285,20 @@
 	unsigned int poff;
 
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	prot |= PROT_EXEC;
 
  	if ((flags & MAP_FIXED) && ((addr & ~PAGE_MASK) || (offset & ~PAGE_MASK)))
  		error = do_mmap_fake(file, addr, len, prot, flags, (loff_t)offset);
- 	else if (!addr && (offset & ~PAGE_MASK)) {
+	else {
  		poff = offset & PAGE_MASK;
  		len += offset - poff;
 
  		down(&current->mm->mmap_sem);
- 		error = do_mmap(file, addr, len, prot, flags, poff);
+ 		error = do_mmap_pgoff(file, addr, len, prot, flags, poff >> PAGE_SHIFT);
   		up(&current->mm->mmap_sem);
 
  		if (!IS_ERR((void *) error))
  			error += offset - poff;
- 	} else {
-  		down(&current->mm->mmap_sem);
-  		error = do_mmap(file, addr, len, prot, flags, offset);
- 		up(&current->mm->mmap_sem);
  	}
 	return error;
 }
@@ -2032,14 +2027,14 @@
 	ret = sys_times(tbuf ? &t : NULL);
 	set_fs (old_fs);
 	if (tbuf) {
-		err = put_user (t.tms_utime, &tbuf->tms_utime);
-		err |= __put_user (t.tms_stime, &tbuf->tms_stime);
-		err |= __put_user (t.tms_cutime, &tbuf->tms_cutime);
-		err |= __put_user (t.tms_cstime, &tbuf->tms_cstime);
+		err = put_user (IA32_TICK(t.tms_utime), &tbuf->tms_utime);
+		err |= __put_user (IA32_TICK(t.tms_stime), &tbuf->tms_stime);
+		err |= __put_user (IA32_TICK(t.tms_cutime), &tbuf->tms_cutime);
+		err |= __put_user (IA32_TICK(t.tms_cstime), &tbuf->tms_cstime);
 		if (err)
 			ret = -EFAULT;
 	}
-	return ret;
+	return IA32_TICK(ret);
 }
 
 unsigned int
@@ -2617,6 +2612,53 @@
 	 *	manipulating the page protections...
 	 */
 	return(sys_iopl(3, 0, 0, 0));
+}
+
+typedef struct {
+	unsigned int	ss_sp;
+	unsigned int	ss_flags;
+	unsigned int	ss_size;
+} ia32_stack_t;
+
+asmlinkage long
+sys32_sigaltstack (const ia32_stack_t *uss32, ia32_stack_t *uoss32,
+long arg2, long arg3, long arg4,
+long arg5, long arg6, long arg7,
+long stack)
+{
+	struct pt_regs *pt = (struct pt_regs *) &stack;
+	stack_t uss, uoss;
+	ia32_stack_t buf32;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+
+	if (uss32)
+		if (copy_from_user(&buf32, (void *)A(uss32), sizeof(ia32_stack_t)))
+			return(-EFAULT);
+	uss.ss_sp = (void *) (long) buf32.ss_sp;
+	uss.ss_flags = buf32.ss_flags;
+	uss.ss_size = buf32.ss_size;
+	set_fs(KERNEL_DS);
+	ret = do_sigaltstack(uss32 ? &uss : NULL, &uoss, pt->r12);
+	set_fs(old_fs);
+	if (ret < 0)
+		return(ret);
+	if (uoss32) {
+		buf32.ss_sp = (long) uoss.ss_sp;
+		buf32.ss_flags = uoss.ss_flags;
+		buf32.ss_size = uoss.ss_size;
+		if (copy_to_user((void*)A(uoss32), &buf32, sizeof(ia32_stack_t)))
+			return(-EFAULT);
+	}
+	return(ret);
+}
+
+asmlinkage int
+sys_pause (void)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	return -ERESTARTNOHAND;
 }
 
 #ifdef	NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/Makefile linux/arch/ia64/kernel/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/kernel/Makefile	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/Makefile	Thu Jan  4 12:50:17 2001
@@ -9,20 +9,20 @@
 
 all: kernel.o head.o init_task.o
 
-obj-y := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o		\
-	 machvec.o pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o semaphore.o setup.o	\
-	 signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
+O_TARGET := kernel.o
 
-obj-$(CONFIG_IA64_GENERIC) += machvec.o
+obj-y := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o \
+	 machvec.o pal.o process.o perfmon.o ptrace.o sal.o semaphore.o setup.o	\
+	 signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o iosapic.o
+obj-$(CONFIG_IA64_DIG) += iosapic.o
 obj-$(CONFIG_IA64_PALINFO) += palinfo.o
 obj-$(CONFIG_PCI) += pci.o
 obj-$(CONFIG_SMP) += smp.o smpboot.o
 obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o
 obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
 
-O_TARGET := kernel.o
-O_OBJS	 :=  $(obj-y)
-OX_OBJS  := ia64_ksyms.o
+export-objs := ia64_ksyms.o
 
 clean::
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/acpi.c linux/arch/ia64/kernel/acpi.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/acpi.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/acpi.c	Thu Jan  4 12:50:17 2001
@@ -6,6 +6,12 @@
  * 
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 2000 Hewlett-Packard Co.
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000 Intel Corp.
+ * Copyright (C) 2000 J.I. Lee <jung-ik.lee@intel.com>
+ *      ACPI based kernel configuration manager.
+ *      ACPI 2.0 & IA64 ext 0.71
  */
 
 #include <linux/config.h>
@@ -36,29 +42,87 @@
 
 void (*pm_idle)(void);
 
+asm (".weak iosapic_register_legacy_irq");
+asm (".weak iosapic_init");
+
+const char *
+acpi_get_sysname (void)
+{
+	/* the following should go away once we have an ACPI parser: */
+#ifdef CONFIG_IA64_GENERIC
+	return "hpsim";
+#else
+# if defined (CONFIG_IA64_HP_SIM)
+	return "hpsim";
+# elif defined (CONFIG_IA64_SGI_SN1)
+	return "sn1";
+# elif defined (CONFIG_IA64_DIG)
+	return "dig";
+# else
+#	error Unknown platform.  Fix acpi.c.
+# endif
+#endif
+
+}
+
 /*
- * Identify usable CPU's and remember them for SMP bringup later.
+ * Configure legacy IRQ information.
  */
 static void __init
-acpi_lsapic(char *p) 
+acpi_legacy_irq (char *p)
 {
-	int add = 1;
-
-	acpi_entry_lsapic_t *lsapic = (acpi_entry_lsapic_t *) p;
+	acpi_entry_int_override_t *legacy = (acpi_entry_int_override_t *) p;
+	unsigned long polarity = 0, edge_triggered = 0;
 
-	if ((lsapic->flags & LSAPIC_PRESENT) == 0) 
+	/*
+	 * If the platform we're running doesn't define
+	 * iosapic_register_legacy_irq(), we ignore this info...
+	 */
+	if (!iosapic_register_legacy_irq)
 		return;
 
+	switch (legacy->flags) {
+	      case 0x5:	polarity = 1; edge_triggered = 1; break;
+	      case 0x7: polarity = 0; edge_triggered = 1; break;
+	      case 0xd: polarity = 1; edge_triggered = 0; break;
+	      case 0xf: polarity = 0; edge_triggered = 0; break;
+	      default:
+		printk("    ACPI Legacy IRQ 0x%02x: Unknown flags 0x%x\n", legacy->isa_irq,
+		       legacy->flags);
+		break;
+	}
+	iosapic_register_legacy_irq(legacy->isa_irq, legacy->pin, polarity, edge_triggered);
+}
+
+/*
+ * ACPI 2.0 tables parsing functions
+ */
+
+static unsigned long
+readl_unaligned(void *p)
+{
+	unsigned long ret;
+
+	memcpy(&ret, p, sizeof(long));
+	return ret;
+}
+
+/*
+ * Identify usable CPU's and remember them for SMP bringup later.
+ */
+static void __init
+acpi20_lsapic (char *p) 
+{
+	int add = 1;
+
+	acpi20_entry_lsapic_t *lsapic = (acpi20_entry_lsapic_t *) p;
 	printk("      CPU %d (%.04x:%.04x): ", total_cpus, lsapic->eid, lsapic->id);
 
 	if ((lsapic->flags & LSAPIC_ENABLED) == 0) {
 		printk("Disabled.\n");
 		add = 0;
-	} else if (lsapic->flags & LSAPIC_PERFORMANCE_RESTRICTED) {
-		printk("Performance Restricted; ignoring.\n");
-		add = 0;
 	}
-	
+
 #ifdef CONFIG_SMP
 	smp_boot_data.cpu_phys_id[total_cpus] = -1;
 #endif
@@ -73,87 +137,234 @@
 }
 
 /*
- * Configure legacy IRQ information in iosapic_vector
+ * Info on platform interrupt sources: NMI, PMI, INIT, etc.
  */
 static void __init
-acpi_legacy_irq(char *p)
+acpi20_platform (char *p)
 {
-	/*
-	 * This is not good.  ACPI is not necessarily limited to CONFIG_IA64_DIG, yet
-	 * ACPI does not necessarily imply IOSAPIC either.  Perhaps there should be
-	 * a means for platform_setup() to register ACPI handlers?
-	 */
-#ifdef CONFIG_IA64_IRQ_ACPI
-	acpi_entry_int_override_t *legacy = (acpi_entry_int_override_t *) p;
-	unsigned char vector; 
-	int i;
+	acpi20_entry_platform_src_t *plat = (acpi20_entry_platform_src_t *) p;
+
+	printk("PLATFORM: IOSAPIC %x -> Vector %x on CPU %.04u:%.04u\n",
+	       plat->iosapic_vector, plat->global_vector, plat->eid, plat->id);
+}
 
-	vector = isa_irq_to_vector(legacy->isa_irq);
+/*
+ * Override the physical address of the local APIC in the MADT stable header.
+ */
+static void __init
+acpi20_lapic_addr_override (char *p)
+{
+	acpi20_entry_lapic_addr_override_t * lapic = (acpi20_entry_lapic_addr_override_t *) p;
+
+	if (lapic->lapic_address) {
+		iounmap((void *)ipi_base_addr);
+		ipi_base_addr = (unsigned long) ioremap(lapic->lapic_address, 0);
+
+		printk("LOCAL ACPI override to 0x%lx(p=0x%lx)\n",
+		       ipi_base_addr, lapic->lapic_address);
+	}
+}
+
+/*
+ * Parse the ACPI Multiple APIC Description Table
+ */
+static void __init
+acpi20_parse_madt (acpi_madt_t *madt)
+{
+	acpi_entry_iosapic_t *iosapic;
+	char *p, *end;
+
+	/* Base address of IPI Message Block */
+	if (madt->lapic_address) {
+		ipi_base_addr = (unsigned long) ioremap(madt->lapic_address, 0);
+		printk("Lapic address set to 0x%lx\n", ipi_base_addr);
+	} else
+		printk("Lapic address set to default 0x%lx\n", ipi_base_addr);
+
+	p = (char *) (madt + 1);
+	end = p + (madt->header.length - sizeof(acpi_madt_t));
 
 	/*
-	 * Clobber any old pin mapping.  It may be that it gets replaced later on
-	 */
-	for (i = 0; i < IA64_MAX_VECTORED_IRQ; i++) {
-		if (i == vector) 
-			continue;
-		if (iosapic_pin(i) == iosapic_pin(vector))
-			iosapic_pin(i) = 0xff;
-        }
-
-	iosapic_pin(vector) = legacy->pin;
-	iosapic_bus(vector) = BUS_ISA;	/* This table only overrides the ISA devices */
-	iosapic_busdata(vector) = 0;
-	
-	/* 
-	 * External timer tick is special... 
+	 * Split entry parsing into two passes to ensure ordering.
 	 */
-	if (vector != TIMER_IRQ)
-		iosapic_dmode(vector) = IO_SAPIC_LOWEST_PRIORITY;
-	else 
-		iosapic_dmode(vector) = IO_SAPIC_FIXED;
+
+	while (p < end) {
+		switch (*p) {
+		case ACPI20_ENTRY_LOCAL_APIC_ADDR_OVERRIDE:
+			printk("ACPI 2.0 MADT: LOCAL APIC Override\n");
+			acpi20_lapic_addr_override(p);
+			break;
+
+		case ACPI20_ENTRY_LOCAL_SAPIC:
+			printk("ACPI 2.0 MADT: LOCAL SAPIC\n");
+			acpi20_lsapic(p);
+			break;
 	
-	/* See MPS 1.4 section 4.3.4 */
-	switch (legacy->flags) {
-	case 0x5:
-		iosapic_polarity(vector) = IO_SAPIC_POL_HIGH;
-		iosapic_trigger(vector) = IO_SAPIC_EDGE;
-		break;
-	case 0x8:
-		iosapic_polarity(vector) = IO_SAPIC_POL_LOW;
-		iosapic_trigger(vector) = IO_SAPIC_EDGE;
-		break;
-	case 0xd:
-		iosapic_polarity(vector) = IO_SAPIC_POL_HIGH;
-		iosapic_trigger(vector) = IO_SAPIC_LEVEL;
-		break;
-	case 0xf:
-		iosapic_polarity(vector) = IO_SAPIC_POL_LOW;
-		iosapic_trigger(vector) = IO_SAPIC_LEVEL;
-		break;
-	default:
-		printk("    ACPI Legacy IRQ 0x%02x: Unknown flags 0x%x\n", legacy->isa_irq,
-		       legacy->flags);
-		break;
+		case ACPI20_ENTRY_IO_SAPIC:
+			iosapic = (acpi_entry_iosapic_t *) p;
+			if (iosapic_init)
+				iosapic_init(iosapic->address, iosapic->irq_base);
+			break;
+
+		case ACPI20_ENTRY_PLATFORM_INT_SOURCE:
+			printk("ACPI 2.0 MADT: PLATFORM INT SOURCE\n");
+			acpi20_platform(p);
+			break;
+
+		case ACPI20_ENTRY_LOCAL_APIC:
+			printk("ACPI 2.0 MADT: LOCAL APIC entry\n"); break;
+		case ACPI20_ENTRY_IO_APIC:
+			printk("ACPI 2.0 MADT: IO APIC entry\n"); break;
+		case ACPI20_ENTRY_NMI_SOURCE:
+			printk("ACPI 2.0 MADT: NMI SOURCE entry\n"); break;
+		case ACPI20_ENTRY_LOCAL_APIC_NMI:
+			printk("ACPI 2.0 MADT: LOCAL APIC NMI entry\n"); break;
+		case ACPI20_ENTRY_INT_SRC_OVERRIDE:
+			break;
+		default:
+			printk("ACPI 2.0 MADT: unknown entry skip\n");
+			break;
+		}
+
+		p += p[1];
 	}
 
-# ifdef ACPI_DEBUG
-	printk("Legacy ISA IRQ %x -> IA64 Vector %x IOSAPIC Pin %x Active %s %s Trigger\n", 
-	       legacy->isa_irq, vector, iosapic_pin(vector), 
-	       ((iosapic_polarity(vector) == IO_SAPIC_POL_LOW) ? "Low" : "High"),
-	       ((iosapic_trigger(vector) == IO_SAPIC_LEVEL) ? "Level" : "Edge"));
-# endif /* ACPI_DEBUG */
-#endif /* CONFIG_IA64_IRQ_ACPI */
+	p = (char *) (madt + 1);
+	end = p + (madt->header.length - sizeof(acpi_madt_t));
+
+	while (p < end) {
+		
+		switch (*p) {
+		case ACPI20_ENTRY_INT_SRC_OVERRIDE:
+			printk("ACPI 2.0 MADT: INT SOURCE Override\n");
+			acpi_legacy_irq(p);
+			break;
+		default:
+			break;
+		}
+
+		p += p[1];
+	}
+
+	/* Make bootup pretty */
+	printk("      %d CPUs available, %d CPUs total\n",
+		available_cpus, total_cpus);
+}
+
+int __init 
+acpi20_parse (acpi20_rsdp_t *rsdp20)
+{
+	acpi_xsdt_t *xsdt;
+	acpi_desc_table_hdr_t *hdrp;
+	int tables, i;
+
+	if (strncmp(rsdp20->signature, ACPI_RSDP_SIG, ACPI_RSDP_SIG_LEN)) {
+		printk("ACPI 2.0 RSDP signature incorrect!\n");
+		return 0;
+	} else {
+		printk("ACPI 2.0 Root System Description Ptr at 0x%lx\n",
+			(unsigned long)rsdp20);
+	}
+
+	xsdt = __va(rsdp20->xsdt);
+	hdrp = &xsdt->header;
+	if (strncmp(hdrp->signature,
+		ACPI_XSDT_SIG, ACPI_XSDT_SIG_LEN)) {
+		printk("ACPI 2.0 XSDT signature incorrect. Trying RSDT\n");
+		/* RSDT parsing here */
+		return 0;
+	} else {
+		printk("ACPI 2.0 XSDT at 0x%lx (p=0x%lx)\n",
+		(unsigned long)xsdt, (unsigned long)rsdp20->xsdt);
+	}
+
+	printk("ACPI 2.0: %.6s %.8s %d.%d\n",
+		hdrp->oem_id,
+		hdrp->oem_table_id,
+		hdrp->oem_revision >> 16,
+		hdrp->oem_revision & 0xffff);
+
+#ifdef CONFIG_ACPI_KERNEL_CONFIG
+	acpi_cf_init((void *)rsdp20);
+#endif
+
+	tables =(hdrp->length -sizeof(acpi_desc_table_hdr_t))>>3;
+
+	for (i = 0; i < tables; i++) {
+		hdrp = (acpi_desc_table_hdr_t *) __va(readl_unaligned(&xsdt->entry_ptrs[i]));
+		printk("        :table %4.4s found\n", hdrp->signature);
+
+		/* Only interested int the MADT table for now ... */
+		if (strncmp(hdrp->signature,
+			ACPI_MADT_SIG, ACPI_MADT_SIG_LEN) != 0)
+			continue;
+
+		acpi20_parse_madt((acpi_madt_t *) hdrp);
+	}
+
+#ifdef CONFIG_ACPI_KERNEL_CONFIG
+	acpi_cf_terminate();
+#endif
+
+#ifdef CONFIG_SMP
+	if (available_cpus == 0) {
+		printk("ACPI: Found 0 CPUS; assuming 1\n");
+		available_cpus = 1; /* We've got at least one of these, no? */
+	}
+	smp_boot_data.cpu_count = available_cpus;
+#endif
+	return 1;
+}
+/*
+ * ACPI 1.0b with 0.71 IA64 extension functions; should be removed once all
+ * platforms start supporting ACPI 2.0
+ */
+
+/*
+ * Identify usable CPU's and remember them for SMP bringup later.
+ */
+static void __init
+acpi_lsapic (char *p) 
+{
+	int add = 1;
+
+	acpi_entry_lsapic_t *lsapic = (acpi_entry_lsapic_t *) p;
+
+	if ((lsapic->flags & LSAPIC_PRESENT) == 0) 
+		return;
+
+	printk("      CPU %d (%.04x:%.04x): ", total_cpus, lsapic->eid, lsapic->id);
+
+	if ((lsapic->flags & LSAPIC_ENABLED) == 0) {
+		printk("Disabled.\n");
+		add = 0;
+	} else if (lsapic->flags & LSAPIC_PERFORMANCE_RESTRICTED) {
+		printk("Performance Restricted; ignoring.\n");
+		add = 0;
+	}
+
+#ifdef CONFIG_SMP
+	smp_boot_data.cpu_phys_id[total_cpus] = -1;
+#endif
+	if (add) {
+		printk("Available.\n");
+		available_cpus++;
+#ifdef CONFIG_SMP
+		smp_boot_data.cpu_phys_id[total_cpus] = (lsapic->id << 8) | lsapic->eid;
+#endif /* CONFIG_SMP */
+	}
+	total_cpus++;
 }
 
 /*
  * Info on platform interrupt sources: NMI. PMI, INIT, etc.
  */
 static void __init
-acpi_platform(char *p)
+acpi_platform (char *p)
 {
 	acpi_entry_platform_src_t *plat = (acpi_entry_platform_src_t *) p;
 
-	printk("PLATFORM: IOSAPIC %x -> Vector %lx on CPU %.04u:%.04u\n",
+	printk("PLATFORM: IOSAPIC %x -> Vector %x on CPU %.04u:%.04u\n",
 	       plat->iosapic_vector, plat->global_vector, plat->eid, plat->id);
 }
 
@@ -161,8 +372,9 @@
  * Parse the ACPI Multiple SAPIC Table
  */
 static void __init
-acpi_parse_msapic(acpi_sapic_t *msapic)
+acpi_parse_msapic (acpi_sapic_t *msapic)
 {
+	acpi_entry_iosapic_t *iosapic;
 	char *p, *end;
 
 	/* Base address of IPI Message Block */
@@ -172,41 +384,31 @@
 	end = p + (msapic->header.length - sizeof(acpi_sapic_t));
 
 	while (p < end) {
-		
 		switch (*p) {
-		case ACPI_ENTRY_LOCAL_SAPIC:
+		      case ACPI_ENTRY_LOCAL_SAPIC:
 			acpi_lsapic(p);
 			break;
 	
-		case ACPI_ENTRY_IO_SAPIC:
-			platform_register_iosapic((acpi_entry_iosapic_t *) p);
+		      case ACPI_ENTRY_IO_SAPIC:
+			iosapic = (acpi_entry_iosapic_t *) p;
+			if (iosapic_init)
+				iosapic_init(iosapic->address, iosapic->irq_base);
 			break;
 
-		case ACPI_ENTRY_INT_SRC_OVERRIDE:
+		      case ACPI_ENTRY_INT_SRC_OVERRIDE:
 			acpi_legacy_irq(p);
 			break;
-		
-		case ACPI_ENTRY_PLATFORM_INT_SOURCE:
+
+		      case ACPI_ENTRY_PLATFORM_INT_SOURCE:
 			acpi_platform(p);
 			break;
-		
-		default:
+
+		      default:
 			break;
 		}
 
 		/* Move to next table entry. */
-#define BAD_ACPI_TABLE
-#ifdef BAD_ACPI_TABLE
-		/*
-		 * Some prototype Lion's have a bad ACPI table
-		 * requiring this fix.  Without this fix, those
-		 * machines crash during bootup.
-		 */
-		if (p[1] == 0)
-			p = end;
-		else
-#endif
-			p += p[1];
+		p += p[1];
 	}
 
 	/* Make bootup pretty */
@@ -214,24 +416,18 @@
 }
 
 int __init 
-acpi_parse(acpi_rsdp_t *rsdp)
+acpi_parse (acpi_rsdp_t *rsdp)
 {
 	acpi_rsdt_t *rsdt;
 	acpi_desc_table_hdr_t *hdrp;
 	long tables, i;
 
-	if (!rsdp) {
-		printk("Uh-oh, no ACPI Root System Description Pointer table!\n");
-		return 0;
-	}
-
 	if (strncmp(rsdp->signature, ACPI_RSDP_SIG, ACPI_RSDP_SIG_LEN)) {
 		printk("Uh-oh, ACPI RSDP signature incorrect!\n");
 		return 0;
 	}
 
-	rsdp->rsdt = __va(rsdp->rsdt);
-	rsdt = rsdp->rsdt;
+	rsdt = __va(rsdp->rsdt);
 	if (strncmp(rsdt->header.signature, ACPI_RSDT_SIG, ACPI_RSDT_SIG_LEN)) {
 		printk("Uh-oh, ACPI RDST signature incorrect!\n");
 		return 0;
@@ -256,7 +452,7 @@
 	}
 
 #ifdef CONFIG_ACPI_KERNEL_CONFIG
-       acpi_cf_terminate();
+	acpi_cf_terminate();
 #endif
 
 #ifdef CONFIG_SMP
@@ -267,23 +463,4 @@
 	smp_boot_data.cpu_count = available_cpus;
 #endif
 	return 1;
-}
-
-const char *
-acpi_get_sysname (void)
-{       
-	/* the following should go away once we have an ACPI parser: */
-#ifdef CONFIG_IA64_GENERIC
-	return "hpsim";
-#else
-# if defined (CONFIG_IA64_HP_SIM)
-	return "hpsim";
-# elif defined (CONFIG_IA64_SGI_SN1)
-	return "sn1";
-# elif defined (CONFIG_IA64_DIG)
-	return "dig";
-# else
-#	error Unknown platform.  Fix acpi.c.
-# endif
-#endif
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/efi.c linux/arch/ia64/kernel/efi.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/efi.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/efi.c	Thu Jan  4 12:50:17 2001
@@ -18,7 +18,6 @@
  * Goutham Rao: <goutham.rao@intel.com>
  * 	Skip non-WB memory and ignore empty memory ranges.
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -333,6 +332,9 @@
 		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
 			efi.mps = __va(config_tables[i].table);
 			printk(" MPS=0x%lx", config_tables[i].table);
+		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
+			efi.acpi20 = __va(config_tables[i].table);
+			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
 		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
 			efi.acpi = __va(config_tables[i].table);
 			printk(" ACPI=0x%lx", config_tables[i].table);
@@ -364,7 +366,7 @@
 #if EFI_DEBUG
 	/* print EFI memory map: */
 	{
-		efi_memory_desc_t *md = p;
+		efi_memory_desc_t *md;
 		void *p;
 
 		for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/entry.S linux/arch/ia64/kernel/entry.S
--- v2.4.0-prerelease/linux/arch/ia64/kernel/entry.S	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/entry.S	Thu Jan  4 12:50:17 2001
@@ -11,6 +11,17 @@
  * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
  */
 /*
+ * ia64_switch_to now places correct virtual mapping in TR2 for
+ * kernel stack. This allows us to handle interrupts without changing
+ * to physical mode.
+ *
+ * ar.k4 is now used to hold last virtual map address
+ * 
+ * Jonathan Nickin	<nicklin@missioncriticallinux.com>
+ * Patrick O'Rourke	<orourke@missioncriticallinux.com>
+ * 11/07/2000
+ */
+/*
  * Global (preserved) predicate usage on syscall entry/exit path:
  *
  *	pKern:		See entry.h.
@@ -27,7 +38,8 @@
 #include <asm/processor.h>
 #include <asm/unistd.h>
 #include <asm/asmmacro.h>
-
+#include <asm/pgtable.h>
+	
 #include "entry.h"
 
 	.text
@@ -98,6 +110,8 @@
 	br.ret.sptk.many rp
 END(sys_clone)
 
+#define KSTACK_TR	2
+
 /*
  * prev_task <- ia64_switch_to(struct task_struct *next)
  */
@@ -108,22 +122,55 @@
 	UNW(.body)
 
 	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-	dep r18=-1,r0,0,61	// build mask 0x1fffffffffffffff
+	mov r27=ar.k4
+	dep r20=0,in0,61,3		// physical address of "current"
+	;;
+	st8 [r22]=sp			// save kernel stack pointer of old task
+	shr.u r26=r20,_PAGE_SIZE_256M
+	;;
+	cmp.eq p7,p6=r26,r0		// check < 256M
 	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
 	;;
-	st8 [r22]=sp		// save kernel stack pointer of old task
-	ld8 sp=[r21]		// load kernel stack pointer of new task
-	and r20=in0,r18		// physical address of "current"
-	;;
-	mov ar.k6=r20		// copy "current" into ar.k6
-	mov r8=r13		// return pointer to previously running task
-	mov r13=in0		// set "current" pointer
+	/*
+	 * If we've already mapped this task's page, we can skip doing it
+	 * again.
+	 */
+(p6)	cmp.eq p7,p6=r26,r27
+(p6)	br.cond.dpnt.few .map
+	;;
+.done:	ld8 sp=[r21]			// load kernel stack pointer of new task
+(p6)	ssm psr.ic			// if we had to map, re-enable the psr.ic bit FIRST!!!
 	;;
+(p6)	srlz.d
+	mov ar.k6=r20			// copy "current" into ar.k6
+	mov r8=r13			// return pointer to previously running task
+	mov r13=in0			// set "current" pointer
+	;;
+(p6)	ssm psr.i			// re-enable psr.i AFTER the ic bit is serialized
 	DO_LOAD_SWITCH_STACK( )
+
 #ifdef CONFIG_SMP
-	sync.i			// ensure "fc"s done by this CPU are visible on other CPUs
-#endif
-	br.ret.sptk.few rp
+	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
+#endif 
+	br.ret.sptk.few rp		// boogie on out in new context
+
+.map:
+	rsm psr.i | psr.ic
+	movl r25=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RWX
+	;;
+	srlz.d
+	or r23=r25,r20			// construct PA | page properties
+	mov r25=_PAGE_SIZE_256M<<2
+	;;
+	mov cr.itir=r25
+	mov cr.ifa=in0			// VA of next task...
+	;;
+	mov r25=KSTACK_TR		// use tr entry #2...
+	mov ar.k4=r26			// remember last page we mapped...
+	;;
+	itr.d dtr[r25]=r23		// wire in new mapping...
+	br.cond.sptk.many .done
+	;;
 END(ia64_switch_to)
 
 #ifndef CONFIG_IA64_NEW_UNWIND
@@ -503,7 +550,7 @@
 	;;
 	ld4 r2=[r2]
 	;;
-	shl r2=r2,SMP_LOG_CACHE_BYTES	// can't use shladd here...
+	shl r2=r2,SMP_CACHE_SHIFT	// can't use shladd here...
 	;;
 	add r3=r2,r3
 #else
@@ -542,7 +589,7 @@
 	// check & deliver pending signals:
 (p2)	br.call.spnt.few rp=handle_signal_delivery
 .ret9:
-#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
 	// Check for lost ticks
 	rsm psr.i
 	mov r2 = ar.itc
@@ -611,14 +658,13 @@
 	mov ar.ccv=r1
 	mov ar.fpsr=r13
 	mov b0=r14
-	// turn off interrupts, interrupt collection, & data translation
-	rsm psr.i | psr.ic | psr.dt
+	// turn off interrupts, interrupt collection
+	rsm psr.i | psr.ic
 	;;
 	srlz.i			// EAS 2.5
 	mov b7=r15
 	;;
 	invala			// invalidate ALAT
-	dep r12=0,r12,61,3	// convert sp to physical address
 	bsw.0;;			// switch back to bank 0 (must be last in insn group)
 	;;
 #ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
@@ -757,7 +803,7 @@
 
 #endif /* CONFIG_SMP */
 
-#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
 
 ENTRY(invoke_ia64_reset_itm)
 	UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
@@ -772,7 +818,7 @@
 	br.ret.sptk.many rp
 END(invoke_ia64_reset_itm)
 
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC || CONFIG_IA64_SOFTSDV_HACKS */
+#endif /* CONFIG_IA64_SOFTSDV_HACKS */
 
 	/*
 	 * Invoke do_softirq() while preserving in0-in7, which may be needed
@@ -1091,7 +1137,7 @@
 	data8 sys_setpriority
 	data8 sys_statfs
 	data8 sys_fstatfs
-	data8 ia64_ni_syscall
+	data8 ia64_ni_syscall			// 1105
 	data8 sys_semget
 	data8 sys_semop
 	data8 sys_semctl
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/fw-emu.c linux/arch/ia64/kernel/fw-emu.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/fw-emu.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/fw-emu.c	Thu Jan  4 12:50:17 2001
@@ -402,7 +402,6 @@
 	sal_systab->sal_rev_minor = 1;
 	sal_systab->sal_rev_major = 0;
 	sal_systab->entry_count = 1;
-	sal_systab->ia32_bios_present = 0;
 
 #ifdef CONFIG_IA64_GENERIC
         strcpy(sal_systab->oem_id, "Generic");
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/head.S linux/arch/ia64/kernel/head.S
--- v2.4.0-prerelease/linux/arch/ia64/kernel/head.S	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/head.S	Thu Jan  4 12:50:17 2001
@@ -74,8 +74,8 @@
 	;;
 
 #ifdef CONFIG_IA64_EARLY_PRINTK
-	mov r2=6
-	mov r3=(8<<8) | (28<<2)
+	mov r3=(6<<8) | (28<<2)
+	movl r2=6<<61
 	;;
 	mov rr[r2]=r3
 	;;
@@ -168,6 +168,11 @@
 	add r19=IA64_NUM_DBG_REGS*8,in0
 	;;
 1:	mov r16=dbr[r18]
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC) \
+    || defined(CONFIG_ITANIUM_C0_SPECIFIC)
+	;;
+	srlz.d
+#endif
 	mov r17=ibr[r18]
 	add r18=1,r18
 	;;
@@ -181,7 +186,8 @@
 
 GLOBAL_ENTRY(ia64_load_debug_regs)
 	alloc r16=ar.pfs,1,0,0,0
-#if !(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC))
+#if !(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) \
+   || defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
 	lfetch.nta [in0]
 #endif
 	mov r20=ar.lc			// preserve ar.lc
@@ -194,6 +200,11 @@
 	add r18=1,r18
 	;;
 	mov dbr[r18]=r16
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC) \
+    || defined(CONFIG_ITANIUM_C0_SPECIFIC)
+	;;
+	srlz.d
+#endif
 	mov ibr[r18]=r17
 	br.cloop.sptk.few 1b
 	;;
@@ -754,7 +765,7 @@
 	mov tmp=ar.itc
 (p15)	br.cond.sptk .wait
 	;;
-	ld1 tmp=[r31]
+	ld4 tmp=[r31]
 	;;
 	cmp.ne p15,p0=tmp,r0
 	mov tmp=ar.itc
@@ -764,7 +775,7 @@
 	mov tmp=1
 	;;
 	IA64_SEMFIX_INSN
-	cmpxchg1.acq tmp=[r31],tmp,ar.ccv
+	cmpxchg4.acq tmp=[r31],tmp,ar.ccv
 	;;
 	cmp.eq p15,p0=tmp,r0
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/ia64_ksyms.c linux/arch/ia64/kernel/ia64_ksyms.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/ia64_ksyms.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/ia64_ksyms.c	Thu Jan  4 12:50:17 2001
@@ -24,9 +24,8 @@
 EXPORT_SYMBOL(strstr);
 EXPORT_SYMBOL(strtok);
 
-#include <linux/pci.h>
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
+#include <asm/hw_irq.h>
+EXPORT_SYMBOL(isa_irq_to_vector_map);
 
 #include <linux/in6.h>
 #include <asm/checksum.h>
@@ -49,14 +48,6 @@
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
-#include <asm/pci.h>
-EXPORT_SYMBOL(pci_dma_sync_sg);
-EXPORT_SYMBOL(pci_dma_sync_single);
-EXPORT_SYMBOL(pci_map_sg);
-EXPORT_SYMBOL(pci_map_single);
-EXPORT_SYMBOL(pci_unmap_sg);
-EXPORT_SYMBOL(pci_unmap_single);
-
 #include <asm/processor.h>
 EXPORT_SYMBOL(cpu_data);
 EXPORT_SYMBOL(kernel_thread);
@@ -92,6 +83,9 @@
 #include <asm/uaccess.h>
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__do_clear_user);
+EXPORT_SYMBOL(__strlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
 
 #include <asm/unistd.h>
 EXPORT_SYMBOL(__ia64_syscall);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/iosapic.c linux/arch/ia64/kernel/iosapic.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/iosapic.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/kernel/iosapic.c	Thu Jan  4 12:50:17 2001
@@ -0,0 +1,498 @@
+/*
+ * I/O SAPIC support.
+ *
+ * Copyright (C) 1999 Intel Corp.
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999-2000 Hewlett-Packard Co.
+ * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
+ *
+ * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O APIC code.
+ *				In particular, we now have separate handlers for edge
+ *				and level triggered interrupts.
+ * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector allocation 
+ *				PCI to vector mapping, shared PCI interrupts.
+ * 00/10/27	D. Mosberger	Document things a bit more to make them more understandable.
+ *				Clean up much of the old IOSAPIC cruft.
+ */
+/*
+ * Here is what the interrupt logic between a PCI device and the CPU looks like:
+ *
+ * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, INTD).  The
+ *     device is uniquely identified by its bus-, device-, and slot-number (the function
+ *     number does not matter here because all functions share the same interrupt
+ *     lines).
+ *
+ * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC controller.
+ *     Multiple interrupt lines may have to share the same IOSAPIC pin (if they're level
+ *     triggered and use the same polarity).  Each interrupt line has a unique IOSAPIC
+ *     irq number which can be calculated as the sum of the controller's base irq number
+ *     and the IOSAPIC pin number to which the line connects.
+ *
+ * (3) The IOSAPIC uses an internal table to map the IOSAPIC pin into the IA-64 interrupt
+ *     vector.  This interrupt vector is then sent to the CPU.
+ *
+ * In other words, there are two levels of indirections involved:
+ *
+ *	pci pin -> iosapic irq -> IA-64 vector
+ *
+ * Note: outside this module, IA-64 vectors are called "irqs".  This is because that's
+ * the traditional name Linux uses for interrupt vectors.
+ */
+#include <linux/config.h>
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/string.h>
+#include <linux/irq.h>
+
+#include <asm/acpi-ext.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/iosapic.h>
+#include <asm/machvec.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+#ifdef	CONFIG_ACPI_KERNEL_CONFIG
+# include <asm/acpikcfg.h>
+#endif
+
+#undef DEBUG_IRQ_ROUTING
+
+static spinlock_t iosapic_lock = SPIN_LOCK_UNLOCKED;
+
+/* PCI pin to IOSAPIC irq routing information.  This info typically comes from ACPI. */
+
+static struct {
+	int num_routes;
+	struct pci_vector_struct *route;
+} pci_irq;
+
+/* This tables maps IA-64 vectors to the IOSAPIC pin that generates this vector. */
+
+static struct iosapic_irq {
+	char *addr;			/* base address of IOSAPIC */
+	unsigned char base_irq;		/* first irq assigned to this IOSAPIC */
+        char pin;			/* IOSAPIC pin (-1 => not an IOSAPIC irq) */
+	unsigned char dmode 	: 3;	/* delivery mode (see iosapic.h) */
+	unsigned char polarity	: 1;	/* interrupt polarity (see iosapic.h) */
+	unsigned char trigger	: 1;	/* trigger mode (see iosapic.h) */
+} iosapic_irq[NR_IRQS];
+
+/*
+ * Translate IOSAPIC irq number to the corresponding IA-64 interrupt vector.  If no
+ * entry exists, return -1.
+ */
+static int 
+iosapic_irq_to_vector (int irq)
+{
+	int vector;
+
+	for (vector = 0; vector < NR_IRQS; ++vector)
+		if (iosapic_irq[vector].base_irq + iosapic_irq[vector].pin == irq)
+			return vector;
+	return -1;
+}
+		
+/*
+ * Map PCI pin to the corresponding IA-64 interrupt vector.  If no such mapping exists,
+ * return -1.
+ */
+static int
+pci_pin_to_vector (int bus, int slot, int pci_pin)
+{
+	struct pci_vector_struct *r;
+
+	for (r = pci_irq.route; r < pci_irq.route + pci_irq.num_routes; ++r)
+		if (r->bus == bus && (r->pci_id >> 16) == slot && r->pin == pci_pin)
+			return iosapic_irq_to_vector(r->irq);
+	return -1;
+}
+
+static void
+set_rte (unsigned int vector, unsigned long dest)
+{
+	unsigned long pol, trigger, dmode;
+	u32 low32, high32;
+	char *addr;
+	int pin;
+
+	pin = iosapic_irq[vector].pin;
+	if (pin < 0)
+		return;		/* not an IOSAPIC interrupt */
+
+	addr    = iosapic_irq[vector].addr;
+	pol     = iosapic_irq[vector].polarity;
+	trigger = iosapic_irq[vector].trigger;
+	dmode   = iosapic_irq[vector].dmode;
+
+	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
+		 (trigger << IOSAPIC_TRIGGER_SHIFT) |
+		 (dmode << IOSAPIC_DELIVERY_SHIFT) |
+		 vector);
+
+#ifdef CONFIG_IA64_AZUSA_HACKS
+	/* set Flush Disable bit */
+	if (addr != (char *) 0xc0000000fec00000)
+		low32 |= (1 << 17);
+#endif
+
+	/* dest contains both id and eid */
+	high32 = (dest << IOSAPIC_DEST_SHIFT);	
+
+	writel(IOSAPIC_RTE_HIGH(pin), addr + IOSAPIC_REG_SELECT);
+	writel(high32, addr + IOSAPIC_WINDOW);
+	writel(IOSAPIC_RTE_LOW(pin), addr + IOSAPIC_REG_SELECT);
+	writel(low32, addr + IOSAPIC_WINDOW);
+}
+
+static void
+nop (unsigned int vector)
+{
+	/* do nothing... */
+}
+
+static void 
+mask_irq (unsigned int vector)
+{
+	unsigned long flags;
+	char *addr;
+	u32 low32;
+	int pin;
+
+	addr = iosapic_irq[vector].addr;
+	pin = iosapic_irq[vector].pin;
+
+	if (pin < 0)
+		return;			/* not an IOSAPIC interrupt! */
+
+	spin_lock_irqsave(&iosapic_lock, flags);
+	{
+		writel(IOSAPIC_RTE_LOW(pin), addr + IOSAPIC_REG_SELECT);
+		low32 = readl(addr + IOSAPIC_WINDOW);
+
+		low32 |= (1 << IOSAPIC_MASK_SHIFT);    /* set only the mask bit */
+		writel(low32, addr + IOSAPIC_WINDOW);
+	}
+	spin_unlock_irqrestore(&iosapic_lock, flags);
+}
+
+static void 
+unmask_irq (unsigned int vector)
+{
+	unsigned long flags;
+	char *addr;
+	u32 low32;
+	int pin;
+
+	addr = iosapic_irq[vector].addr;
+	pin = iosapic_irq[vector].pin;
+	if (pin < 0)
+		return;			/* not an IOSAPIC interrupt! */
+
+	spin_lock_irqsave(&iosapic_lock, flags);
+	{
+		writel(IOSAPIC_RTE_LOW(pin), addr + IOSAPIC_REG_SELECT);
+		low32 = readl(addr + IOSAPIC_WINDOW);
+
+		low32 &= ~(1 << IOSAPIC_MASK_SHIFT);    /* clear only the mask bit */
+		writel(low32, addr + IOSAPIC_WINDOW);
+	}
+	spin_unlock_irqrestore(&iosapic_lock, flags);
+}
+
+
+static void
+iosapic_set_affinity (unsigned int vector, unsigned long mask)
+{
+	printk("iosapic_set_affinity: not implemented yet\n");
+}
+
+/*
+ * Handlers for level-triggered interrupts.
+ */
+
+static unsigned int
+iosapic_startup_level_irq (unsigned int vector)
+{
+	unmask_irq(vector);
+	return 0;
+}
+
+static void
+iosapic_end_level_irq (unsigned int vector)
+{
+	writel(vector, iosapic_irq[vector].addr + IOSAPIC_EOI);
+}
+
+#define iosapic_shutdown_level_irq	mask_irq
+#define iosapic_enable_level_irq	unmask_irq
+#define iosapic_disable_level_irq	mask_irq
+#define iosapic_ack_level_irq		nop
+
+struct hw_interrupt_type irq_type_iosapic_level = {
+	typename:	"IO-SAPIC-level",
+	startup:	iosapic_startup_level_irq,
+	shutdown:	iosapic_shutdown_level_irq,
+	enable:		iosapic_enable_level_irq,
+	disable:	iosapic_disable_level_irq,
+	ack:		iosapic_ack_level_irq,
+	end:		iosapic_end_level_irq,
+	set_affinity:	iosapic_set_affinity
+};
+
+/*
+ * Handlers for edge-triggered interrupts.
+ */
+
+static unsigned int
+iosapic_startup_edge_irq (unsigned int vector)
+{
+	unmask_irq(vector);
+	/*
+	 * IOSAPIC simply drops interrupts pended while the
+	 * corresponding pin was masked, so we can't know if an
+	 * interrupt is pending already.  Let's hope not...
+	 */
+	return 0;
+}
+
+static void
+iosapic_ack_edge_irq (unsigned int vector)
+{
+	/*
+	 * Once we have recorded IRQ_PENDING already, we can mask the
+	 * interrupt for real. This prevents IRQ storms from unhandled
+	 * devices.
+	 */
+	if ((irq_desc[vector].status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED))
+		mask_irq(vector);
+}
+
+#define iosapic_enable_edge_irq		unmask_irq
+#define iosapic_disable_edge_irq	nop
+#define iosapic_end_edge_irq		nop
+
+struct hw_interrupt_type irq_type_iosapic_edge = {
+	typename:	"IO-SAPIC-edge",
+	startup:	iosapic_startup_edge_irq,
+	shutdown:	iosapic_disable_edge_irq,
+	enable:		iosapic_enable_edge_irq,
+	disable:	iosapic_disable_edge_irq,
+	ack:		iosapic_ack_edge_irq,
+	end:		iosapic_end_edge_irq,
+	set_affinity:	iosapic_set_affinity
+};
+
+static unsigned int
+iosapic_version (char *addr) 
+{
+	/*
+	 * IOSAPIC Version Register return 32 bit structure like:
+	 * {
+	 *	unsigned int version   : 8;
+	 *	unsigned int reserved1 : 8;
+	 *	unsigned int pins      : 8;
+	 *	unsigned int reserved2 : 8;
+	 * }
+	 */
+	writel(IOSAPIC_VERSION, addr + IOSAPIC_REG_SELECT);
+	return readl(IOSAPIC_WINDOW + addr);
+}
+
+/*
+ * ACPI calls this when it finds an entry for a legacy ISA interrupt.  Note that the
+ * irq_base and IOSAPIC address must be set in iosapic_init().
+ */
+void
+iosapic_register_legacy_irq (unsigned long irq,
+			     unsigned long pin, unsigned long polarity,
+			     unsigned long edge_triggered)
+{
+	unsigned int vector = isa_irq_to_vector(irq);
+
+#ifdef DEBUG_IRQ_ROUTING
+	printk("ISA: IRQ %u -> IOSAPIC irq 0x%02x (%s, %s) -> vector %02x\n",
+	       (unsigned) irq, (unsigned) pin,
+	       polarity ? "high" : "low", edge_triggered ? "edge" : "level",
+	       vector);
+#endif
+
+	iosapic_irq[vector].pin = pin;
+	iosapic_irq[vector].dmode = IOSAPIC_LOWEST_PRIORITY;
+	iosapic_irq[vector].polarity = polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
+	iosapic_irq[vector].trigger = edge_triggered ? IOSAPIC_EDGE : IOSAPIC_LEVEL;
+}
+
+void __init
+iosapic_init (unsigned long phys_addr, unsigned int base_irq)
+{
+	struct hw_interrupt_type *irq_type;
+	int i, irq, max_pin, vector;
+	unsigned int ver;
+	char *addr;
+	static int first_time = 1;
+
+	if (first_time) {
+		first_time = 0;
+
+		for (vector = 0; vector < NR_IRQS; ++vector)
+			iosapic_irq[vector].pin = -1;	/* mark as unused */
+
+		/* 
+		 * Fetch the PCI interrupt routing table:
+		 */
+#ifdef CONFIG_ACPI_KERNEL_CONFIG
+		acpi_cf_get_pci_vectors(&pci_irq.route, &pci_irq.num_routes);
+#else
+		pci_irq.route =
+			(struct pci_vector_struct *) __va(ia64_boot_param.pci_vectors);
+		pci_irq.num_routes = ia64_boot_param.num_pci_vectors;
+#endif
+	}
+
+	addr = ioremap(phys_addr, 0);
+
+	ver = iosapic_version(addr);
+	max_pin = (ver >> 16) & 0xff;
+	
+	printk("IOSAPIC: version %x.%x, address 0x%lx, IRQs 0x%02x-0x%02x\n", 
+	       (ver & 0xf0) >> 4, (ver & 0x0f), phys_addr, base_irq, base_irq + max_pin);
+
+	if (base_irq == 0)
+		/*
+		 * Map the legacy ISA devices into the IOSAPIC data.  Some of these may
+		 * get reprogrammed later on with data from the ACPI Interrupt Source
+		 * Override table.
+		 */
+		for (irq = 0; irq < 16; ++irq) {
+			vector = isa_irq_to_vector(irq);
+			iosapic_irq[vector].addr = addr;
+			iosapic_irq[vector].base_irq = 0;
+			if (iosapic_irq[vector].pin == -1)
+				iosapic_irq[vector].pin = irq;
+			iosapic_irq[vector].dmode = IOSAPIC_LOWEST_PRIORITY;
+			iosapic_irq[vector].trigger  = IOSAPIC_EDGE;
+			iosapic_irq[vector].polarity = IOSAPIC_POL_HIGH;
+#ifdef DEBUG_IRQ_ROUTING
+			printk("ISA: IRQ %u -> IOSAPIC irq 0x%02x (high, edge) -> vector 0x%02x\n",
+			       irq, iosapic_irq[vector].base_irq + iosapic_irq[vector].pin,
+			       vector);
+#endif
+		  	irq_type = &irq_type_iosapic_edge;
+			if (irq_desc[vector].handler != irq_type) {
+				if (irq_desc[vector].handler != &no_irq_type)
+					printk("iosapic_init: changing vector 0x%02x from %s to "
+					       "%s\n", irq, irq_desc[vector].handler->typename,
+					       irq_type->typename);
+				irq_desc[vector].handler = irq_type;
+			}
+
+			/* program the IOSAPIC routing table: */
+			set_rte(vector, (ia64_get_lid() >> 16) & 0xffff);
+		}
+
+#ifndef CONFIG_IA64_SOFTSDV_HACKS
+	for (i = 0; i < pci_irq.num_routes; i++) {
+		irq = pci_irq.route[i].irq;
+
+		if ((unsigned) (irq - base_irq) > max_pin)
+			/* the interrupt route is for another controller... */
+			continue;
+
+		if (irq < 16)
+			vector = isa_irq_to_vector(irq);
+		else {
+			vector = iosapic_irq_to_vector(irq);
+			if (vector < 0)
+				/* new iosapic irq: allocate a vector for it */
+				vector = ia64_alloc_irq();
+		}
+
+		iosapic_irq[vector].addr     = addr;
+		iosapic_irq[vector].base_irq = base_irq;
+		iosapic_irq[vector].pin	     = (irq - base_irq);
+		iosapic_irq[vector].dmode    = IOSAPIC_LOWEST_PRIORITY;
+		iosapic_irq[vector].trigger  = IOSAPIC_LEVEL;
+		iosapic_irq[vector].polarity = IOSAPIC_POL_LOW;
+
+# ifdef DEBUG_IRQ_ROUTING
+		printk("PCI: (B%d,I%d,P%d) -> IOSAPIC irq 0x%02x -> vector 0x%02x\n",
+		       pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin,
+		       iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector);
+# endif
+		irq_type = &irq_type_iosapic_level;
+		if (irq_desc[vector].handler != irq_type){
+			if (irq_desc[vector].handler != &no_irq_type)
+				printk("iosapic_init: changing vector 0x%02x from %s to %s\n",
+				       vector, irq_desc[vector].handler->typename,
+				       irq_type->typename);
+			irq_desc[vector].handler = irq_type;
+		}
+
+		/* program the IOSAPIC routing table: */
+		set_rte(vector, (ia64_get_lid() >> 16) & 0xffff);
+	}
+#endif /* !CONFIG_IA64_SOFTSDV_HACKS */
+}
+
+void
+iosapic_pci_fixup (int phase)
+{
+	struct	pci_dev	*dev;
+	unsigned char pin;
+	int vector;
+
+	if (phase != 1)
+		return;
+
+	pci_for_each_dev(dev) {
+		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+		if (pin) {
+			pin--;          /* interrupt pins are numbered starting from 1 */
+			vector = pci_pin_to_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
+			if (vector < 0 && dev->bus->parent) {
+				/* go back to the bridge */
+				struct pci_dev *bridge = dev->bus->self;
+
+				if (bridge) {
+					/* allow for multiple bridges on an adapter */
+					do {
+						/* do the bridge swizzle... */
+						pin = (pin + PCI_SLOT(dev->devfn)) % 4;
+						vector = pci_pin_to_vector(bridge->bus->number,
+									   PCI_SLOT(bridge->devfn),
+									   pin);
+					} while (vector < 0 && (bridge = bridge->bus->self));
+				}
+				if (vector >= 0)
+					printk(KERN_WARNING
+					       "PCI: using PPB(B%d,I%d,P%d) to get vector %02x\n",
+					       bridge->bus->number, PCI_SLOT(bridge->devfn),
+					       pin, vector);
+				else
+					printk(KERN_WARNING
+					       "PCI: Couldn't map irq for (B%d,I%d,P%d)\n",
+					       bridge->bus->number, PCI_SLOT(bridge->devfn),
+					       pin);
+			}
+			if (vector >= 0) {
+				printk("PCI->APIC IRQ transform: (B%d,I%d,P%d) -> 0x%02x\n",
+				       dev->bus->number, PCI_SLOT(dev->devfn), pin, vector);
+				dev->irq = vector;
+			}
+		}
+		/*
+		 * Nothing to fixup
+		 * Fix out-of-range IRQ numbers
+		 */
+		if (dev->irq >= NR_IRQS)
+			dev->irq = 15;	/* Spurious interrupts */
+	}
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/irq.c linux/arch/ia64/kernel/irq.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/irq.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/ia64/kernel/irq.c	Thu Jan  4 12:50:17 2001
@@ -541,6 +541,18 @@
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
+/*
+ * do_IRQ_per_cpu - dispatch an interrupt on the IRQ_PER_CPU path.
+ *
+ * Unlike do_IRQ(), this routine takes no desc->lock and never sets
+ * IRQ_INPROGRESS; it simply acks the controller, runs the action
+ * list, and calls end().  NOTE(review): this presumes the handler
+ * type's ack/end callbacks are safe to run concurrently on several
+ * CPUs without the descriptor lock -- confirm for each irq_type that
+ * gets the IRQ_PER_CPU flag.
+ */
+void do_IRQ_per_cpu(unsigned long irq, struct pt_regs *regs)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	int cpu = smp_processor_id();
+
+	/* account the interrupt in this CPU's kernel statistics */
+	kstat.irqs[cpu][irq]++;
+
+	desc->handler->ack(irq);
+	handle_IRQ_event(irq, regs, desc->action);
+	desc->handler->end(irq);
+}
+
 /*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
@@ -581,8 +593,7 @@
 	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
 		action = desc->action;
 		status &= ~IRQ_PENDING; /* we commit to handling */
-		if (!(status & IRQ_PER_CPU))
-			status |= IRQ_INPROGRESS; /* we are handling it */
+		status |= IRQ_INPROGRESS; /* we are handling it */
 	}
 	desc->status = status;
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/irq_ia64.c linux/arch/ia64/kernel/irq_ia64.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/irq_ia64.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/irq_ia64.c	Thu Jan  4 12:50:17 2001
@@ -7,6 +7,9 @@
  *
  *  6/10/99: Updated to bring in sync with x86 version to facilitate
  *	     support for SMP and different interrupt controllers.
+ *
+ * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
+ *                      PCI to vector allocation routine.
  */
 
 #include <linux/config.h>
@@ -35,38 +38,28 @@
 
 #define IRQ_DEBUG	0
 
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-spinlock_t ivr_read_lock;
-#endif
-
 /* default base addr of IPI table */
 unsigned long ipi_base_addr = (__IA64_UNCACHED_OFFSET | IPI_DEFAULT_BASE_ADDR);	
 
 /*
- * Legacy IRQ to IA-64 vector translation table.  Any vector not in
- * this table maps to itself (ie: irq 0x30 => IA64 vector 0x30)
+ * Legacy IRQ to IA-64 vector translation table.
  */
 __u8 isa_irq_to_vector_map[16] = {
 	/* 8259 IRQ translation, first 16 entries */
-	0x60, 0x50, 0x10, 0x51, 0x52, 0x53, 0x43, 0x54,
-	0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x40, 0x41
+	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
+	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
 };
 
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-
-int usbfix;
-
-static int __init
-usbfix_option (char *str)
+int
+ia64_alloc_irq (void)
 {
-	printk("irq: enabling USB workaround\n");
-	usbfix = 1;
-	return 1;
-}
-
-__setup("usbfix", usbfix_option);
+	static int next_irq = FIRST_DEVICE_IRQ;
 
-#endif /* CONFIG_ITANIUM_A1_SPECIFIC */
+	if (next_irq > LAST_DEVICE_IRQ)
+		/* XXX could look for sharable vectors instead of panic'ing... */
+		panic("ia64_alloc_irq: out of interrupt vectors!");
+	return next_irq++;
+}
 
 /*
  * That's where the IVT branches when we get an external
@@ -77,42 +70,6 @@
 ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
 {
 	unsigned long saved_tpr;
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	unsigned long eoi_ptr;
- 
-# ifdef CONFIG_USB
-	extern void reenable_usb (void);
-	extern void disable_usb (void);
-
-	if (usbfix)
-		disable_usb();
-# endif
-	/*
-	 * Stop IPIs by getting the ivr_read_lock
-	 */
-	spin_lock(&ivr_read_lock);
-	{
-		unsigned int tmp;
-		/*
-		 * Disable PCI writes
-		 */
-		outl(0x80ff81c0, 0xcf8);
-		tmp = inl(0xcfc);
-		outl(tmp | 0x400, 0xcfc);
-		eoi_ptr = inl(0xcfc);
-		vector = ia64_get_ivr();
-		/*
-		 * Enable PCI writes
-		 */
-		outl(tmp, 0xcfc);
-	}
-	spin_unlock(&ivr_read_lock);
-
-# ifdef CONFIG_USB
-	if (usbfix)
-		reenable_usb();
-# endif
-#endif /* CONFIG_ITANIUM_A1_SPECIFIC */
 
 #if IRQ_DEBUG
 	{
@@ -161,7 +118,10 @@
 		ia64_set_tpr(vector);
 		ia64_srlz_d();
 
-		do_IRQ(vector, regs);
+		if ((irq_desc[vector].status & IRQ_PER_CPU) != 0)
+			do_IRQ_per_cpu(vector, regs);
+		else
+			do_IRQ(vector, regs);
 
 		/*
 		 * Disable interrupts and send EOI:
@@ -169,9 +129,6 @@
 		local_irq_disable();
 		ia64_set_tpr(saved_tpr);
 		ia64_eoi();
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-		break;
-#endif
 		vector = ia64_get_ivr();
 	} while (vector != IA64_SPURIOUS_INT);
 }
@@ -194,8 +151,8 @@
 	 * Disable all local interrupts
 	 */
 	ia64_set_itv(0, 1);
-	ia64_set_lrr0(0, 1);	
-	ia64_set_lrr1(0, 1);	
+	ia64_set_lrr0(0, 1);
+	ia64_set_lrr1(0, 1);
 
 	irq_desc[IA64_SPURIOUS_INT].handler = &irq_type_ia64_sapic;
 #ifdef CONFIG_SMP
@@ -217,14 +174,11 @@
 }
 
 void
-ipi_send (int cpu, int vector, int delivery_mode, int redirect)
+ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
 {
 	unsigned long ipi_addr;
 	unsigned long ipi_data;
 	unsigned long phys_cpu_id;
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	unsigned long flags;
-#endif
 
 #ifdef CONFIG_SMP
 	phys_cpu_id = cpu_physical_id(cpu);
@@ -239,13 +193,5 @@
 	ipi_data = (delivery_mode << 8) | (vector & 0xff);
 	ipi_addr = ipi_base_addr | (phys_cpu_id << 4) | ((redirect & 1)  << 3);
 
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	spin_lock_irqsave(&ivr_read_lock, flags);
-#endif
-
 	writeq(ipi_data, ipi_addr);
-
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	spin_unlock_irqrestore(&ivr_read_lock, flags);
-#endif
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/ivt.S linux/arch/ia64/kernel/ivt.S
--- v2.4.0-prerelease/linux/arch/ia64/kernel/ivt.S	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/ivt.S	Thu Jan  4 12:50:17 2001
@@ -6,6 +6,7 @@
  * Copyright (C) 1998-2000 David Mosberger <davidm@hpl.hp.com>
  *
  * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
+ * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
  */
 /*
  * This file defines the interrupt vector table used by the CPU.
@@ -44,23 +45,13 @@
 #include <asm/system.h>
 #include <asm/unistd.h>
 
-#define MINSTATE_START_SAVE_MIN	/* no special action needed */
-#define MINSTATE_END_SAVE_MIN									\
-	or r2=r2,r14;		/* make first base a kernel virtual address */			\
-	or r12=r12,r14;		/* make sp a kernel virtual address */				\
-	or r13=r13,r14;		/* make `current' a kernel virtual address */			\
-	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
-	;;
-
+#define MINSTATE_VIRT	/* needed by minstate.h */
 #include "minstate.h"
 
 #define FAULT(n)									\
-	rsm psr.dt;			/* avoid nested faults due to TLB misses... */	\
-	;;										\
-	srlz.d;				/* ensure everyone knows psr.dt is off... */	\
 	mov r31=pr;									\
 	mov r19=n;;			/* prepare to save predicates */		\
-	br.cond.sptk.many dispatch_to_fault_handler
+	br.sptk.many dispatch_to_fault_handler
 
 /*
  * As we don't (hopefully) use the space available, we need to fill it with
@@ -122,15 +113,14 @@
 (p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
 	srlz.d					// ensure "rsm psr.dt" has taken effect
 (p6)	movl r19=__pa(SWAPPER_PGD_ADDR)		// region 5 is rooted at swapper_pg_dir
-(p6)	shr r21=r21,PGDIR_SHIFT+PAGE_SHIFT-1
-(p7)	shr r21=r21,PGDIR_SHIFT+PAGE_SHIFT-4
+(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
+(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
 	;;
 (p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
 (p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
 	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
 	;;
-(p6)	cmp.eq p7,p6=-1,r21			// unused address bits all ones?
 	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
 	;;
 (p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
@@ -145,7 +135,7 @@
 (p7)	ld8 r18=[r21]				// read the L3 PTE
 	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
 	;;
-(p7)	tbit.z p6,p7=r18,0			// page present bit cleared?
+(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
 	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
 	;;					// avoid RAW on p7
 (p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
@@ -153,7 +143,7 @@
 	;;
 (p10)	itc.i r18				// insert the instruction TLB entry
 (p11)	itc.d r18				// insert the data TLB entry
-(p6)	br.spnt.few page_fault			// handle bad address/page not present (page fault)
+(p6)	br.spnt.many page_fault			// handle bad address/page not present (page fault)
 	mov cr.ifa=r22
 
 	// Now compute and insert the TLB entry for the virtual page table.
@@ -183,212 +173,117 @@
 
 	mov pr=r31,-1				// restore predicate registers
 	rfi
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
 	/*
-	 * The ITLB basically does the same as the VHPT handler except
-	 * that we always insert exactly one instruction TLB entry.
-	 */
-	/*
-	 * Attempt to lookup PTE through virtual linear page table.
-	 * The speculative access will fail if there is no TLB entry
-	 * for the L3 page table page we're trying to access.
+	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
+	 * page table.  If a nested TLB miss occurs, we switch into physical
+	 * mode, walk the page table, and then re-execute the L3 PTE read
+	 * and go on normally after that.
 	 */
+itlb_fault:
 	mov r16=cr.ifa				// get virtual address
-	mov r19=cr.iha				// get virtual address of L3 PTE
-	;;
-	ld8.s r17=[r19]				// try to read L3 PTE
+	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
+	mov r17=cr.iha				// get virtual address of L3 PTE
+	movl r30=1f				// load nested fault continuation point
+	;;
+1:	ld8 r18=[r17]				// read L3 PTE
 	;;
-	tnat.nz p6,p0=r17			// did read succeed?
-(p6)	br.cond.spnt.many 1f
+	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
+(p6)	br.cond.spnt.many page_fault
 	;;
-	itc.i r17
+	itc.i r18
 	;;
 #ifdef CONFIG_SMP
-	ld8.s r18=[r19]				// try to read L3 PTE again and see if same
+	ld8 r19=[r17]				// read L3 PTE again and see if same
 	mov r20=PAGE_SHIFT<<2			// setup page size for purge
 	;;
-	cmp.eq p6,p7=r17,r18
+	cmp.ne p7,p0=r18,r19
 	;;
 (p7)	ptc.l r16,r20
 #endif
 	mov pr=r31,-1
 	rfi
-
-#ifdef CONFIG_DISABLE_VHPT
-itlb_fault:
-#endif
-1:	rsm psr.dt				// use physical addressing for data
-	mov r19=ar.k7				// get page table base address
-	shl r21=r16,3				// shift bit 60 into sign bit
-	shr.u r17=r16,61			// get the region number into r17
-	;;
-	cmp.eq p6,p7=5,r17			// is IFA pointing into to region 5?
-	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of the faulting address
-	;;
-(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
-	srlz.d					// ensure "rsm psr.dt" has taken effect
-(p6)	movl r19=__pa(SWAPPER_PGD_ADDR)		// region 5 is rooted at swapper_pg_dir
-(p6)	shr r21=r21,PGDIR_SHIFT+PAGE_SHIFT-1
-(p7)	shr r21=r21,PGDIR_SHIFT+PAGE_SHIFT-4
-	;;
-(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
-	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
-	;;
-(p6)	cmp.eq p7,p6=-1,r21			// unused address bits all ones?
-	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
-	;;
-(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
 	;;
-(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
-	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
-	;;
-(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
-	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
-	;;
-(p7)	ld8 r18=[r17]				// read the L3 PTE
-	;;
-(p7)	tbit.z p6,p7=r18,0			// page present bit cleared?
-	;;
-(p7)	itc.i r18				// insert the instruction TLB entry
-(p6)	br.spnt.few page_fault			// handle bad address/page not present (page fault)
-	;;
-#ifdef CONFIG_SMP
-	ld8 r19=[r17]				// re-read the PTE and check if same
-	;;
-	cmp.eq p6,p7=r18,r19
-	mov r20=PAGE_SHIFT<<2
-	;;
-(p7)	ptc.l r16,r20				// PTE changed purge translation
-#endif
-
-	mov pr=r31,-1				// restore predicate registers
-	rfi
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
 	/*
-	 * The DTLB basically does the same as the VHPT handler except
-	 * that we always insert exactly one data TLB entry.
-	 */
-	/*
-	 * Attempt to lookup PTE through virtual linear page table.
-	 * The speculative access will fail if there is no TLB entry
-	 * for the L3 page table page we're trying to access.
+	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
+	 * page table.  If a nested TLB miss occurs, we switch into physical
+	 * mode, walk the page table, and then re-execute the L3 PTE read
+	 * and go on normally after that.
 	 */
+dtlb_fault:
 	mov r16=cr.ifa				// get virtual address
-	mov r19=cr.iha				// get virtual address of L3 PTE
-	;;
-	ld8.s r17=[r19]				// try to read L3 PTE
+	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
+	mov r17=cr.iha				// get virtual address of L3 PTE
+	movl r30=1f				// load nested fault continuation point
 	;;
-	tnat.nz p6,p0=r17			// did read succeed?
-(p6)	br.cond.spnt.many 1f
+1:	ld8 r18=[r17]				// read L3 PTE
 	;;
-	itc.d r17
+	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
+(p6)	br.cond.spnt.many page_fault
+	;;
+	itc.d r18
 	;;
 #ifdef CONFIG_SMP
-	ld8.s r18=[r19]				// try to read L3 PTE again and see if same
+	ld8 r19=[r17]				// read L3 PTE again and see if same
 	mov r20=PAGE_SHIFT<<2			// setup page size for purge
 	;;
-	cmp.eq p6,p7=r17,r18
+	cmp.ne p7,p0=r18,r19
 	;;
 (p7)	ptc.l r16,r20
 #endif
 	mov pr=r31,-1
 	rfi
-
-#ifdef CONFIG_DISABLE_VHPT
-dtlb_fault:
-#endif
-1:	rsm psr.dt				// use physical addressing for data
-	mov r19=ar.k7				// get page table base address
-	shl r21=r16,3				// shift bit 60 into sign bit
-	shr.u r17=r16,61			// get the region number into r17
 	;;
-	cmp.eq p6,p7=5,r17			// is IFA pointing into to region 5?
-	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of the faulting address
-	;;
-(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
-	srlz.d					// ensure "rsm psr.dt" has taken effect
-(p6)	movl r19=__pa(SWAPPER_PGD_ADDR)		// region 5 is rooted at swapper_pg_dir
-(p6)	shr r21=r21,PGDIR_SHIFT+PAGE_SHIFT-1
-(p7)	shr r21=r21,PGDIR_SHIFT+PAGE_SHIFT-4
-	;;
-(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
-	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
-	;;
-(p6)	cmp.eq p7,p6=-1,r21			// unused address bits all ones?
-	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
-	;;
-(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
-	;;
-(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
-	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
-	;;
-(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
-	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
-	;;
-(p7)	ld8 r18=[r17]				// read the L3 PTE
-	;;
-(p7)	tbit.z p6,p7=r18,0			// page present bit cleared?
-	;;
-(p7)	itc.d r18				// insert the instruction TLB entry
-(p6)	br.spnt.few page_fault			// handle bad address/page not present (page fault)
-	;;
-#ifdef CONFIG_SMP
-	ld8 r19=[r17]				// re-read the PTE and check if same
-	;;
-	cmp.eq p6,p7=r18,r19
-	mov r20=PAGE_SHIFT<<2
-	;;
-(p7)	ptc.l r16,r20				// PTE changed purge translation
-#endif
-	mov pr=r31,-1				// restore predicate registers
-	rfi
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
 	mov r16=cr.ifa		// get address that caused the TLB miss
-#ifdef CONFIG_DISABLE_VHPT
+	movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RWX
+	mov r21=cr.ipsr
 	mov r31=pr
 	;;
-	shr.u r21=r16,61			// get the region number into r21
+#ifdef CONFIG_DISABLE_VHPT
+	shr.u r22=r16,61			// get the region number into r22
 	;;
-	cmp.gt p6,p0=6,r21			// user mode 
-(p6)	br.cond.dptk.many itlb_fault
+	cmp.gt p8,p0=6,r22			// user mode 
 	;;
-	mov pr=r31,-1
-#endif
-	movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX
+(p8)	thash r17=r16
 	;;
+(p8)	mov cr.iha=r17
+(p8)	br.cond.dptk.many itlb_fault
+#endif
+	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
 	shr.u r18=r16,57	// move address bit 61 to bit 4
-	dep r16=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS)	// clear ed & reserved bits
+	dep r19=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS)	// clear ed & reserved bits
 	;;
 	andcm r18=0x10,r18	// bit 4=~address-bit(61)
-	dep r16=r17,r16,0,12	// insert PTE control bits into r16
+	cmp.ne p8,p0=r0,r23	// psr.cpl != 0?
+	dep r19=r17,r19,0,12	// insert PTE control bits into r19
 	;;
-	or r16=r16,r18		// set bit 4 (uncached) if the access was to region 6
+	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
+(p8)	br.cond.spnt.many page_fault
 	;;
-	itc.i r16		// insert the TLB entry
+	itc.i r19		// insert the TLB entry
+	mov pr=r31,-1
 	rfi
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
 	mov r16=cr.ifa		// get address that caused the TLB miss
-	movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW
+	movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RWX
 	mov r20=cr.isr
 	mov r21=cr.ipsr
 	mov r31=pr
@@ -396,29 +291,40 @@
 #ifdef CONFIG_DISABLE_VHPT
 	shr.u r22=r16,61			// get the region number into r21
 	;;
-	cmp.gt p8,p0=6,r22			// user mode
+	cmp.gt p8,p0=6,r22			// access to region 0-5
+	;;
+(p8)	thash r17=r16
+	;;
+(p8)	mov cr.iha=r17
 (p8)	br.cond.dptk.many dtlb_fault
 #endif
+	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
 	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
 	shr.u r18=r16,57	// move address bit 61 to bit 4
-	dep r16=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS) // clear ed & reserved bits
+	dep r19=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS) // clear ed & reserved bits
 	;;
-	dep r21=-1,r21,IA64_PSR_ED_BIT,1
 	andcm r18=0x10,r18	// bit 4=~address-bit(61)
-	dep r16=r17,r16,0,12	// insert PTE control bits into r16
+	cmp.ne p8,p0=r0,r23
+(p8)	br.cond.spnt.many page_fault
+
+	dep r21=-1,r21,IA64_PSR_ED_BIT,1
+	dep r19=r17,r19,0,12	// insert PTE control bits into r19
 	;;
-	or r16=r16,r18		// set bit 4 (uncached) if the access was to region 6
+	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
 (p6)	mov cr.ipsr=r21
 	;;
-(p7)	itc.d r16		// insert the TLB entry
+(p7)	itc.d r19		// insert the TLB entry
 	mov pr=r31,-1
 	rfi
-
 	;;
 
 	//-----------------------------------------------------------------------------------
-	// call do_page_fault (predicates are in r31, psr.dt is off, r16 is faulting address)
+	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
 page_fault:
+	ssm psr.dt
+	;;
+	srlz.i
+	;;
 	SAVE_MIN_WITH_COVER
 	//
 	// Copy control registers to temporary registers, then turn on psr bits,
@@ -430,7 +336,7 @@
 	mov r9=cr.isr
 	adds r3=8,r2				// set up second base pointer
 	;;
-	ssm psr.ic | psr.dt
+	ssm psr.ic
 	;;
 	srlz.i					// guarantee that interrupt collection is enabled
 	;;
@@ -445,36 +351,37 @@
 	mov rp=r14
 	;;
 	adds out2=16,r12			// out2 = pointer to pt_regs
-	br.call.sptk.few b6=ia64_do_page_fault	// ignore return address
+	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
 	//
-	// In the absence of kernel bugs, we get here when the Dirty-bit, Instruction
-	// Access-bit, or Data Access-bit faults cause a nested fault because the
-	// dTLB entry for the virtual page table isn't present.  In such a case,
-	// we lookup the pte for the faulting address by walking the page table
-	// and return to the continuation point passed in register r30.
-	// In accessing the page tables, we don't need to check for NULL entries
-	// because if the page tables didn't map the faulting address, it would not
-	// be possible to receive one of the above faults.
+	// In the absence of kernel bugs, we get here when the virtually mapped linear page
+	// table is accessed non-speculatively (e.g.,  in the Dirty-bit, Instruction
+	// Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
+	// table is missing, a nested TLB miss fault is triggered and control is transferred
+	// to this point.  When this happens, we lookup the pte for the faulting address
+	// by walking the page table in physical mode and return to the continuation point
+	// passed in register r30 (or call page_fault if the address is not mapped).
 	//
 	// Input:	r16:	faulting address
 	//		r29:	saved b0
 	//		r30:	continuation address
+	//		r31:	saved pr
 	//
 	// Output:	r17:	physical address of L3 PTE of faulting address
 	//		r29:	saved b0
 	//		r30:	continuation address
+	//		r31:	saved pr
 	//
-	// Clobbered:	b0, r18, r19, r21, r31, psr.dt (cleared)
+	// Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
 	//
 	rsm psr.dt				// switch to using physical data addressing
 	mov r19=ar.k7				// get the page table base address
 	shl r21=r16,3				// shift bit 60 into sign bit
 	;;
-	mov r31=pr				// save the predicate registers
 	shr.u r17=r16,61			// get the region number into r17
 	;;
 	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
@@ -482,26 +389,30 @@
 	;;
 (p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
 	srlz.d
-(p6)	movl r17=__pa(SWAPPER_PGD_ADDR)		// region 5 is rooted at swapper_pg_dir
-(p6)	shr r21=r21,PGDIR_SHIFT+PAGE_SHIFT-1
-(p7)	shr r21=r21,PGDIR_SHIFT+PAGE_SHIFT-4
+(p6)	movl r19=__pa(SWAPPER_PGD_ADDR)		// region 5 is rooted at swapper_pg_dir
+(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
+(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
 	;;
-(p6)	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
 (p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
+	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
 	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
 	;;
-	ld8 r17=[r17]				// fetch the L1 entry
+	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
 	mov b0=r30
 	;;
+(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
 	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
 	;;
-	ld8 r17=[r17]				// fetch the L2 entry
+(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
 	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
 	;;
+(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
 	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
 	;;
-	mov pr=r31,-1				// restore predicates
-	br.cond.sptk.few b0			// return to continuation point
+(p6)	br.cond.spnt.many page_fault
+	br.sptk.many b0				// return to continuation point
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -526,33 +437,19 @@
 	// a nested TLB miss hit where we look up the physical address of the L3 PTE
 	// and then continue at label 1 below.
 	//
-#ifndef CONFIG_SMP
 	mov r16=cr.ifa				// get the address that caused the fault
 	movl r30=1f				// load continuation point in case of nested fault
 	;;
 	thash r17=r16				// compute virtual address of L3 PTE
 	mov r29=b0				// save b0 in case of nested fault
-	;;
-1:	ld8 r18=[r17]
-	;;					// avoid RAW on r18
-	or r18=_PAGE_D,r18			// set the dirty bit
-	mov b0=r29				// restore b0
-	;;
-	st8 [r17]=r18				// store back updated PTE
-	itc.d r18				// install updated PTE
-#else
-	mov r16=cr.ifa				// get the address that caused the fault
-	movl r30=1f				// load continuation point in case of nested fault
-	;;
-	thash r17=r16				// compute virtual address of L3 PTE
+	mov r31=pr				// save pr
+#ifdef CONFIG_SMP
 	mov r28=ar.ccv				// save ar.ccv
-	mov r29=b0				// save b0 in case of nested fault
-	mov r27=pr
 	;;
 1:	ld8 r18=[r17]
 	;;					// avoid RAW on r18
 	mov ar.ccv=r18				// set compare value for cmpxchg
-	or r25=_PAGE_D,r18			// set the dirty bit
+	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
 	;;
 	cmpxchg8.acq r26=[r17],r25,ar.ccv
 	mov r24=PAGE_SHIFT<<2
@@ -568,70 +465,46 @@
 (p7)	ptc.l r16,r24
 	mov b0=r29				// restore b0
 	mov ar.ccv=r28
-	mov pr=r27,-1
+#else
+	;;
+1:	ld8 r18=[r17]
+	;;					// avoid RAW on r18
+	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
+	mov b0=r29				// restore b0
+	;;
+	st8 [r17]=r18				// store back updated PTE
+	itc.d r18				// install updated PTE
 #endif
+	mov pr=r31,-1				// restore pr
 	rfi
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
 	// Like Entry 8, except for instruction access
 	mov r16=cr.ifa				// get the address that caused the fault
+	movl r30=1f				// load continuation point in case of nested fault
+	mov r31=pr				// save predicates
 #ifdef CONFIG_ITANIUM
 	/*
-	 * Erratum 10 (IFA may contain incorrect address) now has
-	 * "NoFix" status.  There are no plans for fixing this.
+	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
 	 */
 	mov r17=cr.ipsr
-	mov r31=pr				// save predicates
 	;;
 	mov r18=cr.iip
 	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
 	;;
 (p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
-	mov pr=r31,-1
 #endif /* CONFIG_ITANIUM */
-
-#ifndef CONFIG_SMP
-	movl r30=1f				// load continuation point in case of nested fault
 	;;
 	thash r17=r16				// compute virtual address of L3 PTE
 	mov r29=b0				// save b0 in case of nested fault)
-	;;
-1:	ld8 r18=[r17]
-#if defined(CONFIG_IA32_SUPPORT) && \
-    (defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_B0_SPECIFIC))
-	//
-	// Erratum 85 (Access bit fault could be reported before page not present fault)
-	//   If the PTE is indicates the page is not present, then just turn this into a
-	//   page fault.
-	//
-	mov r31=pr				// save predicates
-	;;
-	tbit.nz p6,p0=r18,0			// page present bit set?
-(p6)	br.cond.sptk 1f
-	;;					// avoid WAW on p6
-	mov pr=r31,-1
-	br.cond.sptk page_fault			// page wasn't present
-1:	mov pr=r31,-1
-#else
-	;;					// avoid RAW on r18
-#endif
-	or r18=_PAGE_A,r18			// set the accessed bit
-	mov b0=r29				// restore b0
-	;;
-	st8 [r17]=r18				// store back updated PTE
-	itc.i r18				// install updated PTE
-#else
-	movl r30=1f				// load continuation point in case of nested fault
-	;;
-	thash r17=r16				// compute virtual address of L3 PTE
+#ifdef CONFIG_SMP
 	mov r28=ar.ccv				// save ar.ccv
-	mov r29=b0				// save b0 in case of nested fault)
-	mov r27=pr
 	;;
 1:	ld8 r18=[r17]
-#if defined(CONFIG_IA32_SUPPORT) && \
+# if defined(CONFIG_IA32_SUPPORT) && \
     (defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_B0_SPECIFIC))
 	//
 	// Erratum 85 (Access bit fault could be reported before page not present fault)
@@ -639,15 +512,9 @@
 	//   page fault.
 	//
 	;;
-	tbit.nz p6,p0=r18,0			// page present bit set?
-(p6)	br.cond.sptk 1f
-	;;					// avoid WAW on p6
-	mov pr=r27,-1
-	br.cond.sptk page_fault			// page wasn't present
-1:	
-#else
-	;;					// avoid RAW on r18
-#endif
+	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
+(p6)	br.sptk page_fault			// page wasn't present
+# endif
 	mov ar.ccv=r18				// set compare value for cmpxchg
 	or r25=_PAGE_A,r18			// set the accessed bit
 	;;
@@ -665,36 +532,42 @@
 (p7)	ptc.l r16,r24
 	mov b0=r29				// restore b0
 	mov ar.ccv=r28
-	mov pr=r27,-1
-#endif
+#else /* !CONFIG_SMP */
+	;;
+1:	ld8 r18=[r17]
+	;;
+# if defined(CONFIG_IA32_SUPPORT) && \
+    (defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_B0_SPECIFIC))
+	//
+	// Erratum 85 (Access bit fault could be reported before page not present fault)
+	//   If the PTE indicates the page is not present, then just turn this into a
+	//   page fault.
+	//
+	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
+(p6)	br.sptk page_fault			// page wasn't present
+# endif
+	or r18=_PAGE_A,r18			// set the accessed bit
+	mov b0=r29				// restore b0
+	;;
+	st8 [r17]=r18				// store back updated PTE
+	itc.i r18				// install updated PTE
+#endif /* !CONFIG_SMP */
+	mov pr=r31,-1
 	rfi
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
 	// Like Entry 8, except for data access
-#ifndef CONFIG_SMP
 	mov r16=cr.ifa				// get the address that caused the fault
 	movl r30=1f				// load continuation point in case of nested fault
 	;;
 	thash r17=r16				// compute virtual address of L3 PTE
+	mov r31=pr
 	mov r29=b0				// save b0 in case of nested fault)
-	;;
-1:	ld8 r18=[r17]
-	;;					// avoid RAW on r18
-	or r18=_PAGE_A,r18			// set the accessed bit
-	mov b0=r29				// restore b0
-	;;
-	st8 [r17]=r18				// store back updated PTE
-	itc.d r18				// install updated PTE
-#else
-	mov r16=cr.ifa				// get the address that caused the fault
-	movl r30=1f				// load continuation point in case of nested fault
-	;;
-	thash r17=r16				// compute virtual address of L3 PTE
+#ifdef CONFIG_SMP
 	mov r28=ar.ccv				// save ar.ccv
-	mov r29=b0				// save b0 in case of nested fault
-	mov r27=pr
 	;;
 1:	ld8 r18=[r17]
 	;;					// avoid RAW on r18
@@ -713,11 +586,20 @@
 	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
 	;;
 (p7)	ptc.l r16,r24
-	mov b0=r29				// restore b0
 	mov ar.ccv=r28
-	mov pr=r27,-1
+#else
+	;;
+1:	ld8 r18=[r17]
+	;;					// avoid RAW on r18
+	or r18=_PAGE_A,r18			// set the accessed bit
+	;;
+	st8 [r17]=r18				// store back updated PTE
+	itc.d r18				// install updated PTE
 #endif
+	mov b0=r29				// restore b0
+	mov pr=r31,-1
 	rfi
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -725,16 +607,14 @@
 	mov r16=cr.iim
 	mov r17=__IA64_BREAK_SYSCALL
 	mov r31=pr		// prepare to save predicates
-	rsm psr.dt		// avoid nested faults due to TLB misses...
 	;;
-	srlz.d			// ensure everyone knows psr.dt is off...
 	cmp.eq p0,p7=r16,r17	// is this a system call? (p7 <- false, if so)
 (p7)	br.cond.spnt.many non_syscall
 
 	SAVE_MIN				// uses r31; defines r2:
 
-	// turn interrupt collection and data translation back on:
-	ssm psr.ic | psr.dt
+	// turn interrupt collection back on:
+	ssm psr.ic
 	;;
 	srlz.i					// guarantee that interrupt collection is enabled
 	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
@@ -746,14 +626,13 @@
 	adds r3=8,r2		// set up second base pointer for SAVE_REST
 	;;
 	SAVE_REST
-	;;			// avoid WAW on r2 & r3
+	br.call.sptk rp=demine_args		// clear NaT bits in (potential) syscall args
 
 	mov r3=255
 	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
 	adds r2=IA64_TASK_PTRACE_OFFSET,r13	// r2 = &current->ptrace
-
 	;;
-	cmp.geu.unc p6,p7=r3,r15		// (syscall > 0 && syscall <= 1024+255) ?
+	cmp.geu p6,p7=r3,r15		// (syscall > 0 && syscall <= 1024+255) ?
 	movl r16=sys_call_table
 	;;
 (p6)	shladd r16=r15,3,r16
@@ -788,40 +667,61 @@
 	;;
 	st8 [r16]=r18				// store new value for cr.isr
 
-(p8)	br.call.sptk.few b6=b6			// ignore this return addr 
-	br.call.sptk.few rp=ia64_trace_syscall	// rp will be overwritten (ignored)
+(p8)	br.call.sptk.many b6=b6			// ignore this return addr 
+	br.call.sptk.many rp=ia64_trace_syscall	// rp will be overwritten (ignored)
 	// NOT REACHED
 
+	.proc demine_args
+demine_args:
+	alloc r2=ar.pfs,8,0,0,0
+	tnat.nz p8,p0=in0
+	tnat.nz p9,p0=in1
+	;;
+(p8)	mov in0=-1
+	tnat.nz p10,p0=in2
+	tnat.nz p11,p0=in3
+
+(p9)	mov in1=-1
+	tnat.nz p12,p0=in4
+	tnat.nz p13,p0=in5
+	;;
+(p10)	mov in2=-1
+	tnat.nz p14,p0=in6
+	tnat.nz p15,p0=in7
+
+(p11)	mov in3=-1
+(p12)	mov in4=-1
+(p13)	mov in5=-1
+	;;
+(p14)	mov in6=-1
+(p15)	mov in7=-1
+	br.ret.sptk.many rp
+	.endp demine_args
+
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
-	rsm psr.dt		// avoid nested faults due to TLB misses...
-	;;
-	srlz.d			// ensure everyone knows psr.dt is off...
 	mov r31=pr		// prepare to save predicates
 	;;
 
 	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
-	ssm psr.ic | psr.dt	// turn interrupt collection and data translation back on
+	ssm psr.ic		// turn interrupt collection back on
 	;;
 	adds r3=8,r2		// set up second base pointer for SAVE_REST
-	srlz.i			// ensure everybody knows psr.ic and psr.dt are back on
+	srlz.i			// ensure everybody knows psr.ic is back on
 	;;
 	SAVE_REST
 	;;
 	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	mov out0=r0		// defer reading of cr.ivr to handle_irq...
-#else
 	mov out0=cr.ivr		// pass cr.ivr as first arg
-#endif
 	add out1=16,sp		// pass pointer to pt_regs as second arg
 	;;
 	srlz.d			// make  sure we see the effect of cr.ivr
 	movl r14=ia64_leave_kernel
 	;;
 	mov rp=r14
-	br.call.sptk.few b6=ia64_handle_irq
+	br.call.sptk.many b6=ia64_handle_irq
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -855,7 +755,7 @@
 	// The "alloc" can cause a mandatory store which could lead to
 	// an "Alt DTLB" fault which we can handle only if psr.ic is on.
 	//
-	ssm psr.ic | psr.dt
+	ssm psr.ic
 	;;
 	srlz.i		// guarantee that interrupt collection is enabled
 	;;
@@ -867,7 +767,7 @@
 	;;
 	SAVE_REST
 	;;
-	br.call.sptk.few rp=ia64_illegal_op_fault
+	br.call.sptk.many rp=ia64_illegal_op_fault
 .ret0:	;;
 	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
 	mov out0=r9
@@ -881,6 +781,7 @@
 	cmp.ne p6,p0=0,r8
 (p6)	br.call.dpnt b6=b6		// call returns to ia64_leave_kernel
 	br.sptk ia64_leave_kernel
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -900,7 +801,7 @@
 	SAVE_MIN
 	;;
 	mov r14=cr.isr
-	ssm psr.ic | psr.dt
+	ssm psr.ic
 	;;
 	srlz.i					// guarantee that interrupt collection is enabled
 	;;
@@ -913,7 +814,7 @@
 	shr r14=r14,16          // Get interrupt number
 	;; 
 	cmp.ne p6,p0=r14,r15
-(p6)    br.call.dpnt.few b6=non_ia32_syscall
+(p6)    br.call.dpnt.many b6=non_ia32_syscall
 
 	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
 	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
@@ -924,7 +825,7 @@
 	alloc r15=ar.pfs,0,0,6,0	// must first in an insn group
 	;; 
 	ld4 r8=[r14],8          // r8 == EAX (syscall number)
-	mov r15=190		// sys_vfork - last implemented system call
+	mov r15=222		// last implemented entry in the ia32 system call table
 	;;
 	cmp.leu.unc p6,p7=r8,r15
 	ld4 out1=[r14],8        // r9 == ecx
@@ -961,11 +862,12 @@
 	mov out0=r14                            // interrupt #
 	add out1=16,sp                          // pointer to pt_regs
 	;;			// avoid WAW on CFM
-	br.call.sptk.few rp=ia32_bad_interrupt
+	br.call.sptk.many rp=ia32_bad_interrupt
 .ret1:	movl r15=ia64_leave_kernel
 	;;
 	mov rp=r15
 	br.ret.sptk.many rp
+	;;
 
 #endif /* CONFIG_IA32_SUPPORT */
 
@@ -985,8 +887,8 @@
 	mov r8=cr.iim			// get break immediate (must be done while psr.ic is off)
 	adds r3=8,r2			// set up second base pointer for SAVE_REST
 
-	// turn interrupt collection and data translation back on:
-	ssm psr.ic | psr.dt
+	// turn interrupt collection back on:
+	ssm psr.ic
 	;;
 	srlz.i				// guarantee that interrupt collection is enabled
 	;;
@@ -1000,7 +902,8 @@
 	SAVE_REST
 	mov rp=r15
 	;;
-	br.call.sptk.few b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
+	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -1023,7 +926,7 @@
 	// wouldn't get the state to recover.
 	//
 	mov r15=cr.ifa
-	ssm psr.ic | psr.dt
+	ssm psr.ic
 	;;
 	srlz.i					// guarantee that interrupt collection is enabled
 	;;
@@ -1039,7 +942,8 @@
 	adds out1=16,sp				// out1 = pointer to pt_regs
 	;;
 	mov rp=r14
-	br.sptk.few ia64_prepare_handle_unaligned
+	br.sptk.many ia64_prepare_handle_unaligned
+	;;
 
 	.align 1024
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -1055,7 +959,6 @@
 	//
 	// Input:
 	//	psr.ic:	off
-	//	psr.dt:	off
 	//	r19:	fault vector number (e.g., 24 for General Exception)
 	//	r31:	contains saved predicates (pr)
 	//
@@ -1071,7 +974,7 @@
 	mov r10=cr.iim
 	mov r11=cr.itir
 	;;
-	ssm psr.ic | psr.dt
+	ssm psr.ic
 	;;
 	srlz.i					// guarantee that interrupt collection is enabled
 	;;
@@ -1089,7 +992,9 @@
 	movl r14=ia64_leave_kernel
 	;;
 	mov rp=r14
-	br.call.sptk.few b6=ia64_fault
+	br.call.sptk.many b6=ia64_fault
+	;;
+
 //
 // --- End of long entries, Beginning of short entries
 //
@@ -1099,16 +1004,16 @@
 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
 	mov r16=cr.ifa
 	rsm psr.dt
-#if 1
-	// If you disable this, you MUST re-enable to update_mmu_cache() code in pgtable.h
+	// The Linux page fault handler doesn't expect non-present pages to be in
+	// the TLB.  Flush the existing entry now, so we meet that expectation.
 	mov r17=_PAGE_SIZE_4K<<2
 	;;
 	ptc.l r16,r17
-#endif
 	;;
 	mov r31=pr
 	srlz.d
-	br.cond.sptk.many page_fault
+	br.sptk.many page_fault
+	;;
 
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -1118,7 +1023,8 @@
 	mov r31=pr
 	;;
 	srlz.d
-	br.cond.sptk.many page_fault
+	br.sptk.many page_fault
+	;;
 
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -1128,7 +1034,8 @@
 	mov r31=pr
 	;;
 	srlz.d
-	br.cond.sptk.many page_fault
+	br.sptk.many page_fault
+	;;
 
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -1138,31 +1045,32 @@
 	mov r31=pr
 	;;
 	srlz.d
-	br.cond.sptk.many page_fault
+	br.sptk.many page_fault
+	;;
 
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
 	mov r16=cr.isr
 	mov r31=pr
-	rsm psr.dt		// avoid nested faults due to TLB misses...
 	;;
-	srlz.d			// ensure everyone knows psr.dt is off...
 	cmp4.eq p6,p0=0,r16
 (p6)	br.sptk dispatch_illegal_op_fault
 	;;
 	mov r19=24		// fault number
-	br.cond.sptk.many dispatch_to_fault_handler
+	br.sptk.many dispatch_to_fault_handler
+	;;
 
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
-	rsm psr.dt | psr.dfh			// ensure we can access fph
+	rsm psr.dfh		// ensure we can access fph
 	;;
 	srlz.d
 	mov r31=pr
 	mov r19=25
-	br.cond.sptk.many dispatch_to_fault_handler
+	br.sptk.many dispatch_to_fault_handler
+	;;
 
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -1204,6 +1112,7 @@
 	;;
 
 	rfi				// and go back
+	;;
 
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -1218,12 +1127,11 @@
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
-	rsm psr.dt		// avoid nested faults due to TLB misses...
 	mov r16=cr.ipsr
 	mov r31=pr		// prepare to save predicates
 	;;									
-	srlz.d			// ensure everyone knows psr.dt is off
-	br.cond.sptk.many dispatch_unaligned_handler
+	br.sptk.many dispatch_unaligned_handler
+	;;
 
 	.align 256
 /////////////////////////////////////////////////////////////////////////////////////////
@@ -1304,9 +1212,6 @@
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
 #ifdef	CONFIG_IA32_SUPPORT
-	rsm psr.dt
-	;;
-	srlz.d
 	mov r31=pr
 	mov r16=cr.isr
 	;;
@@ -1325,7 +1230,7 @@
 	;;
 	mov pr=r31,-1		// restore predicate registers
 	rfi
-
+	;;
 1:
 #endif	// CONFIG_IA32_SUPPORT
 	FAULT(46)
@@ -1334,11 +1239,9 @@
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
 #ifdef CONFIG_IA32_SUPPORT
-	rsm psr.dt
-	;;
-	srlz.d
 	mov r31=pr
-	br.cond.sptk.many dispatch_to_ia32_handler
+	br.sptk.many dispatch_to_ia32_handler
+	;;
 #else
 	FAULT(47)
 #endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/machvec.c linux/arch/ia64/kernel/machvec.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/machvec.c	Sun Aug 13 10:17:16 2000
+++ linux/arch/ia64/kernel/machvec.c	Thu Jan  4 12:50:17 2001
@@ -1,10 +1,12 @@
 #include <linux/config.h>
+
+#ifdef CONFIG_IA64_GENERIC
+
 #include <linux/kernel.h>
+#include <linux/string.h>
 
 #include <asm/page.h>
 #include <asm/machvec.h>
-
-#ifdef CONFIG_IA64_GENERIC
 
 struct ia64_machine_vector ia64_mv;
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/mca.c linux/arch/ia64/kernel/mca.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/mca.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/mca.c	Thu Jan  4 12:50:17 2001
@@ -19,6 +19,7 @@
 #include <linux/irq.h>
 #include <linux/smp_lock.h>
 
+#include <asm/machvec.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -365,7 +366,7 @@
 void
 ia64_mca_wakeup(int cpu)
 {
-	ipi_send(cpu, IA64_MCA_WAKEUP_INT_VECTOR, IA64_IPI_DM_INT, 0);
+	platform_send_ipi(cpu, IA64_MCA_WAKEUP_INT_VECTOR, IA64_IPI_DM_INT, 0);
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/mca_asm.S linux/arch/ia64/kernel/mca_asm.S
--- v2.4.0-prerelease/linux/arch/ia64/kernel/mca_asm.S	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/mca_asm.S	Thu Jan  4 12:50:17 2001
@@ -3,11 +3,10 @@
 //
 // Mods by cfleck to integrate into kernel build
 // 00/03/15 davidm Added various stop bits to get a clean compile
-// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp kstack,
-//		   switch modes, jump to C INIT handler
 //
-#include <linux/config.h>
-
+// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
+//		   kstack, switch modes, jump to C INIT handler
+//
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mca_asm.h>
@@ -17,14 +16,7 @@
  * When we get an machine check, the kernel stack pointer is no longer
  * valid, so we need to set a new stack pointer.
  */
-#define MINSTATE_START_SAVE_MIN							\
-(pKern) movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE;		\
-	;;
-
-#define MINSTATE_END_SAVE_MIN							\
-	or r12=r12,r14;		/* make sp a kernel virtual address */		\
-	or r13=r13,r14;		/* make `current' a kernel virtual address */	\
-	;;
+#define	MINSTATE_PHYS	/* Make sure stack access is physical for MINSTATE */ 
 
 #include "minstate.h"
 	
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/minstate.h linux/arch/ia64/kernel/minstate.h
--- v2.4.0-prerelease/linux/arch/ia64/kernel/minstate.h	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/minstate.h	Thu Jan  4 12:50:17 2001
@@ -20,6 +20,72 @@
 #define rR1		r20
 
 /*
+ * Here start the source dependent macros.
+ */
+
+/*
+ * For ivt.s we want to access the stack virtually so we don't have to disable translation
+ * on interrupts.
+ */
+#define MINSTATE_START_SAVE_MIN_VIRT								\
+	dep r1=-1,r1,61,3;				/* r1 = current (virtual) */		\
+(p7)	mov ar.rsc=r0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
+	;;											\
+(p7)	addl rKRBS=IA64_RBS_OFFSET,r1;			/* compute base of RBS */		\
+(p7)	mov rARRNAT=ar.rnat;									\
+(pKern) mov r1=sp;					/* get sp  */				\
+	;;											\
+(p7)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
+(p7)	mov rARBSPSTORE=ar.bspstore;			/* save ar.bspstore */			\
+	;;											\
+(pKern) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */	\
+(p7)	mov ar.bspstore=rKRBS;				/* switch to kernel RBS */		\
+	;;											\
+(p7)	mov r18=ar.bsp;										\
+(p7)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
+
+#define MINSTATE_END_SAVE_MIN_VIRT								\
+	or r13=r13,r14;		/* make `current' a kernel virtual address */			\
+	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
+	;;
+
+/*
+ * For mca_asm.S we want to access the stack physically since the state is saved before we
+ * go virtual and dont want to destroy the iip or ipsr.
+ */
+#define MINSTATE_START_SAVE_MIN_PHYS								\
+(pKern) movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE;				\
+(p7)	mov ar.rsc=r0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
+(p7)	addl rKRBS=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */	\
+	;;											\
+(p7)	mov rARRNAT=ar.rnat;									\
+(pKern) dep r1=0,sp,61,3;				/* compute physical addr of sp	*/	\
+(p7)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
+(p7)	mov rARBSPSTORE=ar.bspstore;			/* save ar.bspstore */			\
+(p7)	dep rKRBS=-1,rKRBS,61,3;			/* compute kernel virtual addr of RBS */\
+	;;											\
+(pKern) addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */		\
+(p7)	mov ar.bspstore=rKRBS;			/* switch to kernel RBS */			\
+	;;											\
+(p7)	mov r18=ar.bsp;										\
+(p7)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
+
+#define MINSTATE_END_SAVE_MIN_PHYS								\
+	or r12=r12,r14;		/* make sp a kernel virtual address */				\
+	or r13=r13,r14;		/* make `current' a kernel virtual address */			\
+	;;
+
+#ifdef MINSTATE_VIRT
+# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
+# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
+#endif
+
+#ifdef MINSTATE_PHYS
+# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
+# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
+#endif
+
+/*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
  * on.
@@ -31,7 +97,6 @@
  *
  * Upon exit, the state is as follows:
  *	psr.ic: off
- *	psr.dt: off
  *	r2 = points to &pt_regs.r16
  *	r12 = kernel sp (kernel virtual address)
  *	r13 = points to current task_struct (kernel virtual address)
@@ -50,7 +115,7 @@
 	mov rCRIPSR=cr.ipsr;									  \
 	mov rB6=b6;		/* rB6 = branch reg 6 */					  \
 	mov rCRIIP=cr.iip;									  \
-	mov r1=ar.k6;		/* r1 = current */						  \
+	mov r1=ar.k6;		/* r1 = current (physical) */					  \
 	;;											  \
 	invala;											  \
 	extr.u r16=rCRIPSR,32,2;		/* extract psr.cpl */				  \
@@ -58,25 +123,11 @@
 	cmp.eq pKern,p7=r0,r16;			/* are we in kernel mode already? (psr.cpl==0) */ \
 	/* switch from user to kernel RBS: */							  \
 	COVER;											  \
-	;; 									                  \
-	MINSTATE_START_SAVE_MIN									  \
-(p7)	mov ar.rsc=r0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	  \
-(p7)	addl rKRBS=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */	  \
-	;;											  \
-(p7)	mov rARRNAT=ar.rnat;									  \
-(pKern)	dep r1=0,sp,61,3;				/* compute physical addr of sp  */	  \
-(p7)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	  \
-(p7)	mov rARBSPSTORE=ar.bspstore;			/* save ar.bspstore */			  \
-(p7)	dep rKRBS=-1,rKRBS,61,3;			/* compute kernel virtual addr of RBS */  \
 	;;											  \
-(pKern)	addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */		  \
-(p7)	mov ar.bspstore=rKRBS;			/* switch to kernel RBS */			  \
+	MINSTATE_START_SAVE_MIN									  \
 	;;											  \
-(p7)	mov r18=ar.bsp;										  \
-(p7)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		  \
-												  \
-	mov r16=r1;		/* initialize first base pointer */				  \
-	adds r17=8,r1;		/* initialize second base pointer */				  \
+	mov r16=r1;					/* initialize first base pointer */	  \
+	adds r17=8,r1;					/* initialize second base pointer */	  \
 	;;											  \
 	st8 [r16]=rCRIPSR,16;	/* save cr.ipsr */						  \
 	st8 [r17]=rCRIIP,16;	/* save cr.iip */						  \
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/pal.S linux/arch/ia64/kernel/pal.S
--- v2.4.0-prerelease/linux/arch/ia64/kernel/pal.S	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/pal.S	Thu Jan  4 12:50:17 2001
@@ -52,10 +52,9 @@
 /*
  * Make a PAL call using the static calling convention.
  *
- * in0         Pointer to struct ia64_pal_retval
- * in1         Index of PAL service
- * in2 - in4   Remaining PAL arguments
- * in5	       1 ==> clear psr.ic,  0 ==> don't clear psr.ic
+ * in0         Index of PAL service
+ * in1 - in3   Remaining PAL arguments
+ * in4	       1 ==> clear psr.ic,  0 ==> don't clear psr.ic
  *
  */
 GLOBAL_ENTRY(ia64_pal_call_static)
@@ -69,7 +68,7 @@
 	}
 	;;
 	ld8 loc2 = [loc2]		// loc2 <- entry point
-	tbit.nz p6,p7 = in5, 0
+	tbit.nz p6,p7 = in4, 0
 	adds r8 = 1f-1b,r8
 	;;
 	mov loc3 = psr
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/palinfo.c linux/arch/ia64/kernel/palinfo.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/palinfo.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/palinfo.c	Thu Jan  4 12:50:17 2001
@@ -16,7 +16,6 @@
  *	- as of 2.2.9/2.2.12, the following values are still wrong
  *		PAL_VM_SUMMARY: key & rid sizes
  */
-#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/init.h>
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/pci-dma.c linux/arch/ia64/kernel/pci-dma.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/pci-dma.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/pci-dma.c	Wed Dec 31 16:00:00 1969
@@ -1,517 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * This implementation is for IA-64 platforms that do not support
- * I/O TLBs (aka DMA address translation hardware).
- * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
- * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
- */
-
-#include <linux/config.h>
-
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-#include <asm/pci.h>
-#include <asm/dma.h>
-
-#ifdef CONFIG_SWIOTLB
-
-#include <linux/init.h>
-#include <linux/bootmem.h>
-
-#define ALIGN(val, align) ((unsigned long) (((unsigned long) (val) + ((align) - 1)) & ~((align) - 1)))
-
-/*
- * log of the size of each IO TLB slab.  The number of slabs is command line
- * controllable.
- */
-#define IO_TLB_SHIFT 11
-
-/*
- * Used to do a quick range check in pci_unmap_single and pci_sync_single, to see if the 
- * memory was in fact allocated by this API.
- */
-static char *io_tlb_start, *io_tlb_end;
-
-/*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and io_tlb_end.
- * This is command line adjustable via setup_io_tlb_npages.
- */
-unsigned long io_tlb_nslabs = 1024;
-
-/*
- * This is a free list describing the number of free entries available from each index
- */
-static unsigned int *io_tlb_list;
-static unsigned int io_tlb_index;
-
-/*
- * We need to save away the original address corresponding to a mapped entry for the sync 
- * operations.
- */
-static unsigned char **io_tlb_orig_addr;
-
-/*
- * Protect the above data structures in the map and unmap calls
- */ 
-spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;
-
-static int __init
-setup_io_tlb_npages (char *str)
-{
-	io_tlb_nslabs = simple_strtoul(str, NULL, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);
-	return 1;
-}
-__setup("swiotlb=", setup_io_tlb_npages);
-
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer
- * data structures for the software IO TLB used to implement the PCI DMA API
- */
-void
-setup_swiotlb (void)
-{
-	int i;
-
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
-	if (!io_tlb_start)
-		BUG();
-	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
-
-	/*
-	 * Allocate and initialize the free list array.  This array is used
-	 * to find contiguous free memory regions of size 2^IO_TLB_SHIFT between
-	 * io_tlb_start and io_tlb_end.
-	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-	for (i = 0; i < io_tlb_nslabs; i++)
-		io_tlb_list[i] = io_tlb_nslabs - i;
-	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
-
-	printk("Placing software IO TLB between 0x%p - 0x%p\n",
-	       (void *) io_tlb_start, (void *) io_tlb_end);
-}
-
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-__pci_map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
-{
-	unsigned long flags;
-	char *dma_addr;
-	unsigned int i, nslots, stride, index, wrap;
-
-	/*
-	 * For mappings greater than a page size, we limit the stride (and hence alignment)
-	 * to a page size.
-	 */
-	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	if (size > (1 << PAGE_SHIFT))
-		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
-	else
-		stride = nslots;
-
-	if (!nslots)
-		BUG();
-
-	/*
-	 * Find suitable number of IO TLB entries size that will fit this request and allocate a buffer
-	 * from that IO TLB pool.
-	 */
-	spin_lock_irqsave(&io_tlb_lock, flags);
-	{
-		wrap = index = ALIGN(io_tlb_index, stride);
-		do {
-			/*
-			 * If we find a slot that indicates we have 'nslots' number of 
-			 * contiguous buffers, we allocate the buffers from that slot and mark the
-			 * entries as '0' indicating unavailable.
-			 */
-			if (io_tlb_list[index] >= nslots) {
-				for (i = index; i < index + nslots; i++)
-					io_tlb_list[i] = 0;
-				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-				/*
-				 * Update the indices to avoid searching in the next round.
-				 */
-				io_tlb_index = (index + nslots) < io_tlb_nslabs ? (index + nslots) : 0;
-
-				goto found;
-			}
-			index += stride;
-			if (index >= io_tlb_nslabs)
-				index = 0;
-		} while (index != wrap);
-
-		/*
-		 * XXX What is a suitable recovery mechanism here?  We cannot 
-		 * sleep because we are called from with in interrupts!
-		 */
-		panic("__pci_map_single: could not allocate software IO TLB (%ld bytes)", size);
-found:
-	}
-	spin_unlock_irqrestore(&io_tlb_lock, flags);
-
-	/*
-	 * Save away the mapping from the original address to the DMA address.  This is needed
-	 * when we sync the memory.  Then we sync the buffer if needed.
-	 */
-	io_tlb_orig_addr[index] = buffer;
-	if (direction == PCI_DMA_TODEVICE || direction == PCI_DMA_BIDIRECTIONAL)
-		memcpy(dma_addr, buffer, size);
-
-	return dma_addr;
-}
-
-/*
- * dma_addr is the kernel virtual address of the bounce buffer to unmap.
- */
-static void
-__pci_unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
-{
-	unsigned long flags;
-	int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	char *buffer = io_tlb_orig_addr[index];
-
-	/*
-	 * First, sync the memory before unmapping the entry
-	 */
-	if ((direction == PCI_DMA_FROMDEVICE) || (direction == PCI_DMA_BIDIRECTIONAL))
-		/*
- 	 	 * bounce... copy the data back into the original buffer
-	  	 * and delete the bounce buffer.
- 	 	 */
-		memcpy(buffer, dma_addr, size);
-
-	/*
-	 * Return the buffer to the free list by setting the corresponding entries to indicate
-	 * the number of contigous entries available.  
-	 * While returning the entries to the free list, we merge the entries with slots below
-	 * and above the pool being returned.
-	 */
-	spin_lock_irqsave(&io_tlb_lock, flags);
-	{
-		int count = ((index + nslots) < io_tlb_nslabs ? io_tlb_list[index + nslots] : 0);
-		/*
-		 * Step 1: return the slots to the free list, merging the slots with superceeding slots
-		 */
-		for (i = index + nslots - 1; i >= index; i--)
-			io_tlb_list[i] = ++count;
-		/*
-		 * Step 2: merge the returned slots with the preceeding slots, if available (non zero)
-		 */
-		for (i = index - 1; (i >= 0) && io_tlb_list[i]; i--)
-			io_tlb_list[i] += io_tlb_list[index];
-	}
-	spin_unlock_irqrestore(&io_tlb_lock, flags);
-}
-
-static void
-__pci_sync_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
-{
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	char *buffer = io_tlb_orig_addr[index];
-
-	/*
-  	 * bounce... copy the data back into/from the original buffer
-	 * XXX How do you handle PCI_DMA_BIDIRECTIONAL here ?
- 	 */
-	if (direction == PCI_DMA_FROMDEVICE)
-		memcpy(buffer, dma_addr, size);
-	else if (direction == PCI_DMA_TODEVICE)
-		memcpy(dma_addr, buffer, size);
-	else
-		BUG();
-}
-
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode.
- * The PCI address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single is performed.
- */
-dma_addr_t
-pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
-	unsigned long pci_addr = virt_to_phys(ptr);
-
-	if (direction == PCI_DMA_NONE)
-		BUG();
-	/*
-	 * Check if the PCI device can DMA to ptr... if so, just return ptr
-	 */
-	if ((pci_addr & ~hwdev->dma_mask) == 0)
-		/*
-		 * Device is bit capable of DMA'ing to the
-		 * buffer... just return the PCI address of ptr
-		 */
-		return pci_addr;
-
-	/* 
-	 * get a bounce buffer: 
-	 */
-	pci_addr = virt_to_phys(__pci_map_single(hwdev, ptr, size, direction));
-
-	/*
-	 * Ensure that the address returned is DMA'ble:
-	 */
-	if ((pci_addr & ~hwdev->dma_mask) != 0)
-		panic("__pci_map_single: bounce buffer is not DMA'ble");
-
-	return pci_addr;
-}
-
-/*
- * Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guarenteed to see
- * whatever the device wrote there.
- */
-void
-pci_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
-{
-	char *dma_addr = phys_to_virt(pci_addr);
-
-	if (direction == PCI_DMA_NONE)
-		BUG();
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		__pci_unmap_single(hwdev, dma_addr, size, direction);
-}
-
-/*
- * Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, the
- * device again owns the buffer.
- */
-void
-pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
-{
-	char *dma_addr = phys_to_virt(pci_addr);
-
-	if (direction == PCI_DMA_NONE)
-		BUG();
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		__pci_sync_single(hwdev, dma_addr, size, direction);
-}
-
-/*
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-int
-pci_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
-{
-	int i;
-
-	if (direction == PCI_DMA_NONE)
-		BUG();
-
-	for (i = 0; i < nelems; i++, sg++) {
-		sg->orig_address = sg->address;
-		if ((virt_to_phys(sg->address) & ~hwdev->dma_mask) != 0) {
-			sg->address = __pci_map_single(hwdev, sg->address, sg->length, direction);
-		}
-	}
-	return nelems;
-}
-
-/*
- * Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-void
-pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
-{
-	int i;
-
-	if (direction == PCI_DMA_NONE)
-		BUG();
-
-	for (i = 0; i < nelems; i++, sg++)
-		if (sg->orig_address != sg->address) {
-			__pci_unmap_single(hwdev, sg->address, sg->length, direction);
-			sg->address = sg->orig_address;
-		}
-}
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA
- * translations after a transfer.
- *
- * The same as pci_dma_sync_single but for a scatter-gather list,
- * same rules and usage.
- */
-void
-pci_dma_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
-{
-	int i;
-
-	if (direction == PCI_DMA_NONE)
-		BUG();
-
-	for (i = 0; i < nelems; i++, sg++)
-		if (sg->orig_address != sg->address)
-			__pci_sync_single(hwdev, sg->address, sg->length, direction);
-}
-
-#else
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single is performed.
- */
-dma_addr_t
-pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
-        if (direction == PCI_DMA_NONE)
-                BUG();
-        return virt_to_bus(ptr);
-}
-
-/*
- * Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guarenteed to see
- * whatever the device wrote there.
- */
-void
-pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
-{
-        if (direction == PCI_DMA_NONE)
-                BUG();
-        /* Nothing to do */
-}
-/*
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-int
-pci_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
-{
-        if (direction == PCI_DMA_NONE)
-                BUG();
-        return nents;
-}
-
-/*
- * Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-void
-pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
-{
-        if (direction == PCI_DMA_NONE)
-                BUG();
-        /* Nothing to do */
-}
-/*
- * Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, the
- * device again owns the buffer.
- */
-void
-pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
-{
-        if (direction == PCI_DMA_NONE)
-                BUG();
-        /* Nothing to do */
-}
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA
- * translations after a transfer.
- *
- * The same as pci_dma_sync_single but for a scatter-gather list,
- * same rules and usage.
- */
-void
-pci_dma_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
-{
-        if (direction == PCI_DMA_NONE)
-                BUG();
-        /* Nothing to do */
-}
-
-#endif /* CONFIG_SWIOTLB */
-
-void *
-pci_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
-{
-	unsigned long pci_addr;
-	int gfp = GFP_ATOMIC;
-	void *ret;
-
-	if (!hwdev || hwdev->dma_mask <= 0xffffffff)
-		gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-	if (!ret)
-		return NULL;
-
-	memset(ret, 0, size);
-	pci_addr = virt_to_phys(ret);
-	if ((pci_addr & ~hwdev->dma_mask) != 0)
-		panic("pci_alloc_consistent: allocated memory is out of range for PCI device");
-	*dma_handle = pci_addr;
-	return ret;
-}
-
-void
-pci_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
-	free_pages((unsigned long) vaddr, get_order(size));
-}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/pci.c linux/arch/ia64/kernel/pci.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/pci.c	Mon Aug  7 14:31:40 2000
+++ linux/arch/ia64/kernel/pci.c	Thu Jan  4 12:50:17 2001
@@ -1,10 +1,8 @@
 /*
- * pci.c - Low-Level PCI Access in IA64
+ * pci.c - Low-Level PCI Access in IA-64
  * 
  * Derived from bios32.c of i386 tree.
- *
  */
-
 #include <linux/config.h>
 
 #include <linux/types.h>
@@ -44,19 +42,16 @@
  * This interrupt-safe spinlock protects all accesses to PCI
  * configuration space.
  */
-
 spinlock_t pci_lock = SPIN_LOCK_UNLOCKED;
 
-struct pci_fixup pcibios_fixups[] = { { 0 } };
-
-#define PCI_NO_CHECKS		0x400
-#define PCI_NO_PEER_FIXUP	0x800
-
-static unsigned int pci_probe = PCI_NO_CHECKS;
+struct pci_fixup pcibios_fixups[] = {
+	{ 0 }
+};
 
 /* Macro to build a PCI configuration address to be passed as a parameter to SAL. */
 
-#define PCI_CONFIG_ADDRESS(dev, where) (((u64) dev->bus->number << 16) | ((u64) (dev->devfn & 0xff) << 8) | (where & 0xff))
+#define PCI_CONFIG_ADDRESS(dev, where) \
+	(((u64) dev->bus->number << 16) | ((u64) (dev->devfn & 0xff) << 8) | (where & 0xff))
 
 static int 
 pci_conf_read_config_byte(struct pci_dev *dev, int where, u8 *value)
@@ -109,8 +104,7 @@
 	return ia64_sal_pci_config_write(PCI_CONFIG_ADDRESS(dev, where), 4, value);
 }
 
-
-static struct pci_ops pci_conf = {
+struct pci_ops pci_conf = {
       pci_conf_read_config_byte,
       pci_conf_read_config_word,
       pci_conf_read_config_dword,
@@ -120,36 +114,21 @@
 };
 
 /*
- * Try to find PCI BIOS.  This will always work for IA64.
- */
-
-static struct pci_ops * __init
-pci_find_bios(void)
-{
-	return &pci_conf;
-}
-
-/*
  * Initialization. Uses the SAL interface
  */
-
-#define PCI_BUSES_TO_SCAN 255
-
 void __init 
-pcibios_init(void)
+pcibios_init (void)
 {
-	struct pci_ops *ops = NULL;
+#	define PCI_BUSES_TO_SCAN 255
 	int i;
 
-	if ((ops = pci_find_bios()) == NULL) {
-		printk("PCI: No PCI bus detected\n");
-		return;
-	}
+	platform_pci_fixup(0);	/* phase 0 initialization (before PCI bus has been scanned) */
 
 	printk("PCI: Probing PCI hardware\n");
 	for (i = 0; i < PCI_BUSES_TO_SCAN; i++) 
-		pci_scan_bus(i, ops, NULL);
-	platform_pci_fixup();
+		pci_scan_bus(i, &pci_conf, NULL);
+
+	platform_pci_fixup(1);	/* phase 1 initialization (after PCI bus has been scanned) */
 	return;
 }
 
@@ -157,16 +136,15 @@
  *  Called after each bus is probed, but before its children
  *  are examined.
  */
-
 void __init
-pcibios_fixup_bus(struct pci_bus *b)
+pcibios_fixup_bus (struct pci_bus *b)
 {
 	return;
 }
 
 void __init
-pcibios_update_resource(struct pci_dev *dev, struct resource *root,
-			struct resource *res, int resource)
+pcibios_update_resource (struct pci_dev *dev, struct resource *root,
+			 struct resource *res, int resource)
 {
         unsigned long where, size;
         u32 reg;
@@ -181,7 +159,7 @@
 }
 
 void __init
-pcibios_update_irq(struct pci_dev *dev, int irq)
+pcibios_update_irq (struct pci_dev *dev, int irq)
 {
 	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
 
@@ -204,18 +182,16 @@
 	return 0;
 }
 
+void
+pcibios_align_resource (void *data, struct resource *res, unsigned long size)
+{
+}
+
 /*
  * PCI BIOS setup, always defaults to SAL interface
  */
-
 char * __init 
-pcibios_setup(char *str)
+pcibios_setup (char *str)
 {
-	pci_probe =  PCI_NO_CHECKS;
 	return NULL;
-}
-
-void
-pcibios_align_resource (void *data, struct resource *res, unsigned long size)
-{
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/perfmon.c linux/arch/ia64/kernel/perfmon.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/perfmon.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/perfmon.c	Thu Jan  4 12:50:17 2001
@@ -4,18 +4,20 @@
  *
  * Originaly Written by Ganesh Venkitachalam, IBM Corp.
  * Modifications by David Mosberger-Tang, Hewlett-Packard Co.
+ * Modifications by Stephane Eranian, Hewlett-Packard Co.
  * Copyright (C) 1999 Ganesh Venkitachalam <venkitac@us.ibm.com>
  * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
  */
 
 #include <linux/config.h>
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/smp_lock.h>
 #include <linux/proc_fs.h>
-#include <linux/ptrace.h>
 
 #include <asm/errno.h>
 #include <asm/hw_irq.h>
@@ -58,19 +60,51 @@
 #define MAX_PERF_COUNTER	4	/* true for Itanium, at least */
 #define PMU_FIRST_COUNTER	4	/* first generic counter */
 
-#define WRITE_PMCS_AND_START	0xa0
-#define WRITE_PMCS		0xa1
-#define READ_PMDS		0xa2
-#define STOP_PMCS		0xa3
+#define PFM_WRITE_PMCS		0xa0
+#define PFM_WRITE_PMDS		0xa1
+#define PFM_READ_PMDS		0xa2
+#define PFM_STOP		0xa3
+#define PFM_START		0xa4
+#define PFM_ENABLE		0xa5	/* unfreeze only */
+#define PFM_DISABLE		0xa6	/* freeze only */
+/* 
+ * Those 2 are just meant for debugging. I considered using sysctl() for
+ * that but it is a little bit too pervasive. This solution is at least
+ * self-contained.
+ */
+#define PFM_DEBUG_ON		0xe0	
+#define PFM_DEBUG_OFF		0xe1
+
+#ifdef CONFIG_SMP
+#define cpu_is_online(i) (cpu_online_map & (1UL << i))
+#else
+#define cpu_is_online(i)	1
+#endif
 
+#define PMC_IS_IMPL(i)		(pmu_conf.impl_regs[i>>6] & (1<< (i&~(64-1))))
+#define PMD_IS_IMPL(i)  	(pmu_conf.impl_regs[4+(i>>6)] & (1<< (i&~(64-1))))
+#define PMD_IS_COUNTER(i)	(i>=PMU_FIRST_COUNTER && i < (PMU_FIRST_COUNTER+pmu_conf.max_counters))
+#define PMC_IS_COUNTER(i)	(i>=PMU_FIRST_COUNTER && i < (PMU_FIRST_COUNTER+pmu_conf.max_counters))
 
 /*
  * this structure needs to be enhanced
  */
 typedef struct {
+	unsigned long	pfr_reg_num;	/* which register */
+	unsigned long	pfr_reg_value;	/* configuration (PMC) or initial value (PMD) */
+	unsigned long	pfr_reg_reset;	/* reset value on overflow (PMD) */
+	void		*pfr_smpl_buf;	/* pointer to user buffer for EAR/BTB */
+	unsigned long	pfr_smpl_size;	/* size of user buffer for EAR/BTB */
+	pid_t		pfr_notify_pid;	/* process to notify */
+	int		pfr_notify_sig;	/* signal for notification, 0=no notification */
+} perfmon_req_t;
+
+#if 0
+typedef struct {
 	unsigned long pmu_reg_data;	/* generic PMD register */
 	unsigned long pmu_reg_num;	/* which register number */
 } perfmon_reg_t; 
+#endif
 
 /*
  * This structure is initialize at boot time and contains
@@ -78,86 +112,141 @@
  * by PAL
  */
 typedef struct {
-	unsigned long perf_ovfl_val;	/* overflow value for generic counters */
-	unsigned long max_pmc;		/* highest PMC */
-	unsigned long max_pmd;		/* highest PMD */
-	unsigned long max_counters;	/* number of generic counter pairs (PMC/PMD) */
+	unsigned long perf_ovfl_val;	/* overflow value for generic counters   */
+	unsigned long max_counters;	/* upper limit on counter pair (PMC/PMD) */
+	unsigned long impl_regs[16];	/* buffer used to hold implememted PMC/PMD mask */
 } pmu_config_t;
 
-/* XXX will go static when ptrace() is cleaned */
-unsigned long perf_ovfl_val;	/* overflow value for generic counters */
-
 static pmu_config_t pmu_conf;
 
+/* for debug only */
+static unsigned long pfm_debug=1;	/* 0= nodebug, >0= debug output on */
+#define DBprintk(a)	{\
+	if (pfm_debug >0) { printk a; } \
+}
+
 /*
- * could optimize to avoid cache conflicts in SMP
+ * could optimize to avoid cache line conflicts in SMP
  */
-unsigned long pmds[NR_CPUS][MAX_PERF_COUNTER];
+static struct task_struct *pmu_owners[NR_CPUS];
 
-asmlinkage unsigned long
-sys_perfmonctl (int cmd, int count, void *ptr, long arg4, long arg5, long arg6, long arg7, long arg8, long stack)
+static int
+do_perfmonctl (struct task_struct *task, int cmd, int flags, perfmon_req_t *req, int count, struct pt_regs *regs)
 {
-	struct pt_regs *regs = (struct pt_regs *) &stack;
-        perfmon_reg_t tmp, *cptr = ptr;
-        unsigned long cnum;
+        perfmon_req_t tmp;
         int i;
 
         switch (cmd) {
-	      case WRITE_PMCS:           /* Writes to PMC's and clears PMDs */
-	      case WRITE_PMCS_AND_START: /* Also starts counting */
+		case PFM_WRITE_PMCS:          
+			/* we don't quite support this right now */
+			if (task != current) return -EINVAL;
+
+			if (!access_ok(VERIFY_READ, req, sizeof(struct perfmon_req_t)*count)) return -EFAULT;
+
+			for (i = 0; i < count; i++, req++) {
+				copy_from_user(&tmp, req, sizeof(tmp));
+
+				/* XXX needs to check validity of the data maybe */
+
+				if (!PMC_IS_IMPL(tmp.pfr_reg_num)) {
+					DBprintk((__FUNCTION__ " invalid pmc[%ld]\n", tmp.pfr_reg_num));
+					return -EINVAL;
+				}
+
+				/* XXX: for counters, need to some checks */
+				if (PMC_IS_COUNTER(tmp.pfr_reg_num)) {
+					current->thread.pmu_counters[tmp.pfr_reg_num - PMU_FIRST_COUNTER].sig = tmp.pfr_notify_sig;
+					current->thread.pmu_counters[tmp.pfr_reg_num - PMU_FIRST_COUNTER].pid = tmp.pfr_notify_pid;
+
+					DBprintk((__FUNCTION__" setting PMC[%ld] send sig %d to %d\n",tmp.pfr_reg_num, tmp.pfr_notify_sig, tmp.pfr_notify_pid));
+				}
+				ia64_set_pmc(tmp.pfr_reg_num, tmp.pfr_reg_value);
+
+				DBprintk((__FUNCTION__" setting PMC[%ld]=0x%lx\n", tmp.pfr_reg_num, tmp.pfr_reg_value));
+			}
+			/*
+			 * we have to set this here even though we haven't necessarily started monitoring
+			 * because we may be context switched out
+			 */
+			current->thread.flags |= IA64_THREAD_PM_VALID;
+                	break;
+
+		case PFM_WRITE_PMDS:
+			/* we don't quite support this right now */
+			if (task != current) return -EINVAL;
+
+			if (!access_ok(VERIFY_READ, req, sizeof(struct perfmon_req_t)*count)) return -EFAULT;
+
+			for (i = 0; i < count; i++, req++) {
+				copy_from_user(&tmp, req, sizeof(tmp));
+
+				if (!PMD_IS_IMPL(tmp.pfr_reg_num)) return -EINVAL;
+
+				/* update virtualized (64bits) counter */
+				if (PMD_IS_COUNTER(tmp.pfr_reg_num)) {
+					current->thread.pmu_counters[tmp.pfr_reg_num - PMU_FIRST_COUNTER].val  = tmp.pfr_reg_value & ~pmu_conf.perf_ovfl_val;
+					current->thread.pmu_counters[tmp.pfr_reg_num - PMU_FIRST_COUNTER].rval = tmp.pfr_reg_reset;
+				}
+				/* writes to unimplemented part is ignored, so this is safe */
+				ia64_set_pmd(tmp.pfr_reg_num, tmp.pfr_reg_value);
+				/* to go away */
+				ia64_srlz_d();
+				DBprintk((__FUNCTION__" setting PMD[%ld]:  pmod.val=0x%lx pmd=0x%lx rval=0x%lx\n", tmp.pfr_reg_num, current->thread.pmu_counters[tmp.pfr_reg_num - PMU_FIRST_COUNTER].val, ia64_get_pmd(tmp.pfr_reg_num),current->thread.pmu_counters[tmp.pfr_reg_num - PMU_FIRST_COUNTER].rval));
+			}
+			/*
+			 * we have to set this here even though we haven't necessarily started monitoring
+			 * because we may be context switched out
+			 */
+			current->thread.flags |= IA64_THREAD_PM_VALID;
+                	break;
+
+		case PFM_START:
+			/* we don't quite support this right now */
+			if (task != current) return -EINVAL;
 
-		if (!access_ok(VERIFY_READ, cptr, sizeof(struct perfmon_reg_t)*count))
-			return -EFAULT;
+			pmu_owners[smp_processor_id()] = current;
 
-		for (i = 0; i < count; i++, cptr++) {
+			/* will start monitoring right after rfi */
+			ia64_psr(regs)->up = 1;
 
-			copy_from_user(&tmp, cptr, sizeof(tmp));
+			/* 
+		 	 * mark the state as valid.
+		 	 * this will trigger save/restore at context switch
+		 	 */
+			current->thread.flags |= IA64_THREAD_PM_VALID;
 
-			/* XXX need to check validity of pmu_reg_num and perhaps data!! */
+			ia64_set_pmc(0, 0);
 
-			if (tmp.pmu_reg_num > pmu_conf.max_pmc || tmp.pmu_reg_num == 0) return -EFAULT;
+                	break;
 
-			ia64_set_pmc(tmp.pmu_reg_num, tmp.pmu_reg_data);
+		case PFM_ENABLE:
+			/* we don't quite support this right now */
+			if (task != current) return -EINVAL;
 
-			/* to go away */
-			if (tmp.pmu_reg_num >= PMU_FIRST_COUNTER && tmp.pmu_reg_num < PMU_FIRST_COUNTER+pmu_conf.max_counters) {
-				ia64_set_pmd(tmp.pmu_reg_num, 0);
-				pmds[smp_processor_id()][tmp.pmu_reg_num - PMU_FIRST_COUNTER] = 0;
+			pmu_owners[smp_processor_id()] = current;
 
-				printk(__FUNCTION__" setting PMC/PMD[%ld] es=0x%lx pmd[%ld]=%lx\n", tmp.pmu_reg_num, (tmp.pmu_reg_data>>8) & 0x7f, tmp.pmu_reg_num, ia64_get_pmd(tmp.pmu_reg_num));
-			} else
-				printk(__FUNCTION__" setting PMC[%ld]=0x%lx\n", tmp.pmu_reg_num, tmp.pmu_reg_data);
-		}
-
-		if (cmd == WRITE_PMCS_AND_START) {
-#if 0
-/* irrelevant with user monitors */
-			local_irq_save(flags);
-
-			dcr = ia64_get_dcr();
-			dcr |= IA64_DCR_PP;
-			ia64_set_dcr(dcr);
-
-			local_irq_restore(flags);
-#endif
+			/* 
+		 	 * mark the state as valid.
+		 	 * this will trigger save/restore at context switch
+		 	 */
+			current->thread.flags |= IA64_THREAD_PM_VALID;
 
+			/* simply unfreeze */
 			ia64_set_pmc(0, 0);
+			break;
 
-			/* will start monitoring right after rfi */
-			ia64_psr(regs)->up = 1;
-		}
-		/* 
-		 * mark the state as valid.
-		 * this will trigger save/restore at context switch
-		 */
-		current->thread.flags |= IA64_THREAD_PM_VALID;
-                break;
-
-	      case READ_PMDS:
-		if (count <= 0 || count > MAX_PERF_COUNTER)
-			return -EINVAL;
-		if (!access_ok(VERIFY_WRITE, cptr, sizeof(struct perfmon_reg_t)*count))
-			return -EFAULT;
+		case PFM_DISABLE:
+			/* we don't quite support this right now */
+			if (task != current) return -EINVAL;
+
+			/* simply unfreeze */
+			ia64_set_pmc(0, 1);
+			ia64_srlz_d();
+			break;
+
+	        case PFM_READ_PMDS:
+			if (!access_ok(VERIFY_READ, req, sizeof(struct perfmon_req_t)*count)) return -EFAULT;
+			if (!access_ok(VERIFY_WRITE, req, sizeof(struct perfmon_req_t)*count)) return -EFAULT;
 
 		/* This looks shady, but IMHO this will work fine. This is  
 		 * the sequence that I could come up with to avoid races
@@ -187,16 +276,31 @@
 		 * is the irq_save/restore needed?
 		 */
 
+		for (i = 0; i < count; i++, req++) {
+			unsigned long val=0;
 
-		/* XXX: This needs to change to read more than just the counters */
-		for (i = 0, cnum = PMU_FIRST_COUNTER;i < count; i++, cnum++, cptr++) {
+			copy_from_user(&tmp, req, sizeof(tmp));
 
-			tmp.pmu_reg_data = (pmds[smp_processor_id()][i]
-				    + (ia64_get_pmd(cnum) & pmu_conf.perf_ovfl_val));
+			if (!PMD_IS_IMPL(tmp.pfr_reg_num)) return -EINVAL;
 
-			tmp.pmu_reg_num = cnum;
+			if (PMD_IS_COUNTER(tmp.pfr_reg_num)) {
+				if (task == current){
+					val = ia64_get_pmd(tmp.pfr_reg_num) & pmu_conf.perf_ovfl_val;
+				} else {
+					val = task->thread.pmd[tmp.pfr_reg_num - PMU_FIRST_COUNTER] & pmu_conf.perf_ovfl_val;
+				}
+				val += task->thread.pmu_counters[tmp.pfr_reg_num - PMU_FIRST_COUNTER].val;
+			} else {
+				/* for now */
+				if (task != current) return -EINVAL;
+
+				val = ia64_get_pmd(tmp.pfr_reg_num);
+			}
+			tmp.pfr_reg_value = val;
 
-			if (copy_to_user(cptr, &tmp, sizeof(tmp))) return -EFAULT;
+DBprintk((__FUNCTION__" reading PMD[%ld]=0x%lx\n", tmp.pfr_reg_num, val));
+
+			if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT;
 		}
 #if 0
 /* irrelevant with user monitors */
@@ -209,11 +313,18 @@
 #endif
                 break;
 
-	      case STOP_PMCS:
+	      case PFM_STOP:
+		/* we don't quite support this right now */
+		if (task != current) return -EINVAL;
+
 		ia64_set_pmc(0, 1);
 		ia64_srlz_d();
-		for (i = 0; i < MAX_PERF_COUNTER; ++i)
-			ia64_set_pmc(4+i, 0);
+
+		ia64_psr(regs)->up = 0;
+
+		current->thread.flags &= ~IA64_THREAD_PM_VALID;
+
+		pmu_owners[smp_processor_id()] = NULL;
 
 #if 0
 /* irrelevant with user monitors */
@@ -225,48 +336,140 @@
 		ia64_psr(regs)->up = 0;
 #endif
 
-		current->thread.flags &= ~(IA64_THREAD_PM_VALID);
-
 		break;
 
+	      case PFM_DEBUG_ON:
+			printk(__FUNCTION__" debuggin on\n");
+			pfm_debug = 1;
+			break;
+
+	      case PFM_DEBUG_OFF:
+			printk(__FUNCTION__" debuggin off\n");
+			pfm_debug = 0;
+			break;
+
 	      default:
+		DBprintk((__FUNCTION__" UNknown command 0x%x\n", cmd));
 		return -EINVAL;
 		break;
         }
         return 0;
 }
 
-static inline void
-update_counters (void)
+asmlinkage int
+sys_perfmonctl (int pid, int cmd, int flags, perfmon_req_t *req, int count, long arg6, long arg7, long arg8, long stack)
 {
-	unsigned long mask, i, cnum, val;
+	struct pt_regs *regs = (struct pt_regs *) &stack;
+	struct task_struct *child = current;
+	int ret;
 
-	mask = ia64_get_pmc(0) >> 4;
-	for (i = 0, cnum = PMU_FIRST_COUNTER ; i < pmu_conf.max_counters; cnum++, i++, mask >>= 1) {
+	if (pid != current->pid) {
+		read_lock(&tasklist_lock);
+		{
+			child = find_task_by_pid(pid);
+			if (child)
+				get_task_struct(child);
+		}
+		if (!child) { 
+			read_unlock(&tasklist_lock);
+			return -ESRCH;
+		}
+		/*
+		 * XXX: need to do more checking here
+		 */
+		if (child->state != TASK_ZOMBIE) {
+			DBprintk((__FUNCTION__" warning process %d not in stable state %ld\n", pid, child->state));
+		}
+	} 
+	ret = do_perfmonctl(child, cmd, flags, req, count, regs);
 
+	if (child != current) read_unlock(&tasklist_lock);
 
-		val = mask & 0x1 ? pmu_conf.perf_ovfl_val + 1 : 0;
+	return ret;
+}
 
-		if (mask & 0x1) 
-			printk(__FUNCTION__ " PMD%ld overflowed pmd=%lx pmod=%lx\n", cnum, ia64_get_pmd(cnum), pmds[smp_processor_id()][i]); 
 
-		/* since we got an interrupt, might as well clear every pmd. */
-		val += ia64_get_pmd(cnum) & pmu_conf.perf_ovfl_val;
+static inline int
+update_counters (u64 pmc0)
+{
+	unsigned long mask, i, cnum;
+	struct thread_struct *th;
+	struct task_struct *ta;
+
+	if (pmu_owners[smp_processor_id()] == NULL) {
+		DBprintk((__FUNCTION__" Spurious overflow interrupt: PMU not owned\n"));
+		return 0;
+	}
+	
+	/*
+	 * It is never safe to access the task for which the overflow interrupt is destined
+	 * using the current variable as the interrupt may occur in the middle of a context switch
+	 * where current does not hold the task that is running yet.
+	 *
+	 * For monitoring, however, we do need to get access to the task which caused the overflow
+	 * to account for overflow on the counters.
+	 * We accomplish this by maintaining a current owner of the PMU per CPU. During context
+	 * switch the ownership is changed in a way such that the reflected owner is always the 
+	 * valid one, i.e. the one that caused the interrupt.
+	 */
+	ta = pmu_owners[smp_processor_id()];
+	th = &pmu_owners[smp_processor_id()]->thread;
 
-		printk(__FUNCTION__ " adding val=%lx to pmod[%ld]=%lx \n", val, i, pmds[smp_processor_id()][i]); 
+	/*
+	 * Don't think this could happen given first test. Keep as sanity check
+	 */
+	if ((th->flags & IA64_THREAD_PM_VALID) == 0) {
+		DBprintk((__FUNCTION__" Spurious overflow interrupt: process %d not using perfmon\n", ta->pid));
+		return 0;
+	}
+
+	/*
+	 * if PMU not frozen: spurious from previous context 
+	 * if PMC[0] = 0x1 : frozen but no overflow reported: leftover from previous context
+	 *
+	 * in either case we don't touch the state upon return from handler
+	 */
+	if ((pmc0 & 0x1) == 0 || pmc0 == 0x1) { 
+		DBprintk((__FUNCTION__" Spurious overflow interrupt: process %d freeze=0\n",ta->pid));
+		return 0;
+	}
 
-		pmds[smp_processor_id()][i] += val;
+	mask = pmc0 >> 4;
 
-		ia64_set_pmd(cnum, 0);
+	for (i = 0, cnum = PMU_FIRST_COUNTER; i < pmu_conf.max_counters; cnum++, i++, mask >>= 1) {
+
+		if (mask & 0x1) {
+			DBprintk((__FUNCTION__ " PMD[%ld] overflowed pmd=0x%lx pmod.val=0x%lx\n", cnum, ia64_get_pmd(cnum), th->pmu_counters[i].val)); 
+			
+			/*
+			 * Because we sometimes (EARS/BTB) reset to a specific value, we cannot simply use 
+			 * val to count the number of times we overflowed. Otherwise we would lose the value
+			 * current in the PMD (which can be >0). So to make sure we don't lose
+			 * the residual counts we set val to contain full 64bits value of the counter.
+			 */
+			th->pmu_counters[i].val += 1+pmu_conf.perf_ovfl_val+(ia64_get_pmd(cnum) &pmu_conf.perf_ovfl_val);
+
+			/* writes to upper part are ignored, so this is safe */
+			ia64_set_pmd(cnum, th->pmu_counters[i].rval);
+
+			DBprintk((__FUNCTION__ " pmod[%ld].val=0x%lx pmd=0x%lx\n", i, th->pmu_counters[i].val, ia64_get_pmd(cnum)&pmu_conf.perf_ovfl_val)); 
+
+			if (th->pmu_counters[i].pid != 0 && th->pmu_counters[i].sig>0) {
+				DBprintk((__FUNCTION__ " shouild notify process %d with signal %d\n",th->pmu_counters[i].pid, th->pmu_counters[i].sig)); 
+			}
+		}
 	}
+	return 1;
 }
 
 static void
 perfmon_interrupt (int irq, void *arg, struct pt_regs *regs)
 {
-	update_counters();
-	ia64_set_pmc(0, 0);
-	ia64_srlz_d();
+	/* unfreeze if not spurious */
+	if ( update_counters(ia64_get_pmc(0)) ) {
+		ia64_set_pmc(0, 0);
+		ia64_srlz_d();
+	}
 }
 
 static struct irqaction perfmon_irqaction = {
@@ -280,9 +483,13 @@
 {
 	char *p = page;
 	u64 pmc0 = ia64_get_pmc(0);
+	int i;
 
-	p += sprintf(p, "PMC[0]=%lx\n", pmc0);
-
+	p += sprintf(p, "PMC[0]=%lx\nPerfmon debug: %s\n", pmc0, pfm_debug ? "On" : "Off");
+	for(i=0; i < NR_CPUS; i++) {
+		if (cpu_is_online(i)) 
+			p += sprintf(p, "CPU%d.PMU %d\n", i, pmu_owners[i] ? pmu_owners[i]->pid: -1);
+	}
 	return p - page;
 }
 
@@ -308,7 +515,6 @@
 perfmon_init (void)
 {
 	pal_perf_mon_info_u_t pm_info;
-	u64 pm_buffer[16];
 	s64 status;
 	
 	irq_desc[PERFMON_IRQ].status |= IRQ_PER_CPU;
@@ -320,15 +526,13 @@
 
 	printk("perfmon: Initialized vector to %u\n",PERFMON_IRQ);
 
-	if ((status=ia64_pal_perf_mon_info(pm_buffer, &pm_info)) != 0) {
+	if ((status=ia64_pal_perf_mon_info(pmu_conf.impl_regs, &pm_info)) != 0) {
 		printk(__FUNCTION__ " pal call failed (%ld)\n", status);
 		return;
 	} 
-	pmu_conf.perf_ovfl_val = perf_ovfl_val = (1L << pm_info.pal_perf_mon_info_s.width) - 1; 
+	pmu_conf.perf_ovfl_val = (1L << pm_info.pal_perf_mon_info_s.width) - 1; 
 
 	/* XXX need to use PAL instead */
-	pmu_conf.max_pmc       = 13;
-	pmu_conf.max_pmd       = 17;
 	pmu_conf.max_counters  = pm_info.pal_perf_mon_info_s.generic;
 
 	printk("perfmon: Counters are %d bits\n", pm_info.pal_perf_mon_info_s.width);
@@ -347,36 +551,137 @@
 	ia64_srlz_d();
 }
 
+/*
+ * XXX: for system wide this function MUST never be called
+ */
 void
-ia64_save_pm_regs (struct thread_struct *t)
+ia64_save_pm_regs (struct task_struct *ta)
 {
-	int i;
+	struct thread_struct *t = &ta->thread;
+	u64 pmc0, psr;
+	int i,j;
+
+	/*
+	 * We must make sure that we don't lose any potential overflow
+	 * interrupt while saving PMU context. In this code, external
+	 * interrupts are always enabled.
+	 */
+
+	/*
+	 * save current PSR: needed because we modify it
+	 */
+	__asm__ __volatile__ ("mov %0=psr;;": "=r"(psr) :: "memory");
+
+	/*
+	 * stop monitoring:
+	 * This is the only way to stop monitoring without destroying overflow
+	 * information in PMC[0..3].
+	 * This is the last instruction which can cause overflow when monitoring
+	 * in kernel.
+	 * By now, we could still have an overflow interrupt in flight.
+	 */
+	__asm__ __volatile__ ("rsm psr.up;;"::: "memory");
+	
+	/*
+	 * read current overflow status:
+	 *
+	 * We may be reading stale information at this point, if we got interrupt
+	 * just before the read(pmc0) but that's all right. However, if we did
+	 * not get the interrupt before, this read reflects LAST state.
+	 *
+	 */
+	pmc0 = ia64_get_pmc(0);
 
+	/*
+	 * freeze PMU:
+	 *
+	 * This destroys the overflow information. This is required to make sure
+	 * next process does not start with monitoring on if not requested
+	 * (PSR.up may not be enough).
+	 *
+	 * We could still get an overflow interrupt by now. However the handler
+	 * will not do anything if it sees PMC[0].fr=1 but no overflow bits
+	 * are set. So PMU will stay in frozen state. This implies that pmc0
+	 * will still be holding the correct unprocessed information.
+	 *
+	 */
 	ia64_set_pmc(0, 1);
 	ia64_srlz_d();
+
+	/*
+	 * check for overflow bits set:
+	 *
+	 * If pmc0 reports PMU frozen, this means we have a pending overflow,
+	 * therefore we invoke the handler. Handler is reentrant with regards
+	 * to PMC[0] so it is safe to call it twice.
+	 *
+	 * IF pmc0 reports overflow, we need to reread current PMC[0] value
+	 * in case the handler was invoked right after the first pmc0 read.
+	 * if it was not invoked then pmc0==PMC[0], otherwise it's been invoked
+	 * and overflow information has been processed, so we don't need to call.
+	 *
+	 * Test breakdown:
+	 *	- pmc0 & ~0x1: test if overflow happened
+	 * 	- second part: check if current register reflects this as well.
+	 *
+	 * NOTE: testing for pmc0 & 0x1 is not enough as it would trigger call
+	 * when PM_VALID and PMU.fr which is common when setting up registers
+	 * just before actually starting monitors.
+	 *
+	 */
+	if ((pmc0 & ~0x1) && ((pmc0=ia64_get_pmc(0)) &~0x1) ) {
+		printk(__FUNCTION__" Warning: pmc[0]=0x%lx\n", pmc0);
+		update_counters(pmc0);
+		/* 
+		 * XXX: not sure that's enough. the next task may still get the
+		 * interrupt.
+		 */
+	}
+
+	/*
+	 * restore PSR for context switch to save
+	 */
+	__asm__ __volatile__ ("mov psr.l=%0;;"::"r"(psr): "memory");
+
 	/*
 	 * XXX: this will need to be extended beyong just counters
 	 */
-	for (i=0; i< IA64_NUM_PM_REGS; i++) {
-		t->pmd[i]  = ia64_get_pmd(4+i);
-		t->pmod[i] = pmds[smp_processor_id()][i];
-		t->pmc[i]  = ia64_get_pmc(4+i);
+	for (i=0,j=4; i< IA64_NUM_PMD_COUNTERS; i++,j++) {
+		t->pmd[i] = ia64_get_pmd(j);
+		t->pmc[i] = ia64_get_pmc(j);
 	}
+	/*
+	 * PMU is frozen, PMU context is saved: nobody owns the PMU on this CPU
+	 * At this point, we should not receive any pending interrupt from the 
+	 * 'switched out' task
+	 */
+	pmu_owners[smp_processor_id()] = NULL;
 }
 
 void
-ia64_load_pm_regs (struct thread_struct *t)
+ia64_load_pm_regs (struct task_struct *ta)
 {
-	int i;
+	struct thread_struct *t = &ta->thread;
+	int i,j;
+
+	/*
+	 * we first restore ownership of the PMU to the 'soon to be current'
+	 * context. This way, if, as soon as we unfreeze the PMU at the end
+	 * of this function, we get an interrupt, we attribute it to the correct
+	 * task
+	 */
+	pmu_owners[smp_processor_id()] = ta;
 
 	/*
 	 * XXX: this will need to be extended beyong just counters 
 	 */
-	for (i=0; i< IA64_NUM_PM_REGS ; i++) {
-		ia64_set_pmd(4+i, t->pmd[i]);
-		pmds[smp_processor_id()][i] = t->pmod[i];
-		ia64_set_pmc(4+i, t->pmc[i]);
+	for (i=0,j=4; i< IA64_NUM_PMD_COUNTERS; i++,j++) {
+		ia64_set_pmd(j, t->pmd[i]);
+		ia64_set_pmc(j, t->pmc[i]);
 	}
+	/*
+	 * unfreeze PMU
+	 */
 	ia64_set_pmc(0, 0);
 	ia64_srlz_d();
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/process.c linux/arch/ia64/kernel/process.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/process.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/process.c	Thu Jan  4 12:50:17 2001
@@ -137,23 +137,6 @@
 		check_pgt_cache();
 		if (pm_idle)
 			(*pm_idle)();
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
-		local_irq_disable();
-		{
-			u64 itc, itm;
-
-			itc = ia64_get_itc();
-			itm = ia64_get_itm();
-			if (time_after(itc, itm + 1000)) {
-				extern void ia64_reset_itm (void);
-
-				printk("cpu_idle: ITM in past (itc=%lx,itm=%lx:%lums)\n",
-				       itc, itm, (itc - itm)/500000);
-				ia64_reset_itm();
-			}
-		}
-		local_irq_enable();
-#endif
 	}
 }
 
@@ -164,7 +147,7 @@
 		ia64_save_debug_regs(&task->thread.dbr[0]);
 #ifdef CONFIG_PERFMON
 	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
-		ia64_save_pm_regs(&task->thread);
+		ia64_save_pm_regs(task);
 #endif
 	if (IS_IA32_PROCESS(ia64_task_regs(task)))
 		ia32_save_state(&task->thread);
@@ -177,7 +160,7 @@
 		ia64_load_debug_regs(&task->thread.dbr[0]);
 #ifdef CONFIG_PERFMON
 	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
-		ia64_load_pm_regs(&task->thread);
+		ia64_load_pm_regs(task);
 #endif
 	if (IS_IA32_PROCESS(ia64_task_regs(task)))
 		ia32_load_state(&task->thread);
@@ -299,6 +282,14 @@
 #	define THREAD_FLAGS_TO_SET	0
 	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
 			   | THREAD_FLAGS_TO_SET);
+#ifdef CONFIG_IA32_SUPPORT
+	/*
+	 * If we're cloning an IA32 task then save the IA32 extra
+	 * state from the current task to the new task
+	 */
+	if (IS_IA32_PROCESS(ia64_task_regs(current)))
+		ia32_save_state(&p->thread);
+#endif
 	return 0;
 }
 
@@ -554,7 +545,7 @@
 		 * we garantee no race.  this call we also stop
 		 * monitoring
 		 */
-		ia64_save_pm_regs(&current->thread);
+		ia64_save_pm_regs(current);
 		/*
 		 * make sure that switch_to() will not save context again
 		 */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/ptrace.c linux/arch/ia64/kernel/ptrace.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/ptrace.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/ptrace.c	Thu Jan  4 12:50:17 2001
@@ -617,7 +617,6 @@
 	struct switch_stack *sw;
 	struct unw_frame_info info;
 	struct pt_regs *pt;
-	unsigned long pmd_tmp;
 
 	pt = ia64_task_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
@@ -794,11 +793,7 @@
 				addr);
 			return -1;
 		}
-	} else 
-#ifdef CONFIG_PERFMON
-		if (addr < PT_PMD) 
-#endif
-		{
+	} else {
 		/* access debug registers */
 
 		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
@@ -820,33 +815,14 @@
 		}
 
 		ptr += regnum;
-	}
-#ifdef CONFIG_PERFMON
-	else {
-		/*
-		 * XXX: will eventually move back to perfmonctl()
-		 */
-		unsigned long pmd = (addr - PT_PMD) >> 3;
-		extern unsigned long perf_ovfl_val;
-
-		/* we just use ptrace to read */
-		if (write_access) return -1;
-
-		if (pmd > 3) {
-			printk("ptrace: rejecting access to PMD[%ld] address 0x%lx\n", pmd, addr);
-			return -1;
-		}
 
-		/* 
-		 * We always need to mask upper 32bits of pmd because value is random
-		 */
-		pmd_tmp = child->thread.pmod[pmd]+(child->thread.pmd[pmd]& perf_ovfl_val);
-
-		/*printk(__FUNCTION__" child=%d reading pmd[%ld]=%lx\n", child->pid, pmd, pmd_tmp);*/
-
-		ptr = &pmd_tmp;
+		if (write_access)
+			/* don't let the user set kernel-level breakpoints... */
+			*ptr = *data & ~(7UL << 56);
+		else
+			*data = *ptr;
+		return 0;
 	}
-#endif
 	if (write_access)
 		*ptr = *data;
 	else
@@ -861,7 +837,6 @@
 {
 	unsigned long *ptr = NULL, *rbs, *bspstore, ndirty, regnum;
 	struct switch_stack *sw;
-	unsigned long pmd_tmp;
 	struct pt_regs *pt;
 
 	if ((addr & 0x7) != 0)
@@ -977,11 +952,7 @@
 			/* disallow accessing anything else... */
 			return -1;
 		}
-	} else 
-#ifdef CONFIG_PERFMON
-		if (addr < PT_PMD) 
-#endif
-		{
+	} else {
 
 		/* access debug registers */
 
@@ -1002,34 +973,14 @@
 			return -1;
 
 		ptr += regnum;
-	}
-#ifdef CONFIG_PERFMON
-	else {
-		/*
-		 * XXX: will eventually move back to perfmonctl()
-		 */
-		unsigned long pmd = (addr - PT_PMD) >> 3;
-		extern unsigned long perf_ovfl_val;
 
-		/* we just use ptrace to read */
-		if (write_access) return -1;
-
-		if (pmd > 3) {
-			printk("ptrace: rejecting access to PMD[%ld] address 0x%lx\n", pmd, addr);
-			return -1;
-		}
-
-		/* 
-		 * We always need to mask upper 32bits of pmd because value is random
-		 */
-		pmd_tmp = child->thread.pmod[pmd]+(child->thread.pmd[pmd]& perf_ovfl_val);
-
-		/*printk(__FUNCTION__" child=%d reading pmd[%ld]=%lx\n", child->pid, pmd, pmd_tmp);*/
-
-		ptr = &pmd_tmp;
+		if (write_access)
+			/* don't let the user set kernel-level breakpoints... */
+			*ptr = *data & ~(7UL << 56);
+		else
+			*data = *ptr;
+		return 0;
 	}
-#endif
-
 	if (write_access)
 		*ptr = *data;
 	else
@@ -1107,7 +1058,7 @@
 		goto out_tsk;
 
 	if (child->state != TASK_STOPPED) {
-		if (request != PTRACE_KILL && request != PTRACE_PEEKUSR)
+		if (request != PTRACE_KILL)
 			goto out_tsk;
 	}
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/sal.c linux/arch/ia64/kernel/sal.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/sal.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/sal.c	Thu Jan  4 12:50:17 2001
@@ -104,9 +104,11 @@
 	if (strncmp(systab->signature, "SST_", 4) != 0)
 		printk("bad signature in system table!");
 
-	printk("SAL v%u.%02u: ia32bios=%s, oem=%.32s, product=%.32s\n",
+	/* 
+	 * revisions are coded in BCD, so %x does the job for us
+	 */
+	printk("SAL v%x.%02x: oem=%.32s, product=%.32s\n",
 	       systab->sal_rev_major, systab->sal_rev_minor,
-	       systab->ia32_bios_present ? "present" : "absent",
 	       systab->oem_id, systab->product_id);
 
 	min = ~0UL;
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/setup.c linux/arch/ia64/kernel/setup.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/setup.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/setup.c	Thu Jan  4 12:50:17 2001
@@ -235,6 +235,12 @@
 	machvec_init(acpi_get_sysname());
 #endif
 
+#ifdef	CONFIG_ACPI20
+	if (efi.acpi20) {
+		/* Parse the ACPI 2.0 tables */
+		acpi20_parse(efi.acpi20);
+	} else 
+#endif
 	if (efi.acpi) {
 		/* Parse the ACPI tables */
 		acpi_parse(efi.acpi);
@@ -255,13 +261,6 @@
 
 	paging_init();
 	platform_setup(cmdline_p);
-
-#ifdef CONFIG_SWIOTLB
-	{
-		extern void setup_swiotlb (void);
-		setup_swiotlb();
-	}
-#endif
 }
 
 /*
@@ -271,9 +270,9 @@
 get_cpuinfo (char *buffer)
 {
 #ifdef CONFIG_SMP
-#	define lps	c->loops_per_sec
+#	define lpj	c->loops_per_jiffy
 #else
-#	define lps	loops_per_sec
+#	define lpj	loops_per_jiffy
 #endif
 	char family[32], model[32], features[128], *cp, *p = buffer;
 	struct cpuinfo_ia64 *c;
@@ -325,7 +324,7 @@
 			     features,
 			     c->ppn, c->number, c->proc_freq / 1000000, c->proc_freq % 1000000,
 			     c->itc_freq / 1000000, c->itc_freq % 1000000,
-			     lps / 500000, (lps / 5000) % 100);
+			     lpj*HZ/500000, (lpj*HZ/5000) % 100);
         }
 	return p - buffer;
 }
@@ -376,15 +375,7 @@
 
 	status = ia64_pal_vm_summary(&vm1, &vm2);
 	if (status == PAL_STATUS_SUCCESS) {
-#if 1
-		/*
-		 * XXX the current PAL code returns IMPL_VA_MSB==60, which is dead-wrong.
-		 * --davidm 00/05/26
-		 s*/
-		impl_va_msb = 50;
-#else
 		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
-#endif
 		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
 	}
 	printk("CPU %d: %lu virtual and %lu physical address bits\n",
@@ -408,6 +399,8 @@
 {
 	extern void __init ia64_rid_init (void);
 	extern void __init ia64_tlb_init (void);
+	pal_vm_info_2_u_t vmi;
+	unsigned int max_ctx;
 
 	identify_cpu(&my_cpu_data);
 
@@ -415,15 +408,12 @@
 	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
 
 	/*
-	 * Initialize default control register to defer speculative
-	 * faults.  On a speculative load, we want to defer access
-	 * right, key miss, and key permission faults.  We currently
-	 * do NOT defer TLB misses, page-not-present, access bit, or
-	 * debug faults but kernel code should not rely on any
-	 * particular setting of these bits.
-	ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_PP);
+	 * Initialize default control register to defer all speculative faults.  The
+	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
+	 * the kernel must have recovery code for all speculative accesses).
 	 */
-	ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX );
+	ia64_set_dcr(  IA64_DCR_DM | IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
+		     | IA64_DCR_DA | IA64_DCR_DD);
 #ifndef CONFIG_SMP
 	ia64_set_fpu_owner(0);		/* initialize ar.k5 */
 #endif
@@ -444,4 +434,17 @@
 #ifdef CONFIG_SMP
 	normal_xtp();
 #endif
+
+	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
+	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
+		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
+	else {
+		printk("ia64_rid_init: PAL VM summary failed, assuming 18 RID bits\n");
+		max_ctx = (1U << 15) - 1;	/* use architected minimum */
+	}
+	while (max_ctx < ia64_ctx.max_ctx) {
+		unsigned int old = ia64_ctx.max_ctx;
+		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
+			break;
+	}
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/signal.c linux/arch/ia64/kernel/signal.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/signal.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/signal.c	Thu Jan  4 12:50:17 2001
@@ -91,7 +91,7 @@
 		scr->pt.r10 = -1;
 	}
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
+		current->state = TASK_INTERRUPTIBLE;
 		schedule();
 		if (ia64_do_signal(&oldset, scr, 1))
 			return -EINTR;
@@ -499,9 +499,10 @@
 			/* Let the debugger run.  */
 			current->exit_code = signr;
 			current->thread.siginfo = &info;
-			set_current_state(TASK_STOPPED);
+			current->state = TASK_STOPPED;
 			notify_parent(current, SIGCHLD);
 			schedule();
+
 			signr = current->exit_code;
 			current->thread.siginfo = 0;
 
@@ -557,7 +558,7 @@
 				/* FALLTHRU */
 
 			      case SIGSTOP:
-				set_current_state(TASK_STOPPED);
+				current->state = TASK_STOPPED;
 				current->exit_code = signr;
 				if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags
 				      & SA_NOCLDSTOP))
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/smp.c linux/arch/ia64/kernel/smp.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/smp.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/smp.c	Thu Jan  4 12:50:17 2001
@@ -6,11 +6,13 @@
  * 
  * Lots of stuff stolen from arch/alpha/kernel/smp.c
  *
- *  00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_sec calibration on each CPU.
+ *  00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy calibration on each CPU.
  *  00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
  *  00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor & cpu_online_map
  *			now gets done here (instead of setup.c)
  *  99/10/05 davidm	Update to bring it in sync with new command-line processing scheme.
+ *  10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
+ *		smp_call_function_single to resend IPI on timeouts
  */
 #define __KERNEL_SYSCALLS__
 
@@ -30,6 +32,7 @@
 #include <asm/current.h>
 #include <asm/delay.h>
 #include <asm/efi.h>
+#include <asm/machvec.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -78,10 +81,6 @@
 };
 static volatile struct smp_call_struct *smp_call_function_data;
 
-#ifdef	CONFIG_ITANIUM_A1_SPECIFIC
-extern spinlock_t ivr_read_lock;
-#endif
-
 #define IPI_RESCHEDULE	        0
 #define IPI_CALL_FUNC	        1
 #define IPI_CPU_STOP	        2
@@ -269,14 +268,14 @@
 }
 
 static inline void
-send_IPI_single(int dest_cpu, int op) 
+send_IPI_single (int dest_cpu, int op) 
 {
 	
 	if (dest_cpu == -1) 
                 return;
         
 	set_bit(op, &ipi_op[dest_cpu]);
-	ipi_send(dest_cpu, IPI_IRQ, IA64_IPI_DM_INT, 0);
+	platform_send_ipi(dest_cpu, IPI_IRQ, IA64_IPI_DM_INT, 0);
 }
 
 static inline void
@@ -358,6 +357,7 @@
 	if (pointer_lock(&smp_call_function_data, &data, retry))
 		return -EBUSY;
 
+resend:
 	/*  Send a message to all other CPUs and wait for them to respond  */
 	send_IPI_single(cpuid, IPI_CALL_FUNC);
 
@@ -366,8 +366,12 @@
 	while ((atomic_read(&data.unstarted_count) > 0) && time_before(jiffies, timeout))
 		barrier();
 	if (atomic_read(&data.unstarted_count) > 0) {
+#if (defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC))
+		goto resend;
+#else
 		smp_call_function_data = NULL;
 		return -ETIMEDOUT;
+#endif
 	}
 	if (wait)
 		while (atomic_read(&data.unfinished_count) > 0)
@@ -411,13 +415,23 @@
 	/*  Send a message to all other CPUs and wait for them to respond  */
 	send_IPI_allbutself(IPI_CALL_FUNC);
 
+retry:
 	/*  Wait for response  */
 	timeout = jiffies + HZ;
 	while ((atomic_read(&data.unstarted_count) > 0) && time_before(jiffies, timeout))
 		barrier();
 	if (atomic_read(&data.unstarted_count) > 0) {
+#if (defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC))
+		int i;
+		for (i = 0; i < smp_num_cpus; i++) {
+			if (i != smp_processor_id())
+				platform_send_ipi(i, IPI_IRQ, IA64_IPI_DM_INT, 0);
+		}
+		goto retry;
+#else
 		smp_call_function_data = NULL;
 		return -ETIMEDOUT;
+#endif
 	}
 	if (wait)
 		while (atomic_read(&data.unfinished_count) > 0)
@@ -430,8 +444,6 @@
 /*
  * Flush all other CPU's tlb and then mine.  Do this with smp_call_function() as we
  * want to ensure all TLB's flushed before proceeding.
- *
- * XXX: Is it OK to use the same ptc.e info on all cpus?
  */
 void
 smp_flush_tlb_all(void)
@@ -502,7 +514,7 @@
 	local_irq_enable();		/* Interrupts have been off until now */
 
 	calibrate_delay();
-	my_cpu_data.loops_per_sec = loops_per_sec;
+	my_cpu_data.loops_per_jiffy = loops_per_jiffy;
 
 	/* allow the master to continue */
 	set_bit(cpu, &cpu_callin_map);
@@ -569,7 +581,7 @@
 	cpu_now_booting = cpu;
 
 	/* Kick the AP in the butt */
-	ipi_send(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
+	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
 
 	/* wait up to 10s for the AP to start  */
 	for (timeout = 0; timeout < 100000; timeout++) {
@@ -603,7 +615,7 @@
 	__cpu_physical_id[0] = hard_smp_processor_id();
 
 	/* on the BP, the kernel already called calibrate_delay_loop() in init/main.c */
-	my_cpu_data.loops_per_sec = loops_per_sec;
+	my_cpu_data.loops_per_jiffy = loops_per_jiffy;
 #if 0
 	smp_tune_scheduling();
 #endif
@@ -653,13 +665,11 @@
 	bogosum = 0;
         for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_online_map & (1L << i))
-			bogosum += cpu_data[i].loops_per_sec;
+			bogosum += cpu_data[i].loops_per_jiffy;
         }
 
-	printk(KERN_INFO "SMP: Total of %d processors activated "
-	       "(%lu.%02lu BogoMIPS).\n",
-	       cpu_count, (bogosum + 2500) / 500000,
-	       ((bogosum + 2500) / 5000) % 100);
+	printk(KERN_INFO "SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+	       cpu_count, bogosum*HZ/500000, (bogosum*HZ/5000) % 100);
 
 	smp_num_cpus = cpu_count;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/smpboot.c linux/arch/ia64/kernel/smpboot.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/smpboot.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/smpboot.c	Thu Jan  4 12:50:17 2001
@@ -4,6 +4,8 @@
  * Application processor startup code, moved from smp.c to better support kernel profile
  */
 
+#include <linux/config.h>
+
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/init.h>
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/sys_ia64.c linux/arch/ia64/kernel/sys_ia64.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/sys_ia64.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/sys_ia64.c	Thu Jan  4 12:50:17 2001
@@ -16,8 +16,38 @@
 #include <linux/smp_lock.h>
 #include <linux/highuid.h>
 
+#include <asm/shmparam.h>
 #include <asm/uaccess.h>
 
+#define COLOR_ALIGN(addr)	(((addr) + SHMLBA - 1) & ~(SHMLBA - 1))
+
+unsigned long
+get_unmapped_area (unsigned long addr, unsigned long len)
+{
+	struct vm_area_struct * vmm;
+
+	if (len > RGN_MAP_LIMIT)
+		return 0;
+	if (!addr)
+		addr = TASK_UNMAPPED_BASE;
+
+	if (current->thread.flags & IA64_THREAD_MAP_SHARED)
+		addr = COLOR_ALIGN(addr);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+		/* At this point:  (!vmm || addr < vmm->vm_end). */
+		if (TASK_SIZE - len < addr)
+			return 0;
+		if (rgn_offset(addr) + len > RGN_MAP_LIMIT)	/* no risk of overflow here... */
+			return 0;
+		if (!vmm || addr + len <= vmm->vm_start)
+			return addr;
+		addr = vmm->vm_end;
+	}
+}
+
 asmlinkage long
 ia64_getpriority (int which, int who, long arg2, long arg3, long arg4, long arg5, long arg6, 
 		  long arg7, long stack)
@@ -34,6 +64,7 @@
 	return prio;
 }
 
+/* XXX obsolete, but leave it here until the old libc is gone... */
 asmlinkage unsigned long
 sys_getpagesize (void)
 {
@@ -58,16 +89,61 @@
 }
 
 asmlinkage unsigned long
-ia64_brk (long brk, long arg1, long arg2, long arg3,
+ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
 	  long arg4, long arg5, long arg6, long arg7, long stack)
 {
-	extern unsigned long sys_brk (unsigned long brk);
+	extern int vm_enough_memory (long pages);
 	struct pt_regs *regs = (struct pt_regs *) &stack;
-	unsigned long retval;
+	unsigned long rlim, retval, newbrk, oldbrk;
+	struct mm_struct *mm = current->mm;
+
+	/*
+	 * Most of this replicates the code in sys_brk() except for an additional safety
+	 * check and the clearing of r8.  However, we can't call sys_brk() because we need
+	 * to acquire the mmap_sem before we can do the test...
+	 */
+	down(&mm->mmap_sem);
+
+	if (brk < mm->end_code)
+		goto out;
+	newbrk = PAGE_ALIGN(brk);
+	oldbrk = PAGE_ALIGN(mm->brk);
+	if (oldbrk == newbrk)
+		goto set_brk;
+
+	/* Always allow shrinking brk. */
+	if (brk <= mm->brk) {
+		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
+			goto set_brk;
+		goto out;
+	}
+
+	/* Check against unimplemented/unmapped addresses: */
+	if ((newbrk - oldbrk) > RGN_MAP_LIMIT || rgn_offset(newbrk) > RGN_MAP_LIMIT)
+		goto out;
+
+	/* Check against rlimit.. */
+	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+		goto out;
+
+	/* Check against existing mmap mappings. */
+	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+		goto out;
 
-	retval = sys_brk(brk);
+	/* Check if we have enough memory.. */
+	if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
+		goto out;
 
-	regs->r8 = 0;	/* ensure large retval isn't mistaken as error code */
+	/* Ok, looks good - let it rip. */
+	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
+		goto out;
+set_brk:
+	mm->brk = brk;
+out:
+	retval = mm->brk;
+	up(&mm->mmap_sem);
+	regs->r8 = 0;		/* ensure large retval isn't mistaken as error code */
 	return retval;
 }
 
@@ -95,10 +171,8 @@
 static inline unsigned long
 do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
 {
-	unsigned long loff, hoff;
+	unsigned long roff;
 	struct file *file = 0;
-	/* the virtual address space that is mappable in each region: */
-#	define OCTANT_SIZE	((PTRS_PER_PGD<<PGDIR_SHIFT)/8)
 
 	/*
 	 * A zero mmap always succeeds in Linux, independent of
@@ -107,15 +181,12 @@
 	if (PAGE_ALIGN(len) == 0)
 		return addr;
 
-	/* Don't permit mappings into or across the address hole in a region: */
-	loff = rgn_offset(addr);
-	hoff = loff - (RGN_SIZE - OCTANT_SIZE/2);
-	if ((len | loff | (loff + len)) >= OCTANT_SIZE/2
-	    && (len | hoff | (hoff + len)) >= OCTANT_SIZE/2)
+	/* don't permit mappings into unmapped space or the virtual page table of a region: */
+	roff = rgn_offset(addr);
+	if ((len | roff | (roff + len)) >= RGN_MAP_LIMIT)
 		return -EINVAL;
 
-	/* Don't permit mappings that would cross a region boundary: */
-
+	/* don't permit mappings that would cross a region boundary: */
 	if (rgn_index(addr) != rgn_index(addr + len))
 		return -EINVAL;
 
@@ -126,9 +197,14 @@
 			return -EBADF;
 	}
 
+	if (flags & MAP_SHARED)
+		current->thread.flags |= IA64_THREAD_MAP_SHARED;
+
 	down(&current->mm->mmap_sem);
 	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 	up(&current->mm->mmap_sem);
+
+	current->thread.flags &= ~IA64_THREAD_MAP_SHARED;
 
 	if (file)
 		fput(file);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/time.c linux/arch/ia64/kernel/time.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/time.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/time.c	Thu Jan  4 12:50:17 2001
@@ -152,19 +152,7 @@
 {
 	int cpu = smp_processor_id();
 	unsigned long new_itm;
-#if 0
-	static unsigned long last_time;
-	static unsigned char count;
-	int printed = 0;
-#endif
 
-	/*
-	 * Here we are in the timer irq handler. We have irqs locally
-	 * disabled, but we don't know if the timer_bh is running on
-	 * another CPU. We need to avoid to SMP race by acquiring the
-	 * xtime_lock.
-	 */
-	write_lock(&xtime_lock);
 	new_itm = itm.next[cpu].count;
 
 	if (!time_after(ia64_get_itc(), new_itm))
@@ -173,48 +161,33 @@
 
 	while (1) {
 		/*
-		 * Do kernel PC profiling here.  We multiply the
-		 * instruction number by four so that we can use a
-		 * prof_shift of 2 to get instruction-level instead of
-		 * just bundle-level accuracy.
+		 * Do kernel PC profiling here.  We multiply the instruction number by
+		 * four so that we can use a prof_shift of 2 to get instruction-level
+		 * instead of just bundle-level accuracy.
 		 */
 		if (!user_mode(regs)) 
 			do_profile(regs->cr_iip + 4*ia64_psr(regs)->ri);
 
 #ifdef CONFIG_SMP
 		smp_do_timer(regs);
-		if (smp_processor_id() == 0)
-			do_timer(regs);
-#else
-		do_timer(regs);
 #endif
+		if (smp_processor_id() == 0) {
+			/*
+			 * Here we are in the timer irq handler. We have irqs locally
+			 * disabled, but we don't know if the timer_bh is running on
+			 * another CPU. We need to avoid to SMP race by acquiring the
+			 * xtime_lock.
+			 */
+			write_lock(&xtime_lock);
+			do_timer(regs);
+			write_unlock(&xtime_lock);
+		}
 
 		new_itm += itm.delta;
 		itm.next[cpu].count = new_itm;
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
-
-#if 0
-		/*
-		 * SoftSDV in SMP mode is _slow_, so we do "lose" ticks, 
-		 * but it's really OK...
-		 */
-		if (count > 0 && jiffies - last_time > 5*HZ)
-			count = 0;
-		if (count++ == 0) {
-			last_time = jiffies;
-			if (!printed) {
-				printk("Lost clock tick on CPU %d (now=%lx, next=%lx)!!\n",
-				       cpu, ia64_get_itc(), itm.next[cpu].count);
-				printed = 1;
-# ifdef CONFIG_IA64_DEBUG_IRQ
-				printk("last_cli_ip=%lx\n", last_cli_ip);
-# endif
-			}
-		}
-#endif
 	}
-	write_unlock(&xtime_lock);
 
 	/*
 	 * If we're too close to the next clock tick for comfort, we
@@ -229,7 +202,7 @@
 	ia64_set_itm(new_itm);
 }
 
-#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
 
 /*
  * Interrupts must be disabled before calling this routine.
@@ -240,7 +213,7 @@
 	timer_interrupt(0, 0, ia64_task_regs(current));
 }
 
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+#endif
 
 /*
  * Encapsulate access to the itm structure for SMP.
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/traps.c linux/arch/ia64/kernel/traps.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/traps.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/traps.c	Thu Jan  4 12:50:17 2001
@@ -78,7 +78,7 @@
 die_if_kernel (char *str, struct pt_regs *regs, long err)
 {
 	if (user_mode(regs)) {
-#if 1
+#if 0
 		/* XXX for debugging only */
 		printk ("!!die_if_kernel: %s(%d): %s %ld\n",
 			current->comm, current->pid, str, err);
@@ -484,6 +484,20 @@
 		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
 		break;
 
+	      case 26: /* NaT Consumption */
+	      case 31: /* Unsupported Data Reference */
+		if (user_mode(regs)) {
+			siginfo.si_signo = SIGILL;
+			siginfo.si_code = ILL_ILLOPN;
+			siginfo.si_errno = 0;
+			siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+			siginfo.si_imm = vector;
+			force_sig_info(SIGILL, &siginfo, current);
+			return;
+		}
+		sprintf(buf, (vector == 26) ? "NaT consumption" : "Unsupported data reference");
+		break;
+
 	      case 29: /* Debug */
 	      case 35: /* Taken Branch Trap */
 	      case 36: /* Single Step Trap */
@@ -522,10 +536,10 @@
 
 	      case 34:		/* Unimplemented Instruction Address Trap */
 		if (user_mode(regs)) {
-			printk("Woah! Unimplemented Instruction Address Trap!\n");
-			siginfo.si_code = ILL_BADIADDR;
 			siginfo.si_signo = SIGILL;
+			siginfo.si_code = ILL_BADIADDR;
 			siginfo.si_errno = 0;
+			siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
 			force_sig_info(SIGILL, &siginfo, current);
 			return;
 		}
@@ -544,7 +558,8 @@
 
 	      case 46:
 		printk("Unexpected IA-32 intercept trap (Trap 46)\n");
-		printk("  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", regs->cr_iip, ifa, isr);
+		printk("  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
+		       regs->cr_iip, ifa, isr, iim);
 		force_sig(SIGSEGV, current);
 		return;
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/unaligned.c linux/arch/ia64/kernel/unaligned.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/unaligned.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/unaligned.c	Thu Jan  4 12:50:17 2001
@@ -572,7 +572,8 @@
 	 */
 	if (regnum == 0) {
 		*val = 0;
-		*nat = 0;
+		if (nat)
+			*nat = 0;
 		return;
 	}
 
@@ -1563,9 +1564,13 @@
 
 	DPRINT(("ret=%d\n", ret));
 	if (ret) {
-		lock_kernel();
-	        force_sig(SIGSEGV, current);
-	        unlock_kernel();
+		struct siginfo si;
+
+		si.si_signo = SIGBUS;
+		si.si_errno = 0;
+		si.si_code = BUS_ADRALN;
+		si.si_addr = (void *) ifa;
+	        force_sig_info(SIGBUS, &si, current);
 	} else {
 		/*
 	 	 * given today's architecture this case is not likely to happen
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/kernel/unwind.c linux/arch/ia64/kernel/unwind.c
--- v2.4.0-prerelease/linux/arch/ia64/kernel/unwind.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/kernel/unwind.c	Thu Jan  4 12:50:17 2001
@@ -46,16 +46,6 @@
 #define MIN(a,b)	((a) < (b) ? (a) : (b))
 #define p5		5
 
-/*
- * The unwind tables are supposed to be sorted, but the GNU toolchain
- * currently fails to produce a sorted table in the presence of
- * functions that go into sections other than .text.  For example, the
- * kernel likes to put initialization code into .text.init, which
- * messes up the sort order.  Hopefully, this will get fixed sometime
- * soon.  --davidm 00/05/23
- */
-#define UNWIND_TABLE_SORT_BUG
-
 #define UNW_LOG_CACHE_SIZE	7	/* each unw_script is ~256 bytes in size */
 #define UNW_CACHE_SIZE		(1 << UNW_LOG_CACHE_SIZE)
 
@@ -531,6 +521,10 @@
 	struct unw_reg_state *rs;
 
 	rs = alloc_reg_state();
+	if (!rs) {
+		printk("unwind: cannot stack reg state!\n");
+		return;
+	}
 	memcpy(rs, &sr->curr, sizeof(*rs));
 	rs->next = sr->stack;
 	sr->stack = rs;
@@ -1964,23 +1958,6 @@
 {
 	struct unw_table_entry *start = table_start, *end = table_end;
 
-#ifdef UNWIND_TABLE_SORT_BUG
-	{
-		struct unw_table_entry *e1, *e2, tmp;
-
-		/* stupid bubble sort... */
-
-		for (e1 = start; e1 < end; ++e1) {
-			for (e2 = e1 + 1; e2 < end; ++e2) {
-				if (e2->start_offset < e1->start_offset) {
-					tmp = *e1;
-					*e1 = *e2;
-					*e2 = tmp;
-				}
-			}
-		}
-	}
-#endif
 	table->name = name;
 	table->segment_base = segment_base;
 	table->gp = gp;
@@ -2023,8 +2000,8 @@
 void
 unw_remove_unwind_table (void *handle)
 {
-	struct unw_table *table, *prevt;
-	struct unw_script *tmp, *prev;
+	struct unw_table *table, *prev;
+	struct unw_script *tmp;
 	unsigned long flags;
 	long index;
 
@@ -2043,41 +2020,35 @@
 	{
 		/* first, delete the table: */
 
-		for (prevt = (struct unw_table *) &unw.tables; prevt; prevt = prevt->next)
-			if (prevt->next == table)
+		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
+			if (prev->next == table)
 				break;
-		if (!prevt) {
+		if (!prev) {
 			dprintk("unwind: failed to find unwind table %p\n", (void *) table);
 			spin_unlock_irqrestore(&unw.lock, flags);
 			return;
 		}
-		prevt->next = table->next;
+		prev->next = table->next;
+	}
+	spin_unlock_irqrestore(&unw.lock, flags);
 
-		/* next, remove hash table entries for this table */
+	/* next, remove hash table entries for this table */
 
-		for (index = 0; index <= UNW_HASH_SIZE; ++index) {
-			if (unw.hash[index] >= UNW_CACHE_SIZE)
-				continue;
+	for (index = 0; index <= UNW_HASH_SIZE; ++index) {
+		tmp = unw.cache + unw.hash[index];
+		if (unw.hash[index] >= UNW_CACHE_SIZE
+		    || tmp->ip < table->start || tmp->ip >= table->end)
+			continue;
 
-			tmp = unw.cache + unw.hash[index];
-			prev = 0;
-			while (1) {
-				write_lock(&tmp->lock);
-				{
-					if (tmp->ip >= table->start && tmp->ip < table->end) {
-						if (prev)
-							prev->coll_chain = tmp->coll_chain;
-						else
-							unw.hash[index] = -1;
-						tmp->ip = 0;
-					} else
-						prev = tmp;
-				}
-				write_unlock(&tmp->lock);
+		write_lock(&tmp->lock);
+		{
+			if (tmp->ip >= table->start && tmp->ip < table->end) {
+				unw.hash[index] = tmp->coll_chain;
+				tmp->ip = 0;
 			}
 		}
+		write_unlock(&tmp->lock);
 	}
-	spin_unlock_irqrestore(&unw.lock, flags);
 
 	kfree(table);
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/lib/Makefile linux/arch/ia64/lib/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/lib/Makefile	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/lib/Makefile	Thu Jan  4 12:50:17 2001
@@ -7,22 +7,23 @@
 
 L_TARGET = lib.a
 
-L_OBJS  = __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o					\
+obj-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o					\
 	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o					\
 	checksum.o clear_page.o csum_partial_copy.o copy_page.o				\
 	copy_user.o clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o	\
-	flush.o do_csum.o
+	flush.o do_csum.o								\
+	swiotlb.o
 
 ifneq ($(CONFIG_ITANIUM_ASTEP_SPECIFIC),y)
-  L_OBJS += memcpy.o memset.o strlen.o
+  obj-y += memcpy.o memset.o strlen.o
 endif
 
-LX_OBJS = io.o
+export-objs += io.o
 
 IGNORE_FLAGS_OBJS =	__divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
 			__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
 
-$(L_TARGET):
+$(L_TARGET): $(obj-y) $(export-objs)
 
 __divdi3.o: idiv64.S
 	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/lib/copy_user.S linux/arch/ia64/lib/copy_user.S
--- v2.4.0-prerelease/linux/arch/ia64/lib/copy_user.S	Fri Jul 14 16:08:12 2000
+++ linux/arch/ia64/lib/copy_user.S	Thu Jan  4 12:50:17 2001
@@ -65,6 +65,12 @@
 //
 // local registers
 //
+#define t1		r2	// rshift in bytes
+#define t2		r3	// lshift in bytes
+#define rshift		r14	// right shift in bits
+#define lshift		r15	// left shift in bits
+#define word1		r16
+#define word2		r17
 #define cnt		r18
 #define len2		r19
 #define saved_lc	r20
@@ -134,6 +140,190 @@
 	br.ret.sptk.few rp	// end of short memcpy
 
 	//
+	// Not 8-byte aligned
+	//
+diff_align_copy_user:
+	// At this point we know we have more than 16 bytes to copy
+	// and also that src and dest do _not_ have the same alignment.
+	and src2=0x7,src1				// src offset
+	and dst2=0x7,dst1				// dst offset
+	;;
+	// The basic idea is that we copy byte-by-byte at the head so 
+	// that we can reach 8-byte alignment for both src1 and dst1. 
+	// Then copy the body using software pipelined 8-byte copy, 
+	// shifting the two back-to-back words right and left, then copy 
+	// the tail by copying byte-by-byte.
+	//
+	// Fault handling. If the byte-by-byte at the head fails on the
+	// load, then restart and finish the pipleline by copying zeros
+	// to the dst1. Then copy zeros for the rest of dst1.
+	// If 8-byte software pipeline fails on the load, do the same as
+	// failure_in3 does. If the byte-by-byte at the tail fails, it is
+	// handled simply by failure_in_pipe1.
+	//
+	// The case p14 represents the source has more bytes in the
+	// first word (by the shifted part), whereas the p15 needs to
+	// copy some bytes from the 2nd word of the source that has the 
+	// tail of the 1st of the destination.
+	//
+
+	//
+	// Optimization. If dst1 is 8-byte aligned (not rarely), we don't need 
+	// to copy the head to dst1, to start 8-byte copy software pipeline.
+	// We know src1 is not 8-byte aligned in this case.
+	//
+	cmp.eq p14,p15=r0,dst2
+(p15)	br.cond.spnt.few 1f				
+	;;
+	sub t1=8,src2
+	mov t2=src2
+	;;
+	shl rshift=t2,3
+	sub len1=len,t1					// set len1
+	;;
+	sub lshift=64,rshift
+	;; 
+	br.cond.spnt.few word_copy_user
+	;; 
+1:			
+	cmp.leu	p14,p15=src2,dst2
+	sub t1=dst2,src2
+	;;
+	.pred.rel "mutex", p14, p15
+(p14)	sub word1=8,src2				// (8 - src offset)
+(p15)	sub t1=r0,t1					// absolute value
+(p15)	sub word1=8,dst2				// (8 - dst offset)
+	;;
+	// For the case p14, we don't need to copy the shifted part to
+	// the 1st word of destination.
+	sub t2=8,t1	
+(p14)	sub word1=word1,t1
+	;;
+	sub len1=len,word1				// resulting len
+(p15)	shl rshift=t1,3					// in bits
+(p14)	shl rshift=t2,3
+	;; 
+(p14)	sub len1=len1,t1
+	adds cnt=-1,word1
+	;; 
+	sub lshift=64,rshift
+	mov ar.ec=PIPE_DEPTH
+	mov pr.rot=1<<16	// p16=true all others are false
+	mov ar.lc=cnt
+	;; 
+2:	
+	EX(failure_in_pipe2,(p16) ld1 val1[0]=[src1],1)
+	;; 
+	EX(failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
+	br.ctop.dptk.few 2b
+	;;
+	clrrrb	
+	;; 
+word_copy_user:		
+	cmp.gtu p9,p0=16,len1
+(p9)	br.cond.spnt.few 4f		// if (16 > len1) skip 8-byte copy
+	;;
+	shr.u cnt=len1,3		// number of 64-bit words
+	;;
+	adds cnt=-1,cnt
+	;;
+	.pred.rel "mutex", p14, p15	
+(p14)	sub src1=src1,t2
+(p15)	sub src1=src1,t1
+	//
+	// Now both src1 and dst1 point to an 8-byte aligned address. And
+	// we have more than 8 bytes to copy.
+	//
+	mov ar.lc=cnt
+	mov ar.ec=PIPE_DEPTH
+	mov pr.rot=1<<16	// p16=true all others are false
+	;; 
+3:
+	//
+	// The pipeline consists of 3 stages:
+	// 1 (p16):	Load a word from src1
+	// 2 (EPI_1):	Shift right pair, saving to tmp
+	// 3 (EPI):	Store tmp to dst1
+	//
+	// To make it simple, use at least 2 (p16) loops to set up val1[n] 
+	// because we need 2 back-to-back val1[] to get tmp.
+	// Note that this implies EPI_2 must be p18 or greater.
+	// 
+
+#define EPI_1		p[PIPE_DEPTH-2]
+#define SWITCH(pred, shift)	cmp.eq pred,p0=shift,rshift
+#define CASE(pred, shift)	\
+	(pred)	br.cond.spnt.few copy_user_bit##shift	
+#define BODY(rshift)							\
+copy_user_bit##rshift:							\
+1:									\
+	EX(failure_out,(EPI) st8 [dst1]=tmp,8);				\
+(EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift;		\
+	EX(failure_in2,(p16) ld8 val1[0]=[src1],8);			\
+	br.ctop.dptk.few 1b;						\
+	;;								\
+	br.cond.spnt.few .diff_align_do_tail
+
+	//
+	// Since the instruction 'shrp' requires a fixed 128-bit value
+	// specifying the bits to shift, we need to provide 7 cases
+	// below. 
+	//
+	SWITCH(p6, 8)
+	SWITCH(p7, 16)
+	SWITCH(p8, 24)	
+	SWITCH(p9, 32)
+	SWITCH(p10, 40)
+	SWITCH(p11, 48)
+	SWITCH(p12, 56)
+	;;
+	CASE(p6, 8)
+	CASE(p7, 16)
+	CASE(p8, 24)
+	CASE(p9, 32)
+	CASE(p10, 40)
+	CASE(p11, 48)
+	CASE(p12, 56)
+	;;
+	BODY(8)
+	BODY(16)
+	BODY(24)
+	BODY(32)
+	BODY(40)		
+	BODY(48)
+	BODY(56)
+	;; 
+.diff_align_do_tail:	
+	.pred.rel "mutex", p14, p15		
+(p14)	sub src1=src1,t1
+(p14)	adds dst1=-8,dst1			
+(p15)	sub dst1=dst1,t1
+	;; 
+4:	
+	// Tail correction.
+	//
+	// The problem with this pipelined loop is that the last word is not
+	// loaded and thus part of the last word written is not correct.
+	// To fix that, we simply copy the tail byte by byte.
+	
+	sub len1=endsrc,src1,1
+	clrrrb
+	;; 
+	mov ar.ec=PIPE_DEPTH
+	mov pr.rot=1<<16	// p16=true all others are false
+	mov ar.lc=len1
+	;;
+5:		
+	EX(failure_in_pipe1,(p16) ld1 val1[0]=[src1],1)
+	
+	EX(failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
+	br.ctop.dptk.few 5b
+	;;
+	mov pr=saved_pr,0xffffffffffff0000
+	mov ar.pfs=saved_pfs
+	br.ret.dptk.few rp
+	
+	//
 	// Beginning of long mempcy (i.e. > 16 bytes)
 	//
 long_copy_user:
@@ -142,7 +332,7 @@
 	;;
 	cmp.eq p10,p8=r0,tmp
 	mov len1=len		// copy because of rotation
-(p8)	br.cond.dpnt.few 1b	// XXX Fixme. memcpy_diff_align 
+(p8)	br.cond.dpnt.few diff_align_copy_user
 	;;
 	// At this point we know we have more than 16 bytes to copy
 	// and also that both src and dest have the same alignment
@@ -267,6 +457,21 @@
 	mov ar.pfs=saved_pfs
 	br.ret.dptk.few rp
 
+	//
+	// This is the case where the byte by byte copy fails on the load
+	// when we copy the head. We need to finish the pipeline and copy 
+	// zeros for the rest of the destination. Since this happens
+	// at the top we still need to fill the body and tail.
+failure_in_pipe2:
+	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
+2:
+(p16)	mov val1[0]=r0
+(EPI)	st1 [dst1]=val1[PIPE_DEPTH-1],1
+	br.ctop.dptk.few 2b
+	;;
+	sub len=enddst,dst1,1		// precompute len
+	br.cond.dptk.few failure_in1bis
+	;; 
 
 	//
 	// Here we handle the head & tail part when we check for alignment.
@@ -395,6 +600,23 @@
 	mov ar.pfs=saved_pfs
 	br.ret.dptk.few rp
 
+failure_in2:
+	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
+	;;
+3:
+(p16)	mov val1[0]=r0
+(EPI)	st8 [dst1]=val1[PIPE_DEPTH-1],8
+	br.ctop.dptk.few 3b
+	;;
+	cmp.ne p6,p0=dst1,enddst	// Do we need to finish the tail ?
+	sub len=enddst,dst1,1		// precompute len
+(p6)	br.cond.dptk.few failure_in1bis	
+	;;
+	mov pr=saved_pr,0xffffffffffff0000
+	mov ar.lc=saved_lc
+	mov ar.pfs=saved_pfs
+	br.ret.dptk.few rp
+	
 	//
 	// handling of failures on stores: that's the easy part
 	//
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/lib/flush.S linux/arch/ia64/lib/flush.S
--- v2.4.0-prerelease/linux/arch/ia64/lib/flush.S	Thu Jun 22 07:09:44 2000
+++ linux/arch/ia64/lib/flush.S	Thu Jan  4 12:50:17 2001
@@ -12,29 +12,33 @@
 	.psr lsb
 	.lsb
 
-GLOBAL_ENTRY(ia64_flush_icache_page)
+	/*
+	 * flush_icache_range(start,end)
+	 *	Must flush range from start to end-1 but nothing else (need to
+	 *	be careful not to touch addresses that may be unmapped).
+	 */
+GLOBAL_ENTRY(flush_icache_range)
 	UNW(.prologue)
-	alloc r2=ar.pfs,1,0,0,0
+	alloc r2=ar.pfs,2,0,0,0
+	sub r8=in1,in0,1
+	;;
+	shr.u r8=r8,5			// we flush 32 bytes per iteration
 	UNW(.save ar.lc, r3)
 	mov r3=ar.lc			// save ar.lc	
+	;;
 
 	.body
 
-	mov r8=PAGE_SIZE/64-1		// repeat/until loop
-	;;
 	mov ar.lc=r8
-	add r8=32,in0
 	;;
-.Loop1:	fc in0				// issuable on M0 only
-	add in0=64,in0
-	fc r8
-	add r8=64,r8
-	br.cloop.sptk.few .Loop1
+.Loop:	fc in0				// issuable on M0 only
+	add in0=32,in0
+	br.cloop.sptk.few .Loop
 	;;
 	sync.i
 	;;
 	srlz.i
 	;;	
 	mov ar.lc=r3			// restore ar.lc
-	br.ret.sptk.few rp
-END(ia64_flush_icache_page)
+	br.ret.sptk.many rp
+END(flush_icache_range)
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/lib/io.c linux/arch/ia64/lib/io.c
--- v2.4.0-prerelease/linux/arch/ia64/lib/io.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/ia64/lib/io.c	Thu Jan  4 12:50:17 2001
@@ -1,3 +1,4 @@
+#include <linux/config.h>
 #include <linux/types.h>
 
 #include <asm/io.h>
@@ -48,3 +49,54 @@
 	}
 }
 
+#ifdef CONFIG_IA64_GENERIC
+
+unsigned int
+ia64_inb (unsigned long port)
+{
+	return __ia64_inb(port);
+}
+
+unsigned int
+ia64_inw (unsigned long port)
+{
+	return __ia64_inw(port);
+}
+
+unsigned int
+ia64_inl (unsigned long port)
+{
+	return __ia64_inl(port);
+}
+
+void
+ia64_outb (unsigned char val, unsigned long port)
+{
+	__ia64_outb(val, port);
+}
+
+void
+ia64_outw (unsigned short val, unsigned long port)
+{
+	__ia64_outw(val, port);
+}
+
+void
+ia64_outl (unsigned int val, unsigned long port)
+{
+	__ia64_outl(val, port);
+}
+
+/* define aliases: */
+
+asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
+asm ("__ia64_inb = ia64_inb");
+asm ("__ia64_inw = ia64_inw");
+asm ("__ia64_inl = ia64_inl");
+
+asm (".global __ia64_outb, __ia64_outw, __ia64_outl");
+asm ("__ia64_outb = ia64_outb");
+asm ("__ia64_outw = ia64_outw");
+asm ("__ia64_outl = ia64_outl");
+
+#endif /* CONFIG_IA64_GENERIC */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/lib/memcpy.S linux/arch/ia64/lib/memcpy.S
--- v2.4.0-prerelease/linux/arch/ia64/lib/memcpy.S	Fri Aug 11 19:09:06 2000
+++ linux/arch/ia64/lib/memcpy.S	Thu Jan  4 12:50:17 2001
@@ -17,17 +17,31 @@
 
 #include <asm/asmmacro.h>
 
+#if defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC)
+# define BRP(args...)	nop.b 0
+#else
+# define BRP(args...)	brp.loop.imp args
+#endif
+
 GLOBAL_ENTRY(bcopy)
 	.regstk 3,0,0,0
 	mov r8=in0
 	mov in0=in1
 	;;
 	mov in1=r8
+	// gas doesn't handle control flow across procedures, so it doesn't
+	// realize that a stop bit is needed before the "alloc" instruction
+	// below
+{
+	nop.m 0
+	nop.f 0
+	nop.i 0
+}	;;
 END(bcopy)
 	// FALL THROUGH
 GLOBAL_ENTRY(memcpy)
 
-#	define MEM_LAT	2		/* latency to L1 cache */
+#	define MEM_LAT	21		/* latency to memory */
 
 #	define dst	r2
 #	define src	r3
@@ -57,20 +71,17 @@
 	UNW(.prologue)
 	UNW(.save ar.pfs, saved_pfs)
 	alloc saved_pfs=ar.pfs,3,Nrot,0,Nrot
-#if !(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC))
-	lfetch [in1]
-#else
-	nop.m 0
-#endif
+	UNW(.save ar.lc, saved_lc)
+	mov saved_lc=ar.lc
 	or t0=in0,in1
 	;;
 
 	or t0=t0,in2
-	UNW(.save ar.lc, saved_lc)
-	mov saved_lc=ar.lc
 	UNW(.save pr, saved_pr)
 	mov saved_pr=pr
 
+	UNW(.body)
+
 	cmp.eq p6,p0=in2,r0	// zero length?
 	mov retval=in0		// return dst
 (p6)	br.ret.spnt.many rp	// zero length, return immediately
@@ -83,7 +94,6 @@
 
 	adds cnt=-1,cnt		// br.ctop is repeat/until
 	cmp.gtu p7,p0=16,in2	// copying less than 16 bytes?
-	UNW(.body)
 	mov ar.ec=N
 	;;
 
@@ -96,12 +106,26 @@
 (p7)	br.cond.spnt.few memcpy_short
 (p6)	br.cond.spnt.few memcpy_long
 	;;
+	nop.m	0
+	;;
+	nop.m	0
+	nop.i	0
+	;;
+	nop.m	0
+	;;
 	.rotr val[N]
 	.rotp p[N]
-1:
+	.align 32
+1: { .mib
 (p[0])	ld8 val[0]=[src],8
+	nop.i 0
+	BRP(1b, 2f)
+}
+2: { .mfb
 (p[N-1])st8 [dst]=val[N-1],8
+	nop.f 0
 	br.ctop.dptk.few 1b
+}
 	;;
 	mov ar.lc=saved_lc
 	mov pr=saved_pr,-1
@@ -118,19 +142,34 @@
 memcpy_short:
 	adds cnt=-1,in2		// br.ctop is repeat/until
 	mov ar.ec=MEM_LAT
+	BRP(1f, 2f)
 	;;
 	mov ar.lc=cnt
 	;;
+	nop.m	0			
+	;;
+	nop.m	0
+	nop.i	0
+	;;
+	nop.m	0
+	;;
+	nop.m	0
+	;;
 	/*
 	 * It is faster to put a stop bit in the loop here because it makes
 	 * the pipeline shorter (and latency is what matters on short copies).
 	 */
-1:
+	.align 32
+1: { .mib
 (p[0])	ld1 val[0]=[src],1
-	;;
+	nop.i 0
+	BRP(1b, 2f)
+} ;;
+2: { .mfb
 (p[MEM_LAT-1])st1 [dst]=val[MEM_LAT-1],1
+	nop.f 0
 	br.ctop.dptk.few 1b
-	;;
+} ;;
 	mov ar.lc=saved_lc
 	mov pr=saved_pr,-1
 	mov ar.pfs=saved_pfs
@@ -227,6 +266,13 @@
 	mov pr=cnt,0x38			// set (p5,p4,p3) to # of bytes last-word bytes to copy
 	mov ar.lc=t2
 	;;
+	nop.m	0			
+	;;
+	nop.m	0
+	nop.i	0
+	;;
+	nop.m	0
+	;;
 (p6)	ld8 val[1]=[src2],8		// prime the pump...
 	mov b6=t4
 	br.sptk.few b6
@@ -251,17 +297,16 @@
 	.align 64
 
 #define COPY(shift,index)									\
- 1:												\
-  { .mfi											\
+ 1: { .mib											\
 	(p[0])		ld8 val[0]=[src2],8;							\
-			nop.f 0;								\
 	(p[MEM_LAT+3])	shrp w[0]=val[MEM_LAT+3],val[MEM_LAT+4-index],shift;			\
-  };												\
-  { .mbb											\
+			BRP(1b, 2f)								\
+    };												\
+ 2: { .mfb											\
 	(p[MEM_LAT+4])	st8 [dst]=w[1],8;							\
-			nop.b 0;								\
+			nop.f 0;								\
 			br.ctop.dptk.few 1b;							\
-  };												\
+    };												\
 			;;									\
 			ld8 val[N-1]=[src_end];	/* load last word (may be same as val[N]) */	\
 			;;									\
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/lib/swiotlb.c linux/arch/ia64/lib/swiotlb.c
--- v2.4.0-prerelease/linux/arch/ia64/lib/swiotlb.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/lib/swiotlb.c	Thu Jan  4 12:50:17 2001
@@ -0,0 +1,458 @@
+/*
+ * Dynamic DMA mapping support.
+ *
+ * This implementation is for IA-64 platforms that do not support
+ * I/O TLBs (aka DMA address translation hardware).
+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
+ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
+ *
+ * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
+ *			unnecessary i-cache flushing.
+ */
+
+#include <linux/config.h>
+
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/pci.h>
+#include <asm/dma.h>
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#define ALIGN(val, align) ((unsigned long)	\
+	(((unsigned long) (val) + ((align) - 1)) & ~((align) - 1)))
+
+/*
+ * log of the size of each IO TLB slab.  The number of slabs is command line controllable.
+ */
+#define IO_TLB_SHIFT 11
+
+/*
+ * Used to do a quick range check in swiotlb_unmap_single and swiotlb_sync_single, to see
+ * if the memory was in fact allocated by this API.
+ */
+static char *io_tlb_start, *io_tlb_end;
+
+/*
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and io_tlb_end.
+ * This is command line adjustable via setup_io_tlb_npages.
+ */
+static unsigned long io_tlb_nslabs = 1024;
+
+/*
+ * This is a free list describing the number of free entries available from each index
+ */
+static unsigned int *io_tlb_list;
+static unsigned int io_tlb_index;
+
+/*
+ * We need to save away the original address corresponding to a mapped entry for the sync 
+ * operations.
+ */
+static unsigned char **io_tlb_orig_addr;
+
+/*
+ * Protect the above data structures in the map and unmap calls
+ */ 
+static spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;
+
+static int __init
+setup_io_tlb_npages (char *str)
+{
+	io_tlb_nslabs = simple_strtoul(str, NULL, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);
+	return 1;
+}
+__setup("swiotlb=", setup_io_tlb_npages);
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data structures for
+ * the software IO TLB used to implement the PCI DMA API.
+ */
+void
+swiotlb_init (void)
+{
+	int i;
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	if (!io_tlb_start)
+		BUG();
+	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+
+	/*
+	 * Allocate and initialize the free list array.  This array is used
+	 * to find contiguous free memory regions of size 2^IO_TLB_SHIFT between
+	 * io_tlb_start and io_tlb_end.
+	 */
+	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	for (i = 0; i < io_tlb_nslabs; i++)
+		io_tlb_list[i] = io_tlb_nslabs - i;
+	io_tlb_index = 0;
+	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+
+	printk("Placing software IO TLB between 0x%p - 0x%p\n",
+	       (void *) io_tlb_start, (void *) io_tlb_end);
+}
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+static void *
+map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
+{
+	unsigned long flags;
+	char *dma_addr;
+	unsigned int nslots, stride, index, wrap;
+	int i;
+
+	/*
+	 * For mappings greater than a page size, we limit the stride (and hence alignment)
+	 * to a page size.
+	 */
+	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	if (size > (1 << PAGE_SHIFT))
+		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
+	else
+		stride = nslots;
+
+	if (!nslots)
+		BUG();
+
+	/*
+	 * Find suitable number of IO TLB entries size that will fit this request and
+	 * allocate a buffer from that IO TLB pool.
+	 */
+	spin_lock_irqsave(&io_tlb_lock, flags);
+	{
+		wrap = index = ALIGN(io_tlb_index, stride);
+
+		if (index >= io_tlb_nslabs) 
+			wrap = index = 0;
+
+		do {
+			/*
+			 * If we find a slot that indicates we have 'nslots' number of
+			 * contiguous buffers, we allocate the buffers from that slot and
+			 * mark the entries as '0' indicating unavailable.
+			 */
+			if (io_tlb_list[index] >= nslots) {
+				int count = 0;
+
+				for (i = index; i < index + nslots; i++)
+					io_tlb_list[i] = 0;
+				for (i = index - 1; (i >= 0) && io_tlb_list[i]; i--)
+					io_tlb_list[i] = ++count;
+				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+				/*
+				 * Update the indices to avoid searching in the next round.
+				 */
+				io_tlb_index = ((index + nslots) < io_tlb_nslabs
+						? (index + nslots) : 0);
+
+				goto found;
+			}
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+		} while (index != wrap);
+
+		/*
+		 * XXX What is a suitable recovery mechanism here?  We cannot 
+		 * sleep because we are called from within interrupt context!
+		 */
+		panic("map_single: could not allocate software IO TLB (%ld bytes)", size);
+found:
+	}
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+
+	/*
+	 * Save away the mapping from the original address to the DMA address.  This is
+	 * needed when we sync the memory.  Then we sync the buffer if needed.
+	 */
+	io_tlb_orig_addr[index] = buffer;
+	if (direction == PCI_DMA_TODEVICE || direction == PCI_DMA_BIDIRECTIONAL)
+		memcpy(dma_addr, buffer, size);
+
+	return dma_addr;
+}
+
+/*
+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
+ */
+static void
+unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
+{
+	unsigned long flags;
+	int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	char *buffer = io_tlb_orig_addr[index];
+
+	/*
+	 * First, sync the memory before unmapping the entry
+	 */
+	if ((direction == PCI_DMA_FROMDEVICE) || (direction == PCI_DMA_BIDIRECTIONAL))
+		/*
+ 	 	 * bounce... copy the data back into the original buffer and delete the
+ 	 	 * bounce buffer.
+ 	 	 */
+		memcpy(buffer, dma_addr, size);
+
+	/*
+	 * Return the buffer to the free list by setting the corresponding entries to
+	 * indicate the number of contiguous entries available.  While returning the
+	 * entries to the free list, we merge the entries with slots below and above the
+	 * pool being returned.
+	 */
+	spin_lock_irqsave(&io_tlb_lock, flags);
+	{
+		int count = ((index + nslots) < io_tlb_nslabs ? io_tlb_list[index + nslots] : 0);
+		/*
+		 * Step 1: return the slots to the free list, merging the slots with
+		 * succeeding slots
+		 */
+		for (i = index + nslots - 1; i >= index; i--)
+			io_tlb_list[i] = ++count;
+		/*
+		 * Step 2: merge the returned slots with the preceding slots, if
+		 * available (non zero)
+		 */
+		for (i = index - 1; (i >= 0) && io_tlb_list[i]; i--)
+			io_tlb_list[i] = ++count;
+	}
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+}
+
+static void
+sync_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
+{
+	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	char *buffer = io_tlb_orig_addr[index];
+
+	/*
+  	 * bounce... copy the data back into/from the original buffer
+	 * XXX How do you handle PCI_DMA_BIDIRECTIONAL here ?
+ 	 */
+	if (direction == PCI_DMA_FROMDEVICE)
+		memcpy(buffer, dma_addr, size);
+	else if (direction == PCI_DMA_TODEVICE)
+		memcpy(dma_addr, buffer, size);
+	else
+		BUG();
+}
+
+void *
+swiotlb_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
+{
+	unsigned long pci_addr;
+	int gfp = GFP_ATOMIC;
+	void *ret;
+
+	if (!hwdev || hwdev->dma_mask <= 0xffffffff)
+		gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+	if (!ret)
+		return NULL;
+
+	memset(ret, 0, size);
+	pci_addr = virt_to_phys(ret);
+	if ((pci_addr & ~hwdev->dma_mask) != 0)
+		panic("swiotlb_alloc_consistent: allocated memory is out of range for PCI device");
+	*dma_handle = pci_addr;
+	return ret;
+}
+
+void
+swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long) vaddr, get_order(size));
+}
+
+/*
+ * Map a single buffer of the indicated size for DMA in streaming mode.  The PCI address
+ * to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory until either
+ * swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ */
+dma_addr_t
+swiotlb_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+	unsigned long pci_addr = virt_to_phys(ptr);
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+	/*
+	 * Check if the PCI device can DMA to ptr... if so, just return ptr
+	 */
+	if ((pci_addr & ~hwdev->dma_mask) == 0)
+		/*
+		 * Device is capable of DMA'ing to the buffer... just return the PCI
+		 * address of ptr
+		 */
+		return pci_addr;
+
+	/* 
+	 * get a bounce buffer: 
+	 */
+	pci_addr = virt_to_phys(map_single(hwdev, ptr, size, direction));
+
+	/*
+	 * Ensure that the address returned is DMA'ble:
+	 */
+	if ((pci_addr & ~hwdev->dma_mask) != 0)
+		panic("map_single: bounce buffer is not DMA'ble");
+
+	return pci_addr;
+}
+
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+static void
+mark_clean (void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+#if 0
+		set_bit(PG_arch_1, virt_to_page(pg_addr));
+#else
+		if (!VALID_PAGE(virt_to_page(pg_addr)))
+			printk("Invalid addr %lx!!!\n", pg_addr);
+#endif
+		pg_addr += PAGE_SIZE;
+	}
+}
+
+/*
+ * Unmap a single streaming mode DMA translation.  The dma_addr and size must match what
+ * was provided for in a previous swiotlb_map_single call.  All other usages are
+ * undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see whatever the
+ * device wrote there.
+ */
+void
+swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
+{
+	char *dma_addr = phys_to_virt(pci_addr);
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+		unmap_single(hwdev, dma_addr, size, direction);
+	else if (direction == PCI_DMA_FROMDEVICE)
+		mark_clean(dma_addr, size);
+}
+
+/*
+ * Make physical memory consistent for a single streaming mode DMA translation after a
+ * transfer.
+ *
+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer using the cpu,
+ * yet do not wish to teardown the PCI dma mapping, you must call this function before
+ * doing so.  At the next point you give the PCI dma address back to the card, the device
+ * again owns the buffer.
+ */
+void
+swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
+{
+	char *dma_addr = phys_to_virt(pci_addr);
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+		sync_single(hwdev, dma_addr, size, direction);
+	else if (direction == PCI_DMA_FROMDEVICE)
+		mark_clean(dma_addr, size);
+}
+
+/*
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.  This is the
+ * scatter-gather version of the above swiotlb_map_single interface.  Here the scatter
+ * gather list elements are each tagged with the appropriate dma address and length.  They
+ * are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ *       DMA address/length pairs than there are SG table elements.
+ *       (for example via virtual mapping capabilities)
+ *       The routine returns the number of addr/length pairs actually
+ *       used, at most nents.
+ *
+ * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
+ */
+int
+swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+{
+	int i;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	for (i = 0; i < nelems; i++, sg++) {
+		sg->orig_address = sg->address;
+		if ((virt_to_phys(sg->address) & ~hwdev->dma_mask) != 0) {
+			sg->address = map_single(hwdev, sg->address, sg->length, direction);
+		}
+	}
+	return nelems;
+}
+
+/*
+ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules concerning calls
+ * here are the same as for swiotlb_unmap_single() above.
+ */
+void
+swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+{
+	int i;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	for (i = 0; i < nelems; i++, sg++)
+		if (sg->orig_address != sg->address) {
+			unmap_single(hwdev, sg->address, sg->length, direction);
+			sg->address = sg->orig_address;
+		} else if (direction == PCI_DMA_FROMDEVICE)
+			mark_clean(sg->address, sg->length);
+}
+
+/*
+ * Make physical memory consistent for a set of streaming mode DMA translations after a
+ * transfer.
+ *
+ * The same as swiotlb_dma_sync_single but for a scatter-gather list, same rules and
+ * usage.
+ */
+void
+swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+{
+	int i;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	for (i = 0; i < nelems; i++, sg++)
+		if (sg->orig_address != sg->address)
+			sync_single(hwdev, sg->address, sg->length, direction);
+}
+
+unsigned long
+swiotlb_dma_address (struct scatterlist *sg)
+{
+	return virt_to_phys(sg->address);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/mm/Makefile linux/arch/ia64/mm/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/mm/Makefile	Sun Feb  6 18:42:40 2000
+++ linux/arch/ia64/mm/Makefile	Thu Jan  4 12:50:17 2001
@@ -8,7 +8,7 @@
 # Note 2! The CFLAGS definition is now in the main makefile...
 
 O_TARGET := mm.o
-#O_OBJS	 := ioremap.o
-O_OBJS	 := init.o fault.o tlb.o extable.o
+
+obj-y	 := init.o fault.o tlb.o extable.o
 
 include $(TOPDIR)/Rules.make
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/mm/fault.c linux/arch/ia64/mm/fault.c
--- v2.4.0-prerelease/linux/arch/ia64/mm/fault.c	Thu Jun 22 07:09:45 2000
+++ linux/arch/ia64/mm/fault.c	Thu Jan  4 12:50:17 2001
@@ -94,7 +94,7 @@
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
+	switch (handle_mm_fault(mm, vma, address, mask) != 0) {
 	      case 1:
 		++current->min_flt;
 		break;
@@ -119,19 +119,27 @@
 	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
 		if (!(vma->vm_flags & VM_GROWSDOWN))
 			goto bad_area;
+		if (rgn_index(address) != rgn_index(vma->vm_start)
+		    || rgn_offset(address) >= RGN_MAP_LIMIT)
+			goto bad_area;
 		if (expand_stack(vma, address))
 			goto bad_area;
-	} else if (expand_backing_store(prev_vma, address))
-		goto bad_area;
+	} else {
+		vma = prev_vma;
+		if (rgn_index(address) != rgn_index(vma->vm_start)
+		    || rgn_offset(address) >= RGN_MAP_LIMIT)
+			goto bad_area;
+		if (expand_backing_store(vma, address))
+			goto bad_area;
+	}
 	goto good_area;
 
   bad_area:
 	up(&mm->mmap_sem);
 	if (isr & IA64_ISR_SP) {
 		/*
-		 * This fault was due to a speculative load set the
-		 * "ed" bit in the psr to ensure forward progress
-		 * (target register will get a NaT).
+		 * This fault was due to a speculative load set the "ed" bit in the psr to
+		 * ensure forward progress (target register will get a NaT).
 		 */
 		ia64_psr(regs)->ed = 1;
 		return;
@@ -146,6 +154,15 @@
 	}
 
   no_context:
+	if (isr & IA64_ISR_SP) {
+		/*
+		 * This fault was due to a speculative load set the "ed" bit in the psr to
+		 * ensure forward progress (target register will get a NaT).
+		 */
+		ia64_psr(regs)->ed = 1;
+		return;
+	}
+
 	fix = search_exception_table(regs->cr_iip);
 	if (fix) {
 		regs->r8 = -EFAULT;
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/mm/init.c linux/arch/ia64/mm/init.c
--- v2.4.0-prerelease/linux/arch/ia64/mm/init.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/ia64/mm/init.c	Thu Jan  4 12:50:17 2001
@@ -1,8 +1,8 @@
 /*
  * Initialize MMU support.
  *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <linux/config.h>
 #include <linux/kernel.h>
@@ -19,6 +19,7 @@
 #include <asm/efi.h>
 #include <asm/ia32.h>
 #include <asm/io.h>
+#include <asm/machvec.h>
 #include <asm/pgalloc.h>
 #include <asm/sal.h>
 #include <asm/system.h>
@@ -303,7 +304,7 @@
 		return 0;
 	}
 	flush_page_to_ram(page);
-	set_pte(pte, page_pte_prot(page, PAGE_GATE));
+	set_pte(pte, mk_pte(page, PAGE_GATE));
 	/* no need for flush_tlb */
 	return page;
 }
@@ -311,7 +312,12 @@
 void __init
 ia64_rid_init (void)
 {
-	unsigned long flags, rid, pta, impl_va_msb;
+	unsigned long flags, rid, pta, impl_va_bits;
+#ifdef CONFIG_DISABLE_VHPT
+#	define VHPT_ENABLE_BIT	0
+#else
+#	define VHPT_ENABLE_BIT	1
+#endif
 
 	/* Set up the kernel identity mappings (regions 6 & 7) and the vmalloc area (region 5): */
 	ia64_clear_ic(flags);
@@ -328,44 +334,46 @@
 	__restore_flags(flags);
 
 	/*
-	 * Check if the virtually mapped linear page table (VMLPT)
-	 * overlaps with a mapped address space.  The IA-64
-	 * architecture guarantees that at least 50 bits of virtual
-	 * address space are implemented but if we pick a large enough
-	 * page size (e.g., 64KB), the VMLPT is big enough that it
-	 * will overlap with the upper half of the kernel mapped
-	 * region.  I assume that once we run on machines big enough
-	 * to warrant 64KB pages, IMPL_VA_MSB will be significantly
-	 * bigger, so we can just adjust the number below to get
-	 * things going.  Alternatively, we could truncate the upper
-	 * half of each regions address space to not permit mappings
-	 * that would overlap with the VMLPT.  --davidm 99/11/13
+	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
+	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
+	 * virtual address space are implemented but if we pick a large enough page size
+	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
+	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
+	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
+	 * problem in practice.  Alternatively, we could truncate the top of the mapped
+	 * address space to not permit mappings that would overlap with the VMLPT.
+	 * --davidm 00/12/06
+	 */
+#	define pte_bits			3
+#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
+	/*
+	 * The virtual page table has to cover the entire implemented address space within
+	 * a region even though not all of this space may be mappable.  The reason for
+	 * this is that the Access bit and Dirty bit fault handlers perform
+	 * non-speculative accesses to the virtual page table, so the address range of the
+	 * virtual page table itself needs to be covered by virtual page table.
 	 */
-#	define ld_pte_size		3
-#	define ld_max_addr_space_pages	3*(PAGE_SHIFT - ld_pte_size) /* max # of mappable pages */
-#	define ld_max_addr_space_size	(ld_max_addr_space_pages + PAGE_SHIFT)
-#	define ld_max_vpt_size		(ld_max_addr_space_pages + ld_pte_size)
+#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
 #	define POW2(n)			(1ULL << (n))
-	impl_va_msb = ffz(~my_cpu_data.unimpl_va_mask) - 1;
 
-	if (impl_va_msb < 50 || impl_va_msb > 60)
-		panic("Bogus impl_va_msb value of %lu!\n", impl_va_msb);
+	impl_va_bits = ffz(~my_cpu_data.unimpl_va_mask);
+
+	if (impl_va_bits < 51 || impl_va_bits > 61)
+		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
+
+	/* place the VMLPT at the end of each page-table mapped region: */
+	pta = POW2(61) - POW2(vmlpt_bits);
 
-	if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(impl_va_msb))
+	if (POW2(mapped_space_bits) >= pta)
 		panic("mm/init: overlap between virtually mapped linear page table and "
 		      "mapped kernel space!");
-	pta = POW2(61) - POW2(impl_va_msb);
-#ifndef CONFIG_DISABLE_VHPT
 	/*
 	 * Set the (virtually mapped linear) page table address.  Bit
 	 * 8 selects between the short and long format, bits 2-7 the
 	 * size of the table, and bit 0 whether the VHPT walker is
 	 * enabled.
 	 */
-	ia64_set_pta(pta | (0<<8) | ((3*(PAGE_SHIFT-3)+3)<<2) | 1);
-#else
-	ia64_set_pta(pta | (0<<8) | ((3*(PAGE_SHIFT-3)+3)<<2) | 0);
-#endif
+	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
 }
 
 /*
@@ -420,6 +428,15 @@
 {
 	extern char __start_gate_section[];
 	long reserved_pages, codesize, datasize, initsize;
+
+#ifdef CONFIG_PCI
+	/*
+	 * This needs to be called _after_ the command line has been parsed but _before_
+	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
+	 * been freed.
+	 */
+	platform_pci_dma_init();
+#endif
 
 	if (!mem_map)
 		BUG();
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/mm/tlb.c linux/arch/ia64/mm/tlb.c
--- v2.4.0-prerelease/linux/arch/ia64/mm/tlb.c	Fri Aug 11 19:09:06 2000
+++ linux/arch/ia64/mm/tlb.c	Thu Jan  4 12:50:17 2001
@@ -6,6 +6,8 @@
  *
  * 08/02/00 A. Mallick <asit.k.mallick@intel.com>	
  *		Modified RID allocation for SMP 
+ *          Goutham Rao <goutham.rao@intel.com>
+ *              IPI based ptc implementation and A-step IPI implementation.
  */
 #include <linux/config.h>
 #include <linux/init.h>
@@ -17,6 +19,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/pal.h>
+#include <asm/delay.h>
 
 #define SUPPORTED_PGBITS (			\
 		1 << _PAGE_SIZE_256M |		\
@@ -33,15 +36,10 @@
 struct ia64_ctx ia64_ctx = {
 	lock:	SPIN_LOCK_UNLOCKED,
 	next:	1,
-	limit:	(1UL << IA64_HW_CONTEXT_BITS)
+	limit:	(1 << 15) - 1,		/* start out with the safe (architected) limit */
+	max_ctx: ~0U
 };
 
- /*
-  * Put everything in a struct so we avoid the global offset table whenever
-  * possible.
-  */
-ia64_ptce_info_t ia64_ptce_info;
-
 /*
  * Seralize usage of ptc.g 
  */
@@ -99,9 +97,22 @@
 	/*
 	 * Wait for other CPUs to finish purging entries.
 	 */
+#if (defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC))
+	{
+		unsigned long start = ia64_get_itc();
+		while (atomic_read(&flush_cpu_count) > 0) {
+			if ((ia64_get_itc() - start) > 40000UL) {
+				atomic_set(&flush_cpu_count, smp_num_cpus - 1);
+				smp_send_flush_tlb();
+				start = ia64_get_itc();
+			}
+		}
+	}
+#else
 	while (atomic_read(&flush_cpu_count)) {
 		/* Nothing */
 	}
+#endif
 	if (!(flags & IA64_PSR_I)) {
 		local_irq_disable();
 		ia64_set_tpr(saved_tpr);
@@ -117,12 +128,12 @@
 void
 wrap_mmu_context (struct mm_struct *mm)
 {
+	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
 	struct task_struct *tsk;
-	unsigned long tsk_context;
 
-	if (ia64_ctx.next >= (1UL << IA64_HW_CONTEXT_BITS)) 
+	if (ia64_ctx.next > max_ctx)
 		ia64_ctx.next = 300;	/* skip daemons */
-	ia64_ctx.limit = (1UL << IA64_HW_CONTEXT_BITS);
+	ia64_ctx.limit = max_ctx + 1;
 
 	/*
 	 * Scan all the task's mm->context and set proper safe range
@@ -137,9 +148,9 @@
 		if (tsk_context == ia64_ctx.next) {
 			if (++ia64_ctx.next >= ia64_ctx.limit) {
 				/* empty range: reset the range limit and start over */
-				if (ia64_ctx.next >= (1UL << IA64_HW_CONTEXT_BITS)) 
+				if (ia64_ctx.next > max_ctx) 
 					ia64_ctx.next = 300;
-				ia64_ctx.limit = (1UL << IA64_HW_CONTEXT_BITS);
+				ia64_ctx.limit = max_ctx + 1;
 				goto repeat;
 			}
 		}
@@ -153,12 +164,13 @@
 void
 __flush_tlb_all (void)
 {
-	unsigned long i, j, flags, count0, count1, stride0, stride1, addr = ia64_ptce_info.base;
+	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;
 
-	count0  = ia64_ptce_info.count[0];
-	count1  = ia64_ptce_info.count[1];
-	stride0 = ia64_ptce_info.stride[0];
-	stride1 = ia64_ptce_info.stride[1];
+	addr    = my_cpu_data.ptce_base;
+	count0  = my_cpu_data.ptce_count[0];
+	count1  = my_cpu_data.ptce_count[1];
+	stride0 = my_cpu_data.ptce_stride[0];
+	stride1 = my_cpu_data.ptce_stride[1];
 
 	local_irq_save(flags);
 	for (i = 0; i < count0; ++i) {
@@ -182,7 +194,11 @@
 
 	if (mm != current->active_mm) {
 		/* this does happen, but perhaps it's not worth optimizing for? */
+#ifdef CONFIG_SMP
+		flush_tlb_all();
+#else
 		mm->context = 0;
+#endif
 		return;
 	}
 
@@ -230,6 +246,14 @@
 void __init
 ia64_tlb_init (void)
 {
-	ia64_get_ptce(&ia64_ptce_info);
+	ia64_ptce_info_t ptce_info;
+
+	ia64_get_ptce(&ptce_info);
+	my_cpu_data.ptce_base = ptce_info.base;
+	my_cpu_data.ptce_count[0] = ptce_info.count[0];
+	my_cpu_data.ptce_count[1] = ptce_info.count[1];
+	my_cpu_data.ptce_stride[0] = ptce_info.stride[0];
+	my_cpu_data.ptce_stride[1] = ptce_info.stride[1];
+
 	__flush_tlb_all();		/* nuke left overs from bootstrapping... */
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/Makefile linux/arch/ia64/sn/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/sn/Makefile	Thu Mar 30 16:56:04 2000
+++ linux/arch/ia64/sn/Makefile	Thu Jan  4 13:00:15 2001
@@ -5,15 +5,10 @@
 # Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
 #
 
-CFLAGS          :=     $(CFLAGS) -DCONFIG_SGI_SN1 -DSN1 -DSN -DSOFTSDV \
-			-DLANGUAGE_C=1 -D_LANGUAGE_C=1
-AFLAGS          :=      $(AFLAGS) -DCONFIG_SGI_SN1 -DSN1 -DSOFTSDV
-
-.S.s:
-	$(CPP) $(AFLAGS) -o $*.s $<
-.S.o:
-	$(CC) $(AFLAGS) -c -o $*.o $<
-
+EXTRA_CFLAGS	:= -DSN -DLANGUAGE_C=1 -D_LANGUAGE_C=1 -I. -DBRINGUP \
+		   -DDIRECT_L1_CONSOLE -DNUMA_BASE -DSIMULATED_KLGRAPH \
+		   -DNUMA_MIGR_CONTROL -DLITTLE_ENDIAN -DREAL_HARDWARE \
+		   -DNEW_INTERRUPTS -DCONFIG_IA64_SGI_IO
 all: sn.a
 
 O_TARGET        = sn.a
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/Makefile linux/arch/ia64/sn/fprom/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/Makefile	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/Makefile	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,30 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2000 Silicon Graphics, Inc.
+# Copyright (C) Jack Steiner (steiner@sgi.com)
+#
+
+TOPDIR=../../../..
+HPATH           = $(TOPDIR)/include
+
+LIB     = ../../lib/lib.a
+
+OBJ=fpromasm.o main.o  fw-emu.o fpmem.o
+
+fprom: $(OBJ)
+	$(LD) -static -Tfprom.lds -o fprom $(OBJ) $(LIB)
+
+.S.o:
+	$(CC)  -D__ASSEMBLY__ $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $<
+.c.o:
+	$(CC)  $(CFLAGS) $(CFLAGS_KERNEL) -c -o $*.o $<
+
+clean:
+	rm -f *.o fprom
+
+
+include $(TOPDIR)/Rules.make
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/README linux/arch/ia64/sn/fprom/README
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/README	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/README	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,85 @@
+This directory contains the files required to build
+the fake PROM image that is currently being used to
+boot IA64 kernels running under the SGI Medusa kernel.
+
+The FPROM currently provides the following functions:
+
+	- PAL emulation for all PAL calls we've made so far.
+	- SAL emulation for all SAL calls we've made so far.
+	- EFI emulation for all EFI calls we've made so far.
+	- builds the "ia64_bootparam" structure that is
+	  passed to the kernel from SAL. This structure 
+	  shows the cpu & memory configurations.
+	- supports medusa boottime options for changing
+	  the number of cpus present
+	- supports medusa boottime options for changing
+	  the memory configuration.
+
+
+
+At some point, this fake PROM will be replaced by the
+real PROM.
+
+
+
+
+To build a fake PROM, cd to this directory & type:
+
+	make
+
+This will (or should) build a fake PROM named "fprom".
+
+
+
+
+Use this fprom image when booting the Medusa simulator. The
+control file used to boot Medusa should include the 
+following lines:
+
+	load fprom
+	load vmlinux
+	sr pc 0x100000
+	sr g 9 <address of kernel _start function> #(currently 0xe000000000520000)
+
+NOTE: There is a script "runsim" in this directory that can be used to
+simplify setting up an environment for running under Medusa.
+
+
+
+
+The following parameters may be passed to the fake PROM to
+control the PAL/SAL/EFI parameters passed to the kernel:
+
+	GR[8] = # of cpus
+	GR[9] = address of primary entry point into the kernel
+	GR[20] = memory configuration for node 0
+	GR[21] = memory configuration for node 1
+	GR[22] = memory configuration for node 2
+	GR[23] = memory configuration for node 3
+
+
+Registers GR[20] - GR[23] contain information to specify the
+amount of memory present on nodes 0-3.
+
+  - if nothing is specified (all registers are 0), the configuration
+    defaults to 8 MB on node 0.
+
+  - a mem config entry for node N is passed in GR[20+N]
+
+  - a mem config entry consists of 8 hex digits. Each digit gives the
+    amount of physical memory available on the node starting at
+    1GB*<dn>, where dn is the digit number. The amount of memory
+    is 8MB*2**<d>. (If <d> = 0, the memory size is 0).
+
+    SN1 doesn't support DIMMs this small, but small memory systems
+    boot faster on Medusa.
+
+
+
+An example helps a lot. The following specifies that node 0 has
+physical memory 0 to 8MB and 1GB to 1GB+32MB, and that node 1 has
+64MB starting at address 0 of the node which is 8GB.
+
+      gr[20] = 0x21           # 0 to 8MB, 1GB to 1GB+32MB
+      gr[21] = 0x4            # 8GB to 8GB+64MB
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fpmem.c linux/arch/ia64/sn/fprom/fpmem.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fpmem.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/fpmem.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,200 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+/*
+ * FPROM EFI memory descriptor build routines
+ *
+ * 	- Routines to build the EFI memory descriptor map
+ * 	- Should also be usable by the SGI SN1 prom to convert
+ * 	  klconfig to efi_memmap
+ */
+
+#include <asm/efi.h>
+#include "fpmem.h"
+
+/*
+ * args points to a layout in memory like this
+ *
+ *		32 bit		32 bit
+ *
+ * 		numnodes	numcpus
+ *
+ *		16 bit   16 bit		   32 bit
+ *		nasid0	cpuconf		membankdesc0
+ *		nasid1	cpuconf		membankdesc1
+ *			   .
+ *			   .
+ *			   .
+ *			   .
+ *			   .
+ */
+
+sn_memmap_t	*sn_memmap ;
+sn_config_t	*sn_config ;
+
+/*
+ * There is a hole in the node 0 address space. Don't put it
+ * in the memory map
+ */
+#define NODE0_HOLE_SIZE         (20*MB)
+#define NODE0_HOLE_END          (4UL*GB)
+
+#define	MB			(1024*1024)
+#define GB			(1024*MB)
+#define KERNEL_SIZE		(4*MB)
+#define PROMRESERVED_SIZE	(1*MB)
+#define MD_BANK_SHFT 30
+
+#define TO_NODE(_n, _x)		(((long)_n<<33L) | (long)_x)
+
+/*
+ * For SN, this may not take an arg and gets the numnodes from 
+ * the prom variable or by traversing klcfg or promcfg
+ */
+int
+GetNumNodes(void)
+{
+	return sn_config->nodes;
+}
+
+int
+GetNumCpus(void)
+{
+	return sn_config->cpus;
+}
+
+/* For SN1, get the index th nasid */
+
+int
+GetNasid(int index)
+{
+	return sn_memmap[index].nasid ;
+}
+
+node_memmap_t
+GetMemBankInfo(int index)
+{
+	return sn_memmap[index].node_memmap ;
+}
+
+int
+IsCpuPresent(int cnode, int cpu)
+{
+	return  sn_memmap[cnode].cpuconfig & (1<<cpu);
+}
+
+
+/*
+ * Made this into an explicit case statement so that
+ * we can assign specific properties to banks like bank0
+ * actually disabled etc.
+ */
+
+int
+IsBankPresent(int index, node_memmap_t nmemmap)
+{
+	switch (index) {
+		case 0:return nmemmap.b0;
+		case 1:return nmemmap.b1;
+		case 2:return nmemmap.b2;
+		case 3:return nmemmap.b3;
+		case 4:return nmemmap.b4;
+		case 5:return nmemmap.b5;
+		case 6:return nmemmap.b6;
+		case 7:return nmemmap.b7;
+		default:return -1 ;
+	}
+}
+
+int
+GetBankSize(int index, node_memmap_t nmemmap)
+{
+        switch (index) {
+                case 0:
+                case 1:return nmemmap.b01size;
+                case 2:
+                case 3:return nmemmap.b23size;
+                case 4:
+                case 5:return nmemmap.b45size;
+                case 6:
+                case 7:return nmemmap.b67size;
+                default:return -1 ;
+        }
+}
+
+void
+build_mem_desc(efi_memory_desc_t *md, int type, long paddr, long numbytes)
+{
+        md->type = type;
+        md->phys_addr = paddr;
+        md->virt_addr = 0;
+        md->num_pages = numbytes >> 12;
+        md->attribute = EFI_MEMORY_WB;
+}
+
+int
+build_efi_memmap(void *md, int mdsize)
+{
+	int		numnodes = GetNumNodes() ;
+	int		cnode,bank ;
+	int		nasid ;
+	node_memmap_t	membank_info ;
+	int		bsize;
+	int		count = 0 ;
+	long		paddr, hole, numbytes;
+
+
+	for (cnode=0;cnode<numnodes;cnode++) {
+		nasid = GetNasid(cnode) ;
+		membank_info = GetMemBankInfo(cnode) ;
+		for (bank=0;bank<SN1_MAX_BANK_PER_NODE;bank++) {
+			if (IsBankPresent(bank, membank_info)) {
+				bsize = GetBankSize(bank, membank_info) ;
+                                paddr = TO_NODE(nasid, (long)bank<<MD_BANK_SHFT);
+                                numbytes = BankSizeBytes(bsize);
+
+                                /*
+                                 * Check for the node 0 hole. Since banks cant
+                                 * span the hole, we only need to check if the end of
+                                 * the range is the end of the hole.
+                                 */
+                                if (paddr+numbytes == NODE0_HOLE_END)
+                                        numbytes -= NODE0_HOLE_SIZE;
+                                /*
+                                 * UGLY hack - we must skip over the kernel and
+                                 * PROM runtime services but we don't know exactly where it is.
+                                 * So let's just reserve 0-12MB.
+                                 */
+                                if (bank == 0) {
+					hole = (cnode == 0) ? KERNEL_SIZE : PROMRESERVED_SIZE;
+					numbytes -= hole;
+                                        build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole);
+                                        paddr += hole;
+			        	count++ ;
+                                        md += mdsize;
+                                }
+                                build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, numbytes);
+
+			        md += mdsize ;
+			        count++ ;
+			}
+		}
+	}
+	return count ;
+}
+
+void
+build_init(unsigned long args)
+{
+	sn_config = (sn_config_t *) (args);	
+	sn_memmap = (sn_memmap_t *)(args + 8) ; /* SN equiv for this is */
+						/* init to klconfig start */
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fpmem.h linux/arch/ia64/sn/fprom/fpmem.h
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fpmem.h	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/fpmem.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,35 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+
+#include <asm/sn/mmzone_sn1.h>
+
+typedef struct sn_memmap_s
+{
+	short		nasid ;
+	short		cpuconfig;
+	node_memmap_t 	node_memmap ;
+} sn_memmap_t ;
+
+typedef struct sn_config_s
+{
+	int		cpus;
+	int		nodes;
+	sn_memmap_t	memmap[1];		/* start of array */
+} sn_config_t;
+
+
+extern void build_init(unsigned long);
+extern int build_efi_memmap(void *, int);
+extern int GetNumNodes(void);
+extern int GetNumCpus(void);
+extern int IsCpuPresent(int, int);
+extern int GetNasid(int);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fprom.lds linux/arch/ia64/sn/fprom/fprom.lds
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fprom.lds	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/fprom.lds	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,96 @@
+
+OUTPUT_FORMAT("elf64-ia64-little")
+OUTPUT_ARCH(ia64)
+ENTRY(_start)
+SECTIONS
+{
+  v = 0x0000000000000000 ;	/* this symbol is here to make debugging with kdb easier... */
+
+  . = (0x000000000000000  + 0x100000) ;
+
+  _text = .;
+  .text : AT(ADDR(.text) - 0x0000000000000000 )
+    {
+	*(__ivt_section)
+	/* these are not really text pages, but the zero page needs to be in a fixed location: */
+	*(__special_page_section)
+	__start_gate_section = .;
+	*(__gate_section)
+	__stop_gate_section = .;
+	*(.text)
+    }
+
+  /* Global data */
+  _data = .;
+
+  .rodata : AT(ADDR(.rodata) - 0x0000000000000000 )
+	{ *(.rodata) }
+  .opd : AT(ADDR(.opd) - 0x0000000000000000 )
+	{ *(.opd) }
+  .data : AT(ADDR(.data) - 0x0000000000000000 )
+	{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
+
+  __gp = ALIGN (8) + 0x200000;
+
+  .got : AT(ADDR(.got) - 0x0000000000000000 )
+	{ *(.got.plt) *(.got) }
+  /* We want the small data sections together, so single-instruction offsets
+     can access them all, and initialized data all before uninitialized, so
+     we can shorten the on-disk segment size.  */
+  .sdata : AT(ADDR(.sdata) - 0x0000000000000000 )
+	{ *(.sdata) }
+  _edata  =  .;
+  _bss = .;
+  .sbss : AT(ADDR(.sbss) - 0x0000000000000000 )
+	{ *(.sbss) *(.scommon) }
+  .bss : AT(ADDR(.bss) - 0x0000000000000000 )
+	{ *(.bss) *(COMMON) }
+  . = ALIGN(64 / 8);
+  _end = .;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+	*(.text.exit)
+	*(.data.exit)
+	}
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0.  */
+  /* DWARF 1 */
+  .debug          0 : { *(.debug) }
+  .line           0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info     0 : { *(.debug_info) }
+  .debug_abbrev   0 : { *(.debug_abbrev) }
+  .debug_line     0 : { *(.debug_line) }
+  .debug_frame    0 : { *(.debug_frame) }
+  .debug_str      0 : { *(.debug_str) }
+  .debug_loc      0 : { *(.debug_loc) }
+  .debug_macinfo  0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames  0 : { *(.debug_varnames) }
+  /* These must appear regardless of  .  */
+  /* Discard them for now since Intel SoftSDV cannot handle them.
+  .comment 0 : { *(.comment) }
+  .note 0 : { *(.note) }
+  */
+  /DISCARD/ : { *(.comment) }
+  /DISCARD/ : { *(.note) }
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fpromasm.S linux/arch/ia64/sn/fprom/fpromasm.S
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fpromasm.S	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/fpromasm.S	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,314 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ *   (Code copied from other files)
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+
+#define __ASSEMBLY__ 1
+#include "asm/processor.h"
+
+/*
+ * This file contains additional set up code that is needed to get going on
+ * Medusa.  This code should disappear once real hw is available.
+ *
+ * On entry to this routine, the following register values are assumed:
+ *
+ *	gr[8]	- BSP cpu
+ *	pr[9]	- kernel entry address
+ *
+ * NOTE:
+ *   This FPROM may be loaded/executed at an address different from the
+ *   address that it was linked at. The FPROM is linked to run on node 0
+ *   at address 0x100000. If the code in loaded into another node, it
+ *   must be loaded at offset 0x100000 of the node. In addition, the
+ *   FPROM does the following things:
+ *		- determine the base address of the node it is loaded on
+ *		- add the node base to _gp.
+ *		- add the node base to all addresses derived from "movl" 
+ *		  instructions. (I couldn't get GPREL addressing to work)
+ *		  (maybe newer versions of the tools will support this)
+ *		- scan the .got section and add the node base to all
+ *		  pointers in this section.
+ *		- add the node base to all physical addresses in the
+ *		  SAL/PAL/EFI table built by the C code. (This is done
+ *		  in the C code - not here)
+ *		- add the node base to the TLB entries for vmlinux
+ */
+
+#define KERNEL_BASE	0xe000000000000000
+#define PAGESIZE_256M	28
+
+/* 
+ * ar.k0 gets set to IOPB_PA value, on 460gx chipset it should 
+ * be 0x00000ffffc000000, but on snia we use the (inverse swizzled)
+ * IOSPEC_BASE value
+ */
+#define IOPB_PA		0x00000a0000000000 /* inv swizzle IOSPEC_BASE */
+
+#define RR_RID		8
+
+
+
+// ====================================================================================	
+        .text
+        .align 16
+	.global _start
+	.proc _start
+_start:
+
+// Setup psr and rse for system init
+	mov		psr.l = r0;;
+	srlz.d;;
+	invala
+	mov		ar.rsc = r0;;
+	loadrs
+	;;
+
+// Set CALIAS size to zero. We don't use it.
+	movl		r24=0x80000a0001000028;;	// BR_PI_CALIAS_SIZE
+	st8 		[r24]=r0
+
+// Isolate node number we are running on.
+	mov		r6 = ip;;
+	shr		r5 = r6,33;;			// r5 = node number
+	shl		r6 = r5,33			// r6 = base memory address of node
+
+// Set & relocate gp.
+	movl		r1= __gp;;			// Add base memory address
+	add 		r1 = r1,r6			// Relocate to boot node
+
+// Lets figure out who we are & put it in the LID register.
+// The BR_PI_SELF_CPU_NUM register gives us a value of 0-3.
+// This identifies the cpu on the node. 
+// Merge the cpu number with the NASID to generate the LID.
+	movl		r24=0x80000a0001000020;;	// BR_PI_SELF_CPU_NUM
+	ld8 		r25=[r24]			// Fetch PI_SELF
+	movl		r27=0x80000a0001600000;;	// Fetch REVID to get local NASID
+	ld8 		r27=[r27];;
+	extr.u		r27=r27,32,8
+	shl 		r26=r25,16;;			// Align local cpu# to lid.eid
+	shl 		r27=r27,24;;			// Align NASID to lid.id
+	or  		r26=r26,r27;;			// build the LID
+	mov 		cr.lid=r26			// Now put it in the LID register
+	
+	movl		r2=FPSR_DEFAULT;;
+	mov 		ar.fpsr=r2
+	movl		sp = bootstacke-16;;
+	add 		sp = sp,r6			// Relocate to boot node			
+
+// Save the NASID that we are loaded on.
+	movl		r2=base_nasid;;			// Save base_nasid for C code
+	add 		r2 = r2,r6;;			// Relocate to boot node
+  	st8 		[r2]=r5				// Uncond st8 - same on all cpus
+
+// Save the kernel entry address. It is passed in r9 on one of
+// the cpus.
+	movl		r2=bsp_entry_pc
+	cmp.ne		p6,p0=r9,r0;;
+	add 		r2 = r2,r6;;			// Relocate to boot node
+(p6)  	st8 		[r2]=r9				// Uncond st8 - same on all cpus
+
+
+// The following can ONLY be done by 1 cpu. Lets set a lock - the
+// cpu that gets it does the initialization. The rest just spin waiting
+// til initialization is complete.
+	movl		r22 = initlock;;
+	add		r22 = r22,r6			// Relocate to boot node
+	mov		r23 = 1;;
+	xchg8		r23 = [r22],r23;;
+	cmp.eq 		p6,p0 = 0,r23
+(p6)	br.cond.spnt.few init
+1:	ld4		r23 = [r22];;
+	cmp.eq		p6,p0 = 1,r23
+(p6)	br.cond.sptk	1b
+	br		initx
+
+// Add base address of node memory to each pointer in the .got section.
+init:	movl		r16 = _GLOBAL_OFFSET_TABLE_;;
+	add		r16 = r16,r6;;			// Relocate to boot node
+1: 	ld8		r17 = [r16];;
+	cmp.eq		p6,p7=0,r17
+(p6)	br.cond.sptk.few.clr 2f;;
+	add		r17 = r17,r6;;			// Relocate to boot node
+	st8		[r16] = r17,8
+	br		1b
+2:
+	mov		r23 = 2;;			// All done, release the spinning cpus
+	st4		[r22] = r23
+initx:
+
+//
+//	I/O-port space base address:
+//
+	movl		r2 = IOPB_PA;;
+	mov		ar.k0 = r2
+
+
+// Now call main & pass it the current LID value.
+	alloc 		r0=ar.pfs,0,0,2,0
+	mov    		r32=r26
+	mov   		r33=r8;;
+	br.call.sptk.few rp=fmain
+	
+// Initialize Region Registers
+//
+	mov		r10 = r0
+	mov		r2 = (13<<2) 
+	mov		r3 = r0;;
+1:	cmp4.gtu	p6,p7 = 7, r3
+	dep		r10 = r3, r10, 61, 3
+	dep		r2 = r3, r2, RR_RID, 4;;
+(p7)	dep		r2 = 0, r2, 0, 1;;
+(p6)	dep		r2 = -1, r2, 0, 1;;
+	mov		rr[r10] = r2
+	add		r3 = 1, r3;;
+	srlz.d;;
+	cmp4.gtu	p6,p0 = 8, r3
+(p6)	br.cond.sptk.few.clr 1b
+
+//
+// Return value indicates if we are the BSP or AP.
+// 	   1 = BSP, 0 = AP
+	mov             cr.tpr=r0;;
+	cmp.eq		p6,p0=r8,r0
+(p6)	br.cond.spnt	slave
+
+//
+// Initialize the protection key registers with only pkr[0] = valid.
+//
+// Should be initialized in accordance with the OS.
+//
+	mov		r2 = 1
+	mov		r3 = r0;;
+	mov		pkr[r3] = r2;;
+	srlz.d;;
+	mov		r2 = r0
+
+1:	add		r3 = r3, r0, 1;;		// increment PKR
+	cmp.gtu		p6, p0 = 16, r3;;
+(p6)	mov		pkr[r3] = r2
+(p6)	br.cond.sptk.few.clr 1b
+
+	mov		ar.rnat = r0			// clear RNAT register
+
+//
+// Setup system address translation for kernel
+//
+// Note: The setup of Kernel Virtual address space can be done by the
+// C code of the boot loader.
+//
+//
+
+#define LINUX_PAGE_OFFSET       0xe000000000000000
+#define ITIR(key, ps)           ((key<<8) | (ps<<2))
+#define ITRGR(ed,ar,ma)         ((ed<<52) | (ar<<9) | (ma<<2) | 0x61)
+
+#define AR_RX                   1                       // RX permission
+#define AR_RW                   4                       // RW permission
+#define MA_WB                   0                       // WRITEBACK memory attribute
+
+#define TLB_PAGESIZE		28			// Use 256MB pages for now.
+	mov		r16=r5
+
+//
+//     text section
+//
+        movl            r2 = LINUX_PAGE_OFFSET;;        // Set up IFA with VPN of linux
+        mov             cr.ifa = r2
+        movl            r3 = ITIR(0,TLB_PAGESIZE);;     // Set ITIR to default pagesize
+        mov             cr.itir = r3
+
+        shl             r4 = r16,33;;                   // physical addr of start of node
+        movl            r5 = ITRGR(1,AR_RX,MA_WB);;     // TLB attributes
+        or              r10=r4,r5;;
+
+        itr.i           itr[r0] = r10;;                   // Dropin ITR entry
+	srlz.i;;
+
+//
+//     data section
+//
+        movl            r2 = LINUX_PAGE_OFFSET;;        // Set up IFA with VPN of linux
+        mov             cr.ifa = r2
+        movl            r3 = ITIR(0,TLB_PAGESIZE);;     // Set ITIR to default pagesize
+        mov             cr.itir = r3
+
+        shl             r4 = r16,33;;                   // physical addr of start of node
+        movl            r5 = ITRGR(1,AR_RW,MA_WB);;     // TLB attributes
+        or              r10=r4,r5;;
+
+        itr.d           dtr[r0] = r10;;                 // Dropin DTR entry
+	srlz.d;;
+
+
+
+
+//
+// Turn on address translation, interrupt collection, psr.ed, protection key.
+// Interrupts (PSR.i) are still off here.
+//
+
+	movl		r3 = (	IA64_PSR_BN | \
+				IA64_PSR_AC | \
+				IA64_PSR_IT | \
+				IA64_PSR_DB | \
+				IA64_PSR_DA | \
+				IA64_PSR_RT | \
+				IA64_PSR_DT | \
+				IA64_PSR_IC   \
+			     )
+	;;
+	mov		cr.ipsr = r3
+
+//
+// Go to kernel C startup routines
+//	Need to do a "rfi" in order set "it" and "ed" bits in the PSR.
+//	This is the only way to set them.
+
+	movl		r2=bsp_entry_pc;;
+	add 		r2 = r2,r6;;			// Relocate to boot node
+	ld8		r2=[r2];;
+	mov		cr.iip = r2
+	srlz.d;;
+	rfi;;
+	.endp		_start
+
+// Slave processors come here to spin til they get an interrupt. Then they launch themselves to
+// the place ap_entry points. No initialization is necessary - the kernel makes no
+// assumptions about state on this entry.
+//	Note: should verify that the interrupt we got was really the ap_wakeup
+//	      interrupt but this should not be an issue on medusa
+slave:
+	nop.i		0x8beef				// Medusa - put cpu to sleep til interrupt occurs
+	mov		r8=cr.irr0;;			// Check for interrupt pending.
+	cmp.eq		p6,p0=r8,r0
+(p6)	br.cond.sptk	slave;;
+
+	mov		r8=cr.ivr;;			// Got one. Must read ivr to accept it
+	srlz.d;;
+	mov		cr.eoi=r0;;			// must write eoi to clear
+	movl		r8=ap_entry;;			// now jump to kernel entry
+	add 		r8 = r8,r6;;			// Relocate to boot node
+	ld8		r9=[r8],8;;
+	ld8		r1=[r8]
+	mov		b0=r9;;
+	br		b0
+
+// Here is the kernel stack used for the fake PROM
+	.bss
+	.align		16384
+bootstack:
+	.skip		16384
+bootstacke:
+initlock:
+	data4
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fw-emu.c linux/arch/ia64/sn/fprom/fw-emu.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/fw-emu.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/fw-emu.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,494 @@
+/*
+ * PAL & SAL emulation.
+ *
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+#include <linux/config.h>
+
+#include <asm/efi.h>
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/processor.h>
+#include <asm/acpi-ext.h>
+#include "fpmem.h"
+
+#define MB	(1024*1024UL)
+#define GB	(MB*1024UL)
+
+#define FPROM_BUG()		do {while (1);} while (0)
+#define MAX_NODES		128
+#define MAX_LSAPICS		512
+#define MAX_CPUS		512
+#define MAX_CPUS_NODE		4
+#define CPUS_PER_NODE		4
+#define CPUS_PER_FSB		2
+#define CPUS_PER_FSB_MASK	(CPUS_PER_FSB-1)
+
+#define NUM_EFI_DESCS		2
+
+typedef union ia64_nasid_va {
+        struct {
+                unsigned long off   : 33;       /* intra-region offset */
+		unsigned long nasid :  7;	/* NASID */
+		unsigned long off2  : 21;	/* fill */
+                unsigned long reg   :  3;       /* region number */
+        } f;
+        unsigned long l;
+        void *p;
+} ia64_nasid_va;
+
+typedef struct {
+	unsigned long	pc;
+	unsigned long	gp;
+} func_ptr_t;
+ 
+#define IS_VIRTUAL_MODE() 	 ({struct ia64_psr psr; asm("mov %0=psr" : "=r"(psr)); psr.dt;})
+#define ADDR_OF(p)		(IS_VIRTUAL_MODE() ? ((void*)((long)(p)+PAGE_OFFSET)) : ((void*) (p)))
+#define __fwtab_pa(n,x)		({ia64_nasid_va _v; _v.l = (long) (x); _v.f.nasid = (x) ? (n) : 0; _v.f.reg = 0; _v.l;})
+
+/*
+ * The following variables are passed through registers from the configuration file and
+ * are set via the _start function.
+ */
+long		base_nasid;
+long		num_cpus;
+long		bsp_entry_pc=0;
+long		num_nodes;
+long		app_entry_pc;
+int		bsp_lid;
+func_ptr_t	ap_entry;
+
+
+static char fw_mem[(  sizeof(efi_system_table_t)
+		    + sizeof(efi_runtime_services_t)
+		    + NUM_EFI_DESCS*sizeof(efi_config_table_t)
+		    + sizeof(struct ia64_sal_systab)
+		    + sizeof(struct ia64_sal_desc_entry_point)
+		    + sizeof(struct ia64_sal_desc_ap_wakeup)
+		    + sizeof(acpi_rsdp_t)
+		    + sizeof(acpi_rsdt_t)
+		    + sizeof(acpi_sapic_t)
+		    + MAX_LSAPICS*(sizeof(acpi_entry_lsapic_t))
+		    + (1+8*MAX_NODES)*(sizeof(efi_memory_desc_t))
+		    + sizeof(ia64_sal_desc_ptc_t) +
+		    + MAX_NODES*sizeof(ia64_sal_ptc_domain_info_t) +
+		    + MAX_CPUS*sizeof(ia64_sal_ptc_domain_proc_entry_t) +
+		    + 1024)] __attribute__ ((aligned (8)));
+
+/*
+ * Very ugly, but we need this in the simulator only.  Once we run on
+ * real hw, this can all go away.
+ */
+extern void pal_emulator_static (void);
+
+asm ("
+	.text
+	.proc pal_emulator_static
+pal_emulator_static:
+	mov r8=-1
+	cmp.eq p6,p7=6,r28		/* PAL_PTCE_INFO */
+(p7)	br.cond.sptk.few 1f
+	;;
+	mov r8=0			/* status = 0 */
+	movl r9=0x500000000		/* tc.base */
+	movl r10=0x0000000200000003	/* count[0], count[1] */
+	movl r11=0x1000000000002000	/* stride[0], stride[1] */
+	br.cond.sptk.few rp
+
+1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */
+(p7)	br.cond.sptk.few 1f
+	mov r8=0			/* status = 0 */
+	movl r9 =0x100000064		/* proc_ratio (1/100) */
+	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
+	movl r11=0x10000000a		/* itc_ratio<<32 (1/100) */
+
+1:	cmp.eq p6,p7=22,r28		/* PAL_MC_DRAIN */
+(p7)	br.cond.sptk.few 1f
+	mov r8=0
+	br.cond.sptk.few rp
+
+1:	cmp.eq p6,p7=23,r28		/* PAL_MC_EXPECTED */
+(p7)	br.cond.sptk.few 1f
+	mov r8=0
+	br.cond.sptk.few rp
+
+1:	br.cond.sptk.few rp
+	.endp pal_emulator_static\n");
+
+
+static efi_status_t
+efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
+{
+	if (tm) {
+		memset(tm, 0, sizeof(*tm));
+		tm->year = 2000;
+		tm->month = 2;
+		tm->day = 13;
+		tm->hour = 10;
+		tm->minute = 11;
+		tm->second = 12;
+	}
+
+	if (tc) {
+		tc->resolution = 10;
+		tc->accuracy = 12;
+		tc->sets_to_zero = 1;
+	}
+
+	return EFI_SUCCESS;
+}
+
+static void
+efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data)
+{
+	while(1);	/* Is there a pseudo-op to stop medusa */
+}
+
+static efi_status_t
+efi_success (void)
+{
+	return EFI_SUCCESS;
+}
+
+static efi_status_t
+efi_unimplemented (void)
+{
+	return EFI_UNSUPPORTED;
+}
+
+static long
+sal_emulator (long index, unsigned long in1, unsigned long in2,
+	      unsigned long in3, unsigned long in4, unsigned long in5,
+	      unsigned long in6, unsigned long in7)
+{
+	register long r9 asm ("r9") = 0;
+	register long r10 asm ("r10") = 0;
+	register long r11 asm ("r11") = 0;
+	long status;
+
+	/*
+	 * Don't do a "switch" here since that gives us code that
+	 * isn't self-relocatable.
+	 */
+	status = 0;
+	if (index == SAL_FREQ_BASE) {
+		switch (in1) {
+		      case SAL_FREQ_BASE_PLATFORM:
+			r9 = 500000000;
+			break;
+
+		      case SAL_FREQ_BASE_INTERVAL_TIMER:
+			/*
+			 * Is this supposed to be the cr.itc frequency
+			 * or something platform specific?  The SAL
+			 * doc ain't exactly clear on this...
+			 */
+			r9 = 700000000;
+			break;
+
+		      case SAL_FREQ_BASE_REALTIME_CLOCK:
+			r9 = 1;
+			break;
+
+		      default:
+			status = -1;
+			break;
+		}
+	} else if (index == SAL_SET_VECTORS) {
+		if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
+			func_ptr_t	*fp;
+			fp = ADDR_OF(&ap_entry);
+			fp->pc = in2;
+			fp->gp = in3;
+		} else {
+			status = -1;
+		}
+		;
+	} else if (index == SAL_GET_STATE_INFO) {
+		;
+	} else if (index == SAL_GET_STATE_INFO_SIZE) {
+		;
+	} else if (index == SAL_CLEAR_STATE_INFO) {
+		;
+	} else if (index == SAL_MC_RENDEZ) {
+		;
+	} else if (index == SAL_MC_SET_PARAMS) {
+		;
+	} else if (index == SAL_CACHE_FLUSH) {
+		;
+	} else if (index == SAL_CACHE_INIT) {
+		;
+	} else if (index == SAL_UPDATE_PAL) {
+		;
+	} else {
+		status = -1;
+	}
+	asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
+	return status;
+}
+
+
+/*
+ * This is here to work around a bug in egcs-1.1.1b that causes the
+ * compiler to crash (seems like a bug in the new alias analysis code).
+ */
+void *
+id (long addr)
+{
+	return (void *) addr;
+}
+
+
+/*
+ * Fix the addresses in a function pointer by adding base node address
+ * to pc & gp.
+ */
+void
+fix_function_pointer(void *fp)
+{
+	func_ptr_t	*_fp;
+
+	_fp = fp;
+	_fp->pc = __fwtab_pa(base_nasid, _fp->pc);
+	_fp->gp = __fwtab_pa(base_nasid, _fp->gp);
+}
+
+
+void
+sys_fw_init (const char *args, int arglen, int bsp)
+{
+	/*
+	 * Use static variables to keep from overflowing the RSE stack
+	 */
+	static efi_system_table_t *efi_systab;
+	static efi_runtime_services_t *efi_runtime;
+	static efi_config_table_t *efi_tables;
+	static ia64_sal_desc_ptc_t *sal_ptc;
+	static ia64_sal_ptc_domain_info_t *sal_ptcdi;
+	static ia64_sal_ptc_domain_proc_entry_t *sal_ptclid;
+	static acpi_rsdp_t *acpi_systab;
+	static acpi_rsdt_t *acpi_rsdt;
+	static acpi_sapic_t *acpi_sapic;
+	static acpi_entry_lsapic_t *acpi_lsapic;
+	static struct ia64_sal_systab *sal_systab;
+	static efi_memory_desc_t *efi_memmap, *md;
+	static unsigned long *pal_desc, *sal_desc;
+	static struct ia64_sal_desc_entry_point *sal_ed;
+	static struct ia64_boot_param *bp;
+	static struct ia64_sal_desc_ap_wakeup *sal_apwake;
+	static unsigned char checksum = 0;
+	static char *cp, *cmd_line, *vendor;
+	static int mdsize, domain, last_domain ;
+	static int cnode, nasid, cpu, num_memmd, cpus_found;
+
+	/*
+	 * Pass the parameter base address to the build_efi_xxx routines.
+	 */
+	build_init(8LL*GB*base_nasid);
+
+	num_nodes = GetNumNodes();
+	num_cpus = GetNumCpus();
+
+
+	memset(fw_mem, 0, sizeof(fw_mem));
+
+	pal_desc = (unsigned long *) &pal_emulator_static;
+	sal_desc = (unsigned long *) &sal_emulator;
+	fix_function_pointer(&pal_emulator_static);
+	fix_function_pointer(&sal_emulator);
+
+	/* Align this to 16 bytes, probably EFI does this  */
+	mdsize = (sizeof(efi_memory_desc_t) + 15) & ~15 ;
+
+	cp = fw_mem;
+	efi_systab  = (void *) cp; cp += sizeof(*efi_systab);
+	efi_runtime = (void *) cp; cp += sizeof(*efi_runtime);
+	efi_tables  = (void *) cp; cp += NUM_EFI_DESCS*sizeof(*efi_tables);
+	sal_systab  = (void *) cp; cp += sizeof(*sal_systab);
+	sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
+	sal_ptc     = (void *) cp; cp += sizeof(*sal_ptc);
+	sal_apwake  = (void *) cp; cp += sizeof(*sal_apwake);
+	acpi_systab = (void *) cp; cp += sizeof(*acpi_systab);
+	acpi_rsdt   = (void *) cp; cp += sizeof(*acpi_rsdt);
+	acpi_sapic  = (void *) cp; cp += sizeof(*acpi_sapic);
+	acpi_lsapic = (void *) cp; cp += num_cpus*sizeof(*acpi_lsapic);
+	vendor 	    = (char *) cp; cp += 32;
+	efi_memmap  = (void *) cp; cp += 8*32*sizeof(*efi_memmap);
+	sal_ptcdi   = (void *) cp; cp += CPUS_PER_FSB*(1+num_nodes)*sizeof(*sal_ptcdi);
+	sal_ptclid  = (void *) cp; cp += ((3+num_cpus)*sizeof(*sal_ptclid)+7)/8*8;
+	cmd_line    = (void *) cp;
+
+	if (args) {
+		if (arglen >= 1024)
+			arglen = 1023;
+		memcpy(cmd_line, args, arglen);
+	} else {
+		arglen = 0;
+	}
+	cmd_line[arglen] = '\0';
+#ifdef BRINGUP
+	/* for now, just bring up bash */
+	strcpy(cmd_line, "init=/bin/bash");
+#else
+	strcpy(cmd_line, "");
+#endif
+
+	memset(efi_systab, 0, sizeof(efi_systab));
+	efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
+	efi_systab->hdr.revision  = EFI_SYSTEM_TABLE_REVISION;
+	efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
+	efi_systab->fw_vendor = __fwtab_pa(base_nasid, vendor);
+	efi_systab->fw_revision = 1;
+	efi_systab->runtime = __fwtab_pa(base_nasid, efi_runtime);
+	efi_systab->nr_tables = 2;
+	efi_systab->tables = __fwtab_pa(base_nasid, efi_tables);
+	memcpy(vendor, "S\0i\0l\0i\0c\0o\0n\0-\0G\0r\0a\0p\0h\0i\0c\0s\0\0", 32);
+
+	efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
+	efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
+	efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
+	efi_runtime->get_time = __fwtab_pa(base_nasid, &efi_get_time);
+	efi_runtime->set_time = __fwtab_pa(base_nasid, &efi_unimplemented);
+	efi_runtime->get_wakeup_time = __fwtab_pa(base_nasid, &efi_unimplemented);
+	efi_runtime->set_wakeup_time = __fwtab_pa(base_nasid, &efi_unimplemented);
+	efi_runtime->set_virtual_address_map = __fwtab_pa(base_nasid, &efi_success);
+	efi_runtime->get_variable = __fwtab_pa(base_nasid, &efi_unimplemented);
+	efi_runtime->get_next_variable = __fwtab_pa(base_nasid, &efi_unimplemented);
+	efi_runtime->set_variable = __fwtab_pa(base_nasid, &efi_unimplemented);
+	efi_runtime->get_next_high_mono_count = __fwtab_pa(base_nasid, &efi_unimplemented);
+	efi_runtime->reset_system = __fwtab_pa(base_nasid, &efi_reset_system);
+
+	efi_tables->guid = SAL_SYSTEM_TABLE_GUID;
+	efi_tables->table = __fwtab_pa(base_nasid, sal_systab);
+	efi_tables++;
+	efi_tables->guid = ACPI_TABLE_GUID;
+	efi_tables->table = __fwtab_pa(base_nasid, acpi_systab);
+	fix_function_pointer(&efi_unimplemented);
+	fix_function_pointer(&efi_get_time);
+	fix_function_pointer(&efi_success);
+	fix_function_pointer(&efi_reset_system);
+
+	/* fill in the ACPI system table: */
+	memcpy(acpi_systab->signature, "RSD PTR ", 8);
+	acpi_systab->rsdt = (acpi_rsdt_t*)__fwtab_pa(base_nasid, acpi_rsdt);
+
+	memcpy(acpi_rsdt->header.signature, "RSDT",4);
+	acpi_rsdt->header.length = sizeof(acpi_rsdt_t);
+	memcpy(acpi_rsdt->header.oem_id, "SGI", 3);
+	memcpy(acpi_rsdt->header.oem_table_id, "SN1", 3);
+	acpi_rsdt->header.oem_revision = 0x00010001;
+	acpi_rsdt->entry_ptrs[0] = __fwtab_pa(base_nasid, acpi_sapic);
+
+	memcpy(acpi_sapic->header.signature, "SPIC ", 4);
+	acpi_sapic->header.length = sizeof(acpi_sapic_t)+num_cpus*sizeof(acpi_entry_lsapic_t);
+	for (cnode=0; cnode<num_nodes; cnode++) {
+		nasid = GetNasid(cnode);
+		for(cpu=0; cpu<CPUS_PER_NODE; cpu++) {
+			if (!IsCpuPresent(cnode, cpu))
+				continue;
+			acpi_lsapic->type = ACPI_ENTRY_LOCAL_SAPIC;
+			acpi_lsapic->length = sizeof(acpi_entry_lsapic_t);
+			acpi_lsapic->acpi_processor_id = cnode*4+cpu;
+			acpi_lsapic->flags = LSAPIC_ENABLED|LSAPIC_PRESENT;
+			acpi_lsapic->eid = cpu;
+			acpi_lsapic->id = nasid;
+			acpi_lsapic++;
+		}
+	}
+
+
+	/* fill in the SAL system table: */
+	memcpy(sal_systab->signature, "SST_", 4);
+	sal_systab->size = sizeof(*sal_systab);
+	sal_systab->sal_rev_minor = 1;
+	sal_systab->sal_rev_major = 0;
+	sal_systab->entry_count = 3;
+
+	strcpy(sal_systab->oem_id, "SGI");
+	strcpy(sal_systab->product_id, "SN1");
+
+	/* fill in an entry point: */	
+	sal_ed->type = SAL_DESC_ENTRY_POINT;
+	sal_ed->pal_proc = __fwtab_pa(base_nasid, pal_desc[0]);
+	sal_ed->sal_proc = __fwtab_pa(base_nasid, sal_desc[0]);
+	sal_ed->gp = __fwtab_pa(base_nasid, sal_desc[1]);
+
+	/* kludge the PTC domain info */
+	sal_ptc->type = SAL_DESC_PTC;
+	sal_ptc->num_domains = 0;
+	sal_ptc->domain_info = __fwtab_pa(base_nasid, sal_ptcdi);
+	cpus_found = 0;
+	last_domain = -1;
+	sal_ptcdi--;
+	for (cnode=0; cnode<num_nodes; cnode++) {
+		nasid = GetNasid(cnode);
+		for(cpu=0; cpu<CPUS_PER_NODE; cpu++) {
+			if (IsCpuPresent(cnode, cpu)) {
+				domain = cnode*CPUS_PER_NODE + cpu/CPUS_PER_FSB;
+				if (domain != last_domain) {
+					sal_ptc->num_domains++;
+					sal_ptcdi++;
+					sal_ptcdi->proc_count = 0;
+					sal_ptcdi->proc_list = __fwtab_pa(base_nasid, sal_ptclid);
+					last_domain = domain;
+				}
+				sal_ptcdi->proc_count++;
+				sal_ptclid->id = nasid;
+				sal_ptclid->eid = cpu;
+				sal_ptclid++;
+				cpus_found++;
+			}
+		}
+	}
+
+	if (cpus_found != num_cpus)
+		FPROM_BUG();
+
+	/* Make the AP WAKEUP entry */
+	sal_apwake->type = SAL_DESC_AP_WAKEUP;
+	sal_apwake->mechanism = IA64_SAL_AP_EXTERNAL_INT;
+	sal_apwake->vector = 18;
+
+	for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp)
+		checksum += *cp;
+
+	sal_systab->checksum = -checksum;
+
+	md = &efi_memmap[0];
+	num_memmd = build_efi_memmap((void *)md, mdsize) ;
+
+	bp = id(ZERO_PAGE_ADDR + (((long)base_nasid)<<33));
+	bp->efi_systab = __fwtab_pa(base_nasid, &fw_mem);
+	bp->efi_memmap = __fwtab_pa(base_nasid, efi_memmap);
+	bp->efi_memmap_size = num_memmd*mdsize;
+	bp->efi_memdesc_size = mdsize;
+	bp->efi_memdesc_version = 0x101;
+	bp->command_line = __fwtab_pa(base_nasid, cmd_line);
+	bp->console_info.num_cols = 80;
+	bp->console_info.num_rows = 25;
+	bp->console_info.orig_x = 0;
+	bp->console_info.orig_y = 24;
+	bp->num_pci_vectors = 0;
+	bp->fpswa = 0;
+
+	/*
+	 * Now pick the BSP & store its LID value in
+	 * a global variable. Note if BSP is greater than last cpu,
+	 * pick the last cpu.
+	 */
+	for (cnode=0; cnode<num_nodes; cnode++) {
+		for(cpu=0; cpu<CPUS_PER_NODE; cpu++) {
+			if (!IsCpuPresent(cnode, cpu))
+				continue;
+			bsp_lid = (GetNasid(cnode)<<24) | (cpu<<16);
+			if (bsp-- > 0)
+				continue;
+			return;
+		}
+	}
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/main.c linux/arch/ia64/sn/fprom/main.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/main.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/main.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,110 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+
+#include <linux/types.h>
+#include <asm/bitops.h>
+
+void bedrock_init(int);
+void synergy_init(int, int);
+void sys_fw_init (const char *args, int arglen, int bsp);
+
+volatile int	bootmaster=0;		/* Used to pick bootmaster */
+volatile int	nasidmaster[128]={0};	/* Used to pick node/synergy masters */
+int		init_done=0;
+extern int	bsp_lid;
+
+#define get_bit(b,p)	(((*p)>>(b))&1)
+
+int
+fmain(int lid, int bsp) {
+	int	syn, nasid, cpu;
+
+	/*
+	 * First lets figure out who we are. This is done from the
+	 * LID passed to us.
+	 */
+	nasid = (lid>>24);
+	syn = (lid>>17)&1;
+	cpu = (lid>>16)&1;
+
+	/*
+	 * Now pick a synergy master to initialize synergy registers.
+	 */
+	if (test_and_set_bit(syn, &nasidmaster[nasid]) == 0) {
+		synergy_init(nasid, syn);
+		test_and_set_bit(syn+2, &nasidmaster[nasid]);
+	} else
+		while (get_bit(syn+2, &nasidmaster[nasid]) == 0);
+	
+	/*
+	 * Now pick a nasid master to initialize Bedrock registers.
+	 */
+	if (test_and_set_bit(8, &nasidmaster[nasid]) == 0) {
+		bedrock_init(nasid);
+		test_and_set_bit(9, &nasidmaster[nasid]);
+	} else
+		while (get_bit(9, &nasidmaster[nasid]) == 0);
+	
+
+	/*
+	 * Now pick a BSP & finish init.
+	 */
+	if (test_and_set_bit(0, &bootmaster) == 0) {
+		sys_fw_init(0, 0, bsp);
+		test_and_set_bit(1, &bootmaster);
+	} else
+		while (get_bit(1, &bootmaster) == 0);
+
+	return (lid == bsp_lid);
+}
+
+
+void
+bedrock_init(int nasid)
+{
+	nasid = nasid;		/* to quiet gcc */
+}
+
+
+void
+synergy_init(int nasid, int syn)
+{
+	long	*base;
+	long	off;
+
+	/*
+	 * Enable all FSB flashed interrupts.
+	 * ZZZ - I'd really like defines for this......
+	 */
+	base = (long*)0x80000e0000000000LL;		/* base of synergy regs */
+	for (off = 0x2a0; off < 0x2e0; off+=8)		/* offset for VEC_MASK_{0-3}_A/B */
+		*(base+off/8) = -1LL;
+
+	/*
+	 * Set the NASID in the FSB_CONFIG register.
+	 */
+	base = (long*)0x80000e0000000450LL;
+	*base = (long)((nasid<<16)|(syn<<9));
+}
+
+
+/* Why isn't there a bcopy/memcpy in lib64.a */
+
+void* 
+memcpy(void * dest, const void *src, size_t count)
+{
+	char *s, *se, *d;
+
+	for(d=dest, s=(char*)src, se=s+count; s<se; s++, d++)
+		*d = *s;
+	return dest;
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/fprom/runsim linux/arch/ia64/sn/fprom/runsim
--- v2.4.0-prerelease/linux/arch/ia64/sn/fprom/runsim	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/fprom/runsim	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,372 @@
+#!/bin/sh
+
+# Script for running PROMs and LINUX kernels on medusa.
+# Type "sim -H" for instructions.
+
+MEDUSA=${MEDUSA:-/home/rickc/official_medusa/medusa}
+
+# ------------------ err -----------------------
+err() {
+	echo "ERROR - $1"
+	exit 1
+}
+
+# ----------------  help ----------------------
+help() {
+cat <<END
+Script for running a PROM or LINUX kernel under medusa.
+This script creates a control file, creates links to the appropriate
+linux/prom files, and/or calls medusa to make simulation runs.
+
+Usage:  
+   Initial setup:
+   	sim [-c <config_file>] <-p> | <-k>  [<work_dir>]
+		-p	Create PROM control file & links
+		-k	Create LINUX control file & links
+		-c<cf>	Control file name				[Default: cf]
+		<work_dir> Path to directory that contains the linux or PROM files.
+		    The directory can be any of the following:
+		       (linux simulations)
+		       		worktree
+				worktree/linux
+				any directory with vmlinux, vmlinux.sym & fprom files
+			(prom simulations)
+				worktree
+				worktree/stand/arcs/IP37prom/dev
+				any directory with fw.bin & fw.sim files
+
+    Simulations:
+	sim  [-X <n>] [-o <output>] [-M] [<config_file>]
+		-c<cf>	Control file name				[Default: cf]
+		-M	Pipe output thru fmtmedusa
+		-o	Output filename (copy of all commands/output)	[Default: simout]
+		-X	Specifies number of instructions to execute	[Default: 0]
+			(Used only in auto test mode - not described here)
+
+Examples:
+	sim -p <promtree>	# create control file (cf) & links for prom simulations
+	sim -k <linuxtree>	# create control file (cf) & links for linux simulations
+	sim -p -c cfprom	# create a prom control file (cfprom) only. No links are made.
+
+	sim			# run medusa using previously created links &
+				#   control file (cf).
+END
+exit 1
+}
+
+# ----------------------- create control file header --------------------
+create_cf_header() {
+cat <<END >>$CF
+#
+# Template for a control file for running linux kernels under medusa. 
+# You probably want to make mods here but this is a good starting point.
+#
+
+# Preferences
+setenv cpu_stepping A
+setenv exceptionPrint off
+setenv interrupt_messages off
+setenv lastPCsize 100000
+setenv low_power_mode on
+setenv partialIntelChipSet on
+setenv printIntelMessages off
+setenv prom_write_action halt
+setenv prom_write_messages on
+setenv step_quantum 100
+setenv swizzling on
+setenv tsconsole on
+setenv uart_echo on
+symbols on
+
+# IDE disk params
+setenv diskCylinders 611
+setenv bootDrive C
+setenv diskHeads 16
+setenv diskPath idedisk
+setenv diskPresent 1
+setenv diskSpt 63
+
+# Hardware config
+setenv coherency_type nasid
+setenv cpu_cache_type default
+setenv synergy_cache_type syn_cac_64m_8w
+
+# Numalink config
+setenv route_enable on
+setenv network_type xbar		# Select [xbar|router]
+setenv network_warning 0xff
+
+END
+}
+
+
+# ------------------ create control file entries for linux simulations -------------
+create_cf_linux() {
+cat <<END >>$CF
+# Kernel specific options
+setenv mca_on_memory_failure off
+setenv LOADPC 0x00100000		# FPROM load address/entry point (8 digits!)
+sr g 9 0xe000000000520000		# Kernel entry point
+setenv symbol_table vmlinux.sym
+load fprom
+load vmlinux
+
+# Useful breakpoints to always have set. Add more if desired.
+break 0xe000000000505e00	all	# dispatch_to_fault_handler
+break panic			all	# stop on panic
+break die_if_kernel		all	# may as well stop
+
+END
+}
+
+# ------------------ create control file entries for prom simulations ---------------
+create_cf_prom() {
+	SYM2=""
+	ADDR="0x80000000ff800000"
+	[ "$EMBEDDED_LINUX" != "0" ] || SYM2="setenv symbol_table2 vmlinux.sym"
+	[ "$SIZE" = "8MB" ] || ADDR="0x80000000ffc00000"
+	cat <<END >>$CF
+# PROM specific options
+setenv mca_on_memory_failure on
+setenv LOADPC 0x80000000ffffffb0
+setenv promFile fw.bin
+setenv promAddr $ADDR
+setenv symbol_table fw.sym
+$SYM2
+
+# Useful breakpoints to always have set. Add more if desired.
+break Pr_ivt_gexx 		all
+break Pr_ivt_brk		all
+break Pr_PROM_Panic_Spin	all
+break Pr_PROM_Panic		all
+break Pr_PROM_C_Panic		all
+break Pr_fled_die		all
+break Pr_ResetNow		all
+break Pr_zzzbkpt		all
+
+END
+}
+
+
+# ------------------ create control file entries for memory configuration -------------
+create_cf_memory() {
+cat <<END >>$CF
+# CPU/Memory map format:
+#	setenv nodeN_memory_config 0xBSBSBSBS
+#		B=banksize (0=unused, 1=64M, 2=128M, .., 5-1G, c=8M, d=16M, e=32M)
+#		S=bank enable (0=both disable, 3=both enable, 2=bank1 enable, 1=bank0 enable)
+#		  rightmost digits are for bank 0, the lowest address.
+#	setenv nodeN_nasid <nasid>
+#		specifies the NASID for the node. This is used ONLY if booting the kernel.
+#		On PROM configurations, set to 0 - PROM will change it later.
+#	setenv nodeN_cpu_config <cpu_mask>
+#		Set bit number N to 1 to enable cpu N. Ex., a value of 5 enables cpu 0 & 2.
+#
+# Repeat the above 3 commands for each node.
+#
+# For kernel, default to 32MB. Although this is not a valid hardware configuration,
+# it runs faster on medusa. For PROM, 64MB is smallest allowed value.
+
+setenv node0_cpu_config		0x1	# Enable only cpu 0 on the node
+END
+
+if [ $LINUX -eq 1 ] ; then
+cat <<END >>$CF
+setenv node0_nasid		0	# cnode 0 has NASID 0
+setenv node0_memory_config 	0xe1	# 32MB
+END
+else
+cat <<END >>$CF
+setenv node0_memory_config 	0x11	# 64MB
+END
+fi
+}
+
+# -------------------- set links to linux files -------------------------
+set_linux_links() {
+	if [ -d $D/linux/arch ] ; then
+		D=$D/linux
+	elif [ -d $D/arch -o -e vmlinux.sym ] ; then
+		D=$D
+	else
+		err "cant determine directory for linux binaries"
+	fi
+	rm -rf vmlinux vmlinux.sym fprom
+	ln -s $D/vmlinux vmlinux
+	ln -s $D/vmlinux.sym vmlinux.sym
+	if [ -d $D/arch ] ; then
+		ln -s $D/arch/ia64/sn/fprom/fprom fprom
+	else
+		ln -s $D/fprom fprom
+	fi
+	echo "  .. Created links to linux files"	
+}
+
+# -------------------- set links to prom files -------------------------
+set_prom_links() {
+	if [ -d $D/stand ] ; then
+		D=$D/stand/arcs/IP37prom/dev
+	elif [ -d $D/sal ] ; then
+		D=$D
+	else
+		err "cant determine directory for PROM binaries"
+	fi
+	SETUP="$D/../../../../.setup"
+	grep -q '^ *setenv *PROMSIZE *8MB' $SETUP
+	if [ $? -eq 0 ] ; then
+		SIZE="8MB"
+	else
+		SIZE="4MB"
+	fi
+	grep -q '^ *setenv *LAUNCH_VMLINUX' $SETUP
+	EMBEDDED_LINUX=$?
+	rm -f fw.bin fw.map fw.sym vmlinux vmlinux.sym fprom
+	SDIR="SN1IA${SIZE}.O"
+	BIN="SN1IAip37prom${SIZE}"
+	ln -s $D/$SDIR/$BIN.bin fw.bin
+	ln -s $D/$SDIR/$BIN.map fw.map
+	ln -s $D/$SDIR/$BIN.sym fw.sym
+	echo "  .. Created links to $SIZE prom files"
+	if [ $EMBEDDED_LINUX -eq 0 ] ; then
+		ln -s $D/linux/vmlinux vmlinux
+		ln -s $D/linux/vmlinux.sym vmlinux.sym
+		if [ -d linux/arch ] ; then
+			ln -s $D/linux/arch/ia64/sn/fprom/fprom fprom
+		else
+			ln -s $D/linux/fprom fprom
+		fi
+		echo "  .. Created links to embedded linux files in prom tree"
+	fi
+}
+
+# --------------- start of shell script --------------------------------
+OUT="simout"
+FMTMED=0
+STEPCNT=0
+PROM=0
+LINUX=0
+NCF="cf"
+while getopts "HMX:c:o:pk" c ; do
+        case ${c} in
+                H) help;;
+		M) FMTMED=1;;
+		X) STEPCNT=${OPTARG};;
+		c) NCF=${OPTARG};;
+		k) PROM=0;LINUX=1;;
+		p) PROM=1;LINUX=0;;
+		o) OUT=${OPTARG};;
+                \?) exit 1;;
+        esac
+done
+shift `expr ${OPTIND} - 1`
+
+# Check if command is for creating control file and/or links to images.
+if [ $PROM -eq 1 -o $LINUX -eq 1 ] ; then
+	CF=$NCF
+	[ ! -f $CF ] || err "wont overwrite an existing control file ($CF)"
+	if [ $# -gt 0 ] ; then
+		D=$1
+		[ -d $D ] || err "cannot find directory $D"
+		[ $PROM -eq 0 ]  || set_prom_links
+		[ $LINUX -eq 0 ] || set_linux_links
+	fi
+	create_cf_header
+	[ $PROM -eq 0 ]  || create_cf_prom
+	[ $LINUX -eq 0 ] || create_cf_linux
+	create_cf_memory
+	echo "  .. Basic control file created (in $CF). You might want to edit"
+	echo "     this file (at least, look at it)."
+	exit 0
+fi
+
+# Verify that the control file exists
+CF=${1:-$NCF}
+[ -f $CF ] || err "No control file exists. For help, type: $0 -H"
+
+# Build the .cf files from the user control file. The .cf file is
+# identical except that the actual start & load addresses are inserted
+# into the file. In addition, the FPROM commands for configuring memory
+# and LIDs are generated. 
+
+rm -f .cf .cf1 .cf2
+awk '
+function strtonum(n) {
+	 if (substr(n,1,2) != "0x")
+	 	return int(n)
+	 n = substr(n,3)
+	 r=0
+	 while (length(n) > 0) {
+	 	r = r*16+(index("0123456789abcdef", substr(n,1,1))-1)
+		n = substr(n,2)
+	 }
+	 return r
+	}
+/^#/   	{next}
+/^$/	{next}
+/^setenv *LOADPC/               {loadpc = $3; next}
+/^setenv *node._cpu_config/	{n=int(substr($2,5,1)); cpuconf[n] = strtonum($3); print; next}
+/^setenv *node._memory_config/	{n=int(substr($2,5,1)); memconf[n] = strtonum($3); print; next}
+/^setenv *node._nasid/		{n=int(substr($2,5,1)); nasid[n] = strtonum($3); print; next}
+		{print}
+END	{
+	 # Generate the memmap info that starts at the beginning of
+	 # the node the kernel was loaded on.
+	 loadnasid = nasid[0]
+	 cnode = 0
+	 for (i=0; i<128; i++) {
+		if (memconf[i] != "") {
+			printf "sm 0x%x%08x 0x%x%04x%04x\n", 
+				2*loadnasid, 8*cnodes+8, memconf[i], cpuconf[i], nasid[i]
+			cnodes++
+			cpus += substr("0112122312232334", cpuconf[i]+1,1)
+		}
+	 }
+	 printf "sm 0x%x00000000 0x%x%08x\n", 2*loadnasid, cnodes, cpus
+	 printf "setenv number_of_nodes %d\n", cnodes
+
+	 # Now set the starting PC for each cpu.
+	 cnode = 0
+	 lowcpu=-1
+	 for (i=0; i<128; i++) {
+		if (memconf[i] != "") {
+			printf "setnode %d\n", cnode
+			conf = cpuconf[i]
+			for (j=0; j<4; j++) {
+				if (conf != int(conf/2)*2) {
+	 				printf "setcpu %d\n", j
+					if (length(loadpc) == 18)
+						printf "sr pc %s\n", loadpc
+					else
+						printf "sr pc 0x%x%s\n", 2*loadnasid, substr(loadpc,3)
+					if (lowcpu == -1)
+						lowcpu = j
+				}
+				conf = int(conf/2)
+			}
+			cnode++
+		}
+	 }
+	 printf "setnode 0\n"
+	 printf "setcpu %d\n", lowcpu
+	}
+' <$CF >.cf
+
+# Now build the .cf1 & .cf2 control files.
+CF2_LINES="^sm |^break |^run |^si |^quit |^symbols "
+egrep  "$CF2_LINES" .cf >.cf2
+egrep -v "$CF2_LINES" .cf >.cf1
+if [ $STEPCNT -ne 0 ] ; then
+	echo "s $STEPCNT" >>.cf2
+	echo "lastpc 1000" >>.cf2
+	echo "q" >>.cf2
+fi
+echo "script-on $OUT" >>.cf2
+
+# Now start medusa....
+if [ $FMTMED -ne 0 ] ; then
+	$MEDUSA -system mpsn1 -c .cf1 -i .cf2 |  fmtmedusa
+elif [ $STEPCNT -eq 0 ] ; then
+	$MEDUSA -system mpsn1 -c .cf1 -i .cf2 
+else
+	$MEDUSA -system mpsn1 -c .cf1 -i .cf2 2>&1 
+fi
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/Makefile linux/arch/ia64/sn/io/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/Makefile	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/Makefile	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,32 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2000 Silicon Graphics, Inc.
+# Copyright (C) Jack Steiner (steiner@sgi.com)
+#
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+EXTRA_CFLAGS	:= -DSN -DLANGUAGE_C=1 -D_LANGUAGE_C=1 -I. -DBRINGUP \
+		   -DDIRECT_L1_CONSOLE -DNUMA_BASE -DSIMULATED_KLGRAPH \
+		   -DNUMA_MIGR_CONTROL -DLITTLE_ENDIAN -DREAL_HARDWARE \
+		   -DNEW_INTERRUPTS -DCONFIG_IA64_SGI_IO
+O_TARGET := sgiio.o
+O_OBJS   := stubs.o sgi_if.o pciio.o pcibr.o xtalk.o xbow.o xswitch.o hubspc.o \
+		klgraph_hack.o io.o hubdev.o \
+		hcl.o labelcl.o invent.o klgraph.o klconflib.o sgi_io_sim.o \
+		module.o sgi_io_init.o klgraph_hack.o ml_SN_init.o \
+		ml_SN_intr.o ip37.o \
+		ml_iograph.o hcl_util.o cdl.o \
+		mem_refcnt.o devsupport.o alenlist.o pci_bus_cvlink.o \
+		eeprom.o pci.o pci_dma.o l1.o l1_command.o
+
+include $(TOPDIR)/Rules.make
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/alenlist.c linux/arch/ia64/sn/io/alenlist.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/alenlist.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/alenlist.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,901 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/* Implementation of Address/Length Lists. */
+
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/alenlist.h>
+#include <asm/sn/mmzone_sn1.h>
+
+/*
+ * Logically, an Address/Length List is a list of Pairs, where each pair
+ * holds an Address and a Length, all in some Address Space.  In this
+ * context, "Address Space" is a particular Crosstalk Widget address
+ * space, a PCI device address space, a VME bus address space, a
+ * physical memory address space, etc.
+ *
+ * The main use for these Lists is to provide a single mechanism that
+ * describes where in an address space a DMA occurs.  This allows the
+ * various I/O Bus support layers to provide a single interface for
+ * DMA mapping and DMA translation without regard to how the DMA target
+ * was specified by upper layers.  The upper layers commonly specify a 
+ * DMA target via a buf structure page list, a kernel virtual address,
+ * a user virtual address, a vector of addresses (a la uio and iov), 
+ * or possibly a pfn list.
+ *
+ * Address/Length Lists also enable drivers to take advantage of their
+ * innate scatter/gather capabilities in systems where some address
+ * translation may be required between bus adapters.  The driver forms
+ * a List that represents physical memory targets.  This list is passed
+ * to the various adapters, which apply various translations.  The final
+ * list that's returned to the driver is in terms of its local address
+ * address space -- addresses which can be passed off to a scatter/gather
+ * capable DMA controller.
+ *
+ * The current implementation is intended to be useful both in kernels
+ * that support interrupt threads (INTR_KTHREAD) and in systems that do
+ * not support interrupt threads.  Of course, in the latter case, some
+ * interfaces can be called only within a suspendable context.
+ *
+ * Basic operations on Address/Length Lists include:
+ *	alenlist_create		Create a list
+ *	alenlist_clear		Clear a list
+ *	alenlist_destroy	Destroy a list
+ *	alenlist_append		Append a Pair to the end of a list
+ *	alenlist_replace	Replace a Pair in the middle of a list
+ *	alenlist_get		Get an Address/Length Pair from a list
+ *	alenlist_size		Return the number of Pairs in a list
+ *	alenlist_concat		Append one list to the end of another
+ *	alenlist_clone		Create a new copy of a list
+ *
+ * Operations that convert from upper-level specifications to Address/
+ * Length Lists currently include:
+ *	kvaddr_to_alenlist	Convert from a kernel virtual address
+ *	uvaddr_to_alenlist	Convert from a user virtual address
+ *	buf_to_alenlist		Convert from a buf structure
+ *	alenlist_done		Tell system that we're done with an alenlist
+ *				obtained from a conversion.
+ * Additional convenience operations:
+ *	alenpair_init		Create a list and initialize it with a Pair
+ *	alenpair_get		Peek at the first pair on a List
+ *
+ * A supporting type for Address/Length Lists is an alenlist_cursor_t.  A
+ * cursor marks a position in a List, and determines which Pair is fetched
+ * by alenlist_get.
+ *	alenlist_cursor_create	Allocate and initialize a cursor
+ *	alenlist_cursor_destroy	Free space consumed by a cursor
+ *	alenlist_cursor_init	(Re-)Initialize a cursor to point 
+ *				to the start of a list
+ *	alenlist_cursor_clone	Clone a cursor (at the current offset)
+ *	alenlist_cursor_offset	Return the number of bytes into
+ *				a list that this cursor marks
+ * Multiple cursors can point at various points into a List.  Also, each
+ * list maintains one "internal cursor" which may be updated by alenlist_clear
+ * and alenlist_get.  If calling code simply wishes to scan sequentially
+ * through a list starting at the beginning, and if it is the only user of
+ * a list, it can rely on this internal cursor rather than managing a 
+ * separate explicit cursor.
+ *
+ * The current implementation allows callers to allocate both cursors and
+ * the lists as local stack (structure) variables.  This allows for some
+ * extra efficiency at the expense of forward binary compatibility.  It 
+ * is recommended that customer drivers refrain from local allocation.
+ * In fact, we likely will choose to move the structures out of the public 
+ * header file into a private place in order to discourage this usage.
+ *
+ * Currently, no locking is provided by the alenlist implementation.
+ *
+ * Implementation notes:
+ * For efficiency, Pairs are grouped into "chunks" of, say, 32 Pairs
+ * and a List consists of some number of these chunks.  Chunks are completely
+ * invisible to calling code.  Chunks should be large enough to hold most
+ * standard-sized DMA's, but not so large that they consume excessive space.
+ *
+ * It is generally expected that Lists will be constructed at one time and
+ * scanned at a later time.  It is NOT expected that drivers will scan
+ * a List while the List is simultaneously extended, although this is
+ * theoretically possible with sufficient upper-level locking.
+ *
+ * In order to support demands of Real-Time drivers and in order to support
+ * swapping under low-memory conditions, we support the concept of a
+ * "pre-allocated fixed-sized List".  After creating a List with 
+ * alenlist_create, a driver may explicitly grow the list (via "alenlist_grow")
+ * to a specific number of Address/Length pairs.  It is guaranteed that future 
+ * operations involving this list will never automatically grow the list 
+ * (i.e. if growth is ever required, the operation will fail).  Additionally, 
+ * operations that use alenlist's (e.g. DMA operations) accept a flag which 
+ * causes processing to take place "in-situ"; that is, the input alenlist 
+ * entries are replaced with output alenlist entries.  The combination of 
+ * pre-allocated Lists and in-situ processing allows us to avoid the 
+ * potential deadlock scenario where we sleep (waiting for memory) in the 
+ * swap out path.
+ *
+ * For debugging, we track the number of allocated Lists in alenlist_count
+ * the number of allocated chunks in alenlist_chunk_count, and the number
+ * of allocated cursors in alenlist_cursor_count.  We also provide a debug 
+ * routine, alenlist_show, which dumps the contents of an Address/Length List.
+ *
+ * Currently, Lists are formed by drivers on-demand.  Eventually, we may
+ * associate an alenlist with a buf structure and keep it up to date as
+ * we go along.  In that case, buf_to_alenlist simply returns a pointer
+ * to the existing List, and increments the Lists's reference count.
+ * alenlist_done would decrement the reference count and destroys the List
+ * if it was the last reference.
+ *
+ * Eventually alenlist's may allow better support for user-level scatter/
+ * gather operations (e.g. via readv/writev):  With proper support, we
+ * could potentially handle a vector of reads with a single scatter/gather
+ * DMA operation.  This could be especially useful on NUMA systems where
+ * there's more of a reason for users to use vector I/O operations.
+ *
+ * Eventually, alenlist's may replace kaio lists, vhand page lists,
+ * buffer cache pfdat lists, DMA page lists, etc.
+ */
+
+/* Opaque data types */
+
+/* An Address/Length pair.  */
+typedef struct alen_s {
+	alenaddr_t	al_addr;
+	size_t		al_length;
+} alen_t;
+
+/* 
+ * Number of elements in one chunk of an Address/Length List.
+ *
+ * This size should be sufficient to hold at least an "average" size
+ * DMA request.  Must be at least 1, and should be a power of 2,
+ * for efficiency.
+ */
+#define ALEN_CHUNK_SZ ((512*1024)/NBPP)
+
+/*
+ * A fixed-size set of Address/Length Pairs.  Chunks of Pairs are strung together 
+ * to form a complete Address/Length List.  Chunking is entirely hidden within the 
+ * alenlist implementation, and it simply makes allocation and growth of lists more 
+ * efficient.
+ */
+typedef struct alenlist_chunk_s {
+	alen_t			alc_pair[ALEN_CHUNK_SZ];/* list of addr/len pairs */
+	struct alenlist_chunk_s *alc_next;		/* point to next chunk of pairs */
+} *alenlist_chunk_t;
+
+/* 
+ * An Address/Length List.  An Address/Length List is allocated with alenlist_create.  
+ * Alternatively, a list can be allocated on the stack (local variable of type 
+ * alenlist_t) and initialized with alenpair_init or with a combination of 
+ * alenlist_clear and alenlist_append, etc.  Code which statically allocates these
+ * structures loses forward binary compatibility!
+ *
+ * A statically allocated List is sufficiently large to hold ALEN_CHUNK_SZ pairs.
+ */
+struct alenlist_s {
+	unsigned short		al_flags;
+	unsigned short		al_logical_size;	/* logical size of list, in pairs */
+	unsigned short		al_actual_size;		/* actual size of list, in pairs */
+	struct alenlist_chunk_s	*al_last_chunk;		/* pointer to last logical chunk */
+	struct alenlist_cursor_s al_cursor;		/* internal cursor */
+	struct alenlist_chunk_s	al_chunk;		/* initial set of pairs */
+	alenaddr_t		al_compaction_address;	/* used to compact pairs */
+};
+
+/* al_flags field */
+#define AL_FIXED_SIZE	0x1	/* List is pre-allocated, and of fixed size */
+
+
+zone_t *alenlist_zone = NULL;
+zone_t *alenlist_chunk_zone = NULL;
+zone_t *alenlist_cursor_zone = NULL;
+
+#if DEBUG
+int alenlist_count=0;		/* Currently allocated Lists */
+int alenlist_chunk_count = 0;	/* Currently allocated chunks */
+int alenlist_cursor_count = 0;	/* Currently allocated cursors */
+#define INCR_COUNT(ptr) atomicAddInt((ptr), 1);
+#define DECR_COUNT(ptr) atomicAddInt((ptr), -1);
+#else
+#define INCR_COUNT(ptr)
+#define DECR_COUNT(ptr)
+#endif /* DEBUG */
+
+#if DEBUG
+static void alenlist_show(alenlist_t);
+#endif /* DEBUG */
+
+/*
+ * Initialize Address/Length List management.  One time initialization.
+ */
+void
+alenlist_init(void)
+{
+	alenlist_zone = kmem_zone_init(sizeof(struct alenlist_s), "alenlist");
+	alenlist_chunk_zone = kmem_zone_init(sizeof(struct alenlist_chunk_s), "alchunk");
+	alenlist_cursor_zone = kmem_zone_init(sizeof(struct alenlist_cursor_s), "alcursor");
+#if DEBUG
+	idbg_addfunc("alenshow", alenlist_show);
+#endif /* DEBUG */
+}
+
+
+/*
+ * Initialize an Address/Length List cursor.
+ */
+static void
+do_cursor_init(alenlist_t alenlist, alenlist_cursor_t cursorp)
+{
+	cursorp->al_alenlist = alenlist;
+	cursorp->al_offset = 0;
+	cursorp->al_chunk = &alenlist->al_chunk;
+	cursorp->al_index = 0;
+	cursorp->al_bcount = 0;
+}
+
+
+/*
+ * Create an Address/Length List, and clear it.
+ * Set the cursor to the beginning.
+ *
+ * AL_NOSLEEP in "flags" maps to VM_NOSLEEP for the zone allocator, so
+ * the allocation may fail; callers must be prepared for a NULL return.
+ */
+alenlist_t 
+alenlist_create(unsigned flags)
+{
+	alenlist_t alenlist;
+
+	alenlist = kmem_zone_alloc(alenlist_zone, flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
+	if (alenlist) {
+		INCR_COUNT(&alenlist_count);
+
+		alenlist->al_flags = 0;
+		/* Capacity starts at the size of the embedded chunk. */
+		alenlist->al_logical_size = 0;
+		alenlist->al_actual_size = ALEN_CHUNK_SZ;
+		alenlist->al_last_chunk = &alenlist->al_chunk;
+		alenlist->al_chunk.alc_next = NULL;
+		do_cursor_init(alenlist, &alenlist->al_cursor);
+	}
+
+	return(alenlist);
+}
+
+
+/*
+ * Grow an Address/Length List so that all resources needed to contain
+ * the specified number of Pairs are pre-allocated.  An Address/Length
+ * List that has been explicitly "grown" will never *automatically*
+ * grow, shrink, or be destroyed.
+ *
+ * Pre-allocation is useful for Real-Time drivers and for drivers that
+ * may be used along the swap-out path and therefore cannot afford to 
+ * sleep until memory is freed.
+ * 
+ * The cursor is set to the beginning of the list.
+ *
+ * Returns ALENLIST_SUCCESS/ALENLIST_FAILURE.
+ */
+int
+alenlist_grow(alenlist_t alenlist, size_t npairs)
+{
+	/* 
+	 * This interface should be used relatively rarely, so
+	 * the implementation is kept simple: We clear the List,
+	 * then append npairs bogus entries.  Finally, we mark
+	 * the list as FIXED_SIZE and re-initialize the internal
+	 * cursor.
+	 */
+
+	/* 
+	 * Temporarily mark as non-fixed size, since we're about
+	 * to shrink and expand it.
+	 */
+	alenlist->al_flags &= ~AL_FIXED_SIZE;
+
+	/* Free whatever was in the alenlist. */
+	alenlist_clear(alenlist);
+
+	/* Allocate everything that we need via automatic expansion. */
+	/* NOTE(review): on failure the list is left non-FIXED_SIZE with a
+	 * partial set of bogus entries — callers appear expected to destroy
+	 * it; confirm. */
+	while (npairs--)
+		if (alenlist_append(alenlist, 0, 0, AL_NOCOMPACT) == ALENLIST_FAILURE)
+			return(ALENLIST_FAILURE);
+
+	/* Now, mark as FIXED_SIZE */
+	alenlist->al_flags |= AL_FIXED_SIZE;
+
+	/* Clear out bogus entries (chunks are retained: list is FIXED_SIZE) */
+	alenlist_clear(alenlist);
+
+	/* Initialize internal cursor to the beginning */
+	do_cursor_init(alenlist, &alenlist->al_cursor);
+
+	return(ALENLIST_SUCCESS);
+}
+
+
+/*
+ * Clear an Address/Length List so that it holds no pairs.
+ *
+ * For a FIXED_SIZE List only the logical size is reset; the
+ * pre-allocated extension chunks are retained.  Otherwise all
+ * extension chunks are freed and capacity shrinks back to the
+ * embedded chunk.
+ */
+void
+alenlist_clear(alenlist_t alenlist)
+{
+	alenlist_chunk_t chunk, freechunk;
+
+	/*
+	 * If this List is not FIXED_SIZE, free all the
+	 * extra chunks.
+	 */
+	if (!(alenlist->al_flags & AL_FIXED_SIZE)) {
+		/* First, free any extension alenlist chunks */
+		chunk = alenlist->al_chunk.alc_next;
+		while (chunk) {
+			freechunk = chunk;
+			chunk = chunk->alc_next;
+			kmem_zone_free(alenlist_chunk_zone, freechunk);
+			DECR_COUNT(&alenlist_chunk_count);
+		}
+		alenlist->al_actual_size = ALEN_CHUNK_SZ;
+		alenlist->al_chunk.alc_next = NULL;
+	}
+
+	alenlist->al_logical_size = 0;
+	alenlist->al_last_chunk = &alenlist->al_chunk;
+	do_cursor_init(alenlist, &alenlist->al_cursor);
+}
+
+
+/*
+ * Create and initialize an Address/Length Pair.
+ * This is intended for degenerate lists, consisting of a single 
+ * address/length pair.
+ *
+ * Returns the new list, or NULL if the underlying list allocation
+ * fails.
+ */
+alenlist_t
+alenpair_init(	alenaddr_t address, 
+		size_t length)
+{
+	alenlist_t alenlist;
+
+	alenlist = alenlist_create(0);
+	/* FIX: don't dereference a failed allocation (CERT MEM32-C). */
+	if (alenlist == NULL)
+		return(NULL);
+
+	alenlist->al_logical_size = 1;
+	ASSERT(alenlist->al_last_chunk == &alenlist->al_chunk);
+	alenlist->al_chunk.alc_pair[0].al_length = length;
+	alenlist->al_chunk.alc_pair[0].al_addr = address;
+
+	return(alenlist);
+}
+
+/*
+ * Peek at the first address/length pair of a List (typically a
+ * degenerate 1-pair List).  The internal cursor is NOT advanced,
+ * so this is a cheap way to look at a start address.
+ */
+int
+alenpair_get(	alenlist_t alenlist,
+		alenaddr_t *address,
+		size_t *length)
+{
+	alen_t *pair;
+
+	if (alenlist->al_logical_size == 0)
+		return(ALENLIST_FAILURE);
+
+	pair = &alenlist->al_chunk.alc_pair[0];
+	*address = pair->al_addr;
+	*length = pair->al_length;
+	return(ALENLIST_SUCCESS);
+}
+
+
+/*
+ * Destroy an Address/Length List.
+ * Safe to call with NULL.  Extension chunks (if any) are released
+ * first, then the list head itself is returned to its zone.
+ */
+void 
+alenlist_destroy(alenlist_t alenlist)
+{
+	if (alenlist == NULL)
+		return;
+
+	/* 
+	 * Turn off FIXED_SIZE so this List can be 
+	 * automatically shrunk.
+	 */
+	alenlist->al_flags &= ~AL_FIXED_SIZE;
+
+	/* Free extension chunks first */
+	if (alenlist->al_chunk.alc_next)
+		alenlist_clear(alenlist);
+
+	/* Now, free the alenlist itself */
+	kmem_zone_free(alenlist_zone, alenlist);
+	DECR_COUNT(&alenlist_count);
+}
+
+/*
+ * Release an Address/Length List.
+ * This is in preparation for a day when alenlist's may be longer-lived, and
+ * perhaps associated with a buf structure.  We'd add a reference count, and
+ * this routine would decrement the count.  For now, we create alenlist's on
+ * on demand and free them when done.  If the driver is not explicitly managing
+ * a List for its own use, it should call alenlist_done rather than alenlist_destroy.
+ *
+ * Currently a plain alias for alenlist_destroy (no refcounting yet).
+ */
+void
+alenlist_done(alenlist_t alenlist)
+{
+	alenlist_destroy(alenlist);
+}
+
+
+/*
+ * Append another address/length to the end of an Address/Length List,
+ * growing the list if permitted and necessary.
+ *
+ * Unless AL_NOCOMPACT is set, a pair whose address immediately follows
+ * the end of the previous pair is merged into it rather than appended.
+ *
+ * Returns: SUCCESS/FAILURE
+ */
+int 
+alenlist_append(	alenlist_t alenlist, 		/* append to this list */
+			alenaddr_t address, 		/* address to append */
+			size_t length,			/* length to append */
+			unsigned flags)
+{
+	alen_t *alenp;
+	int index, last_index;
+
+	/* Slot within the current (last) chunk. */
+	index = alenlist->al_logical_size % ALEN_CHUNK_SZ;
+
+	if ((alenlist->al_logical_size > 0)) {
+		/*
+		 * See if we can compact this new pair in with the previous entry.
+		 * al_compaction_address holds that value that we'd need to see
+		 * in order to compact.
+		 * (Only meaningful once at least one pair has been appended.)
+		 */
+		if (!(flags & AL_NOCOMPACT) &&
+		    (alenlist->al_compaction_address == address)) {
+			last_index = (alenlist->al_logical_size-1) % ALEN_CHUNK_SZ;
+			alenp = &(alenlist->al_last_chunk->alc_pair[last_index]);
+			alenp->al_length += length;
+			alenlist->al_compaction_address += length;
+			return(ALENLIST_SUCCESS);
+		}
+
+		/*
+		 * If we're out of room in this chunk, move to a new chunk.
+	 	 */
+		if (index == 0) {
+			if (alenlist->al_flags & AL_FIXED_SIZE) {
+				/* FIXED_SIZE: advance into pre-allocated chunks only. */
+				alenlist->al_last_chunk = alenlist->al_last_chunk->alc_next;
+
+				/* If we're out of space in a FIXED_SIZE List, quit. */
+				if (alenlist->al_last_chunk == NULL) {
+					ASSERT(alenlist->al_logical_size == alenlist->al_actual_size);
+					return(ALENLIST_FAILURE);
+				}
+			} else {
+				alenlist_chunk_t new_chunk;
+
+				new_chunk = kmem_zone_alloc(alenlist_chunk_zone, 
+							flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
+
+				if (new_chunk == NULL)
+					return(ALENLIST_FAILURE);
+
+				alenlist->al_last_chunk->alc_next = new_chunk;
+				new_chunk->alc_next = NULL;
+				alenlist->al_last_chunk = new_chunk;
+				alenlist->al_actual_size += ALEN_CHUNK_SZ;
+				INCR_COUNT(&alenlist_chunk_count);
+			}
+		}
+	}
+
+	/* Store the new pair and remember where a compactable successor
+	 * would have to start. */
+	alenp = &(alenlist->al_last_chunk->alc_pair[index]);
+	alenp->al_addr = address;
+	alenp->al_length = length;
+	
+	alenlist->al_logical_size++;
+	alenlist->al_compaction_address = address + length;
+
+	return(ALENLIST_SUCCESS);
+}
+
+
+/*
+ * Replace an item in an Address/Length List.  Cursor is updated so
+ * that alenlist_get will get the next item in the list.  This interface 
+ * is not very useful for drivers; but it is useful to bus providers 
+ * that need to translate between address spaced in situ.  The old Address
+ * and Length are returned.
+ */
+/* ARGSUSED */
+int
+alenlist_replace(	alenlist_t alenlist, 		/* in: replace in this list */
+			alenlist_cursor_t cursorp, 	/* inout: which item to replace */
+			alenaddr_t *addrp, 		/* inout: address */
+			size_t *lengthp,		/* inout: length */
+			unsigned flags)
+{
+	alen_t *alenp;
+	alenlist_chunk_t chunk;
+	unsigned int index;
+	size_t length;
+	alenaddr_t addr;
+
+	if ((addrp == NULL) || (lengthp == NULL))
+		return(ALENLIST_FAILURE);
+
+	if (alenlist->al_logical_size == 0)
+		return(ALENLIST_FAILURE);
+
+	addr = *addrp;
+	length = *lengthp;
+
+	/* 
+	 * If no cursor explicitly specified, use the Address/Length List's 
+	 * internal cursor.
+	 */
+	if (cursorp == NULL)
+		cursorp = &alenlist->al_cursor;
+
+	chunk = cursorp->al_chunk;
+	index = cursorp->al_index;
+
+	ASSERT(cursorp->al_alenlist == alenlist);
+	if (cursorp->al_alenlist != alenlist)
+		return(ALENLIST_FAILURE);
+
+	alenp = &chunk->alc_pair[index];
+
+	/* Return old values.
+	 * FIX: the address and length were previously returned swapped
+	 * (*addrp got al_length and *lengthp got al_addr). */
+	*addrp = alenp->al_addr;
+	*lengthp = alenp->al_length;
+
+	/* Set up new values */
+	alenp->al_length = length;
+	alenp->al_addr = addr;
+
+	/* Update cursor to point to next item */
+	cursorp->al_bcount = length;
+
+	return(ALENLIST_SUCCESS);
+}
+
+
+/*
+ * Initialize a cursor in order to walk an alenlist.
+ * An alenlist_cursor always points to the last thing that was obtained
+ * from the list.  If al_chunk is NULL, then nothing has yet been obtained.
+ *
+ * Note: There is an "internal cursor" associated with every Address/Length List.
+ * For users that scan sequentially through a List, it is more efficient to
+ * simply use the internal cursor.  The caller must insure that no other users
+ * will simultaneously scan the List.  The caller can reposition the internal
+ * cursor by calling alenlist_cursor_init with a NULL cursorp.
+ */
+int
+alenlist_cursor_init(alenlist_t alenlist, size_t offset, alenlist_cursor_t cursorp)
+{
+	size_t byte_count;
+
+	if (cursorp == NULL)
+		cursorp = &alenlist->al_cursor;
+
+	/* Get internal cursor's byte count for use as a hint.
+	 *
+	 * If the internal cursor points past the point that we're interested in,
+	 * we need to seek forward from the beginning.  Otherwise, we can seek forward
+	 * from the internal cursor.
+	 */
+	if ((offset > 0) &&
+	   ((byte_count = alenlist_cursor_offset(alenlist, (alenlist_cursor_t)NULL)) <= offset)) {
+		offset -= byte_count;
+		alenlist_cursor_clone(alenlist, NULL, cursorp);
+	} else
+		do_cursor_init(alenlist, cursorp);
+
+	/* We could easily speed this up, but it shouldn't be used very often. */
+	/* Consume pairs until the requested byte offset is reached. */
+	while (offset != 0) {
+		alenaddr_t addr;
+		size_t length;
+
+		if (alenlist_get(alenlist, cursorp, offset, &addr, &length, 0) != ALENLIST_SUCCESS)
+			return(ALENLIST_FAILURE);
+		offset -= length;
+	}
+	return(ALENLIST_SUCCESS);
+}
+
+
+/*
+ * Copy a cursor.  The source is either the list's internal cursor
+ * (when "alenlist" is supplied) or the explicit cursor "cursorp_in";
+ * the internal cursor takes precedence if both are given, in which
+ * case the explicit cursor must belong to the same list.
+ */
+int
+alenlist_cursor_clone(	alenlist_t alenlist, 
+			alenlist_cursor_t cursorp_in, 
+			alenlist_cursor_t cursorp_out)
+{
+	ASSERT(cursorp_out);
+
+	if (alenlist && cursorp_in && (alenlist != cursorp_in->al_alenlist))
+		return(ALENLIST_FAILURE);	/* cursor/list mismatch */
+
+	if (alenlist) {
+		*cursorp_out = alenlist->al_cursor;	/* small structure copy */
+	} else if (cursorp_in) {
+		*cursorp_out = *cursorp_in;		/* small structure copy */
+	} else {
+		return(ALENLIST_FAILURE);		/* no source cursor */
+	}
+
+	return(ALENLIST_SUCCESS);
+}
+
+/*
+ * Return the number of bytes passed so far according to the specified
+ * cursor.  A NULL cursorp selects the alenlist's internal cursor.
+ */
+size_t
+alenlist_cursor_offset(alenlist_t alenlist, alenlist_cursor_t cursorp)
+{
+	ASSERT(!alenlist || !cursorp || (alenlist == cursorp->al_alenlist));
+
+	if (cursorp != NULL)
+		return(cursorp->al_offset);
+
+	/* Fall back to the internal cursor; a list must then be given. */
+	ASSERT(alenlist);
+	return(alenlist->al_cursor.al_offset);
+}
+
+/*
+ * Allocate and initialize an Address/Length List cursor.
+ * AL_NOSLEEP in "flags" makes the allocation non-blocking; the return
+ * value is then NULL on failure and must be checked by the caller.
+ */
+alenlist_cursor_t
+alenlist_cursor_create(alenlist_t alenlist, unsigned flags)
+{
+	alenlist_cursor_t cursorp;
+
+	ASSERT(alenlist != NULL);
+	cursorp = kmem_zone_alloc(alenlist_cursor_zone, flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
+	if (cursorp) {
+		INCR_COUNT(&alenlist_cursor_count);
+		alenlist_cursor_init(alenlist, 0, cursorp);
+	}
+	return(cursorp);
+}
+
+/*
+ * Free an Address/Length List cursor.
+ * cursorp must be a cursor obtained from alenlist_cursor_create;
+ * NULL is not handled here.
+ */
+void
+alenlist_cursor_destroy(alenlist_cursor_t cursorp)
+{
+	DECR_COUNT(&alenlist_cursor_count);
+	kmem_zone_free(alenlist_cursor_zone, cursorp);
+}
+
+
+/*
+ * Fetch an address/length pair from an Address/Length List.  Update
+ * the "cursor" so that next time this routine is called, we'll get
+ * the next address range.  Never return a length that exceeds maxlength
+ * (if non-zero).  If maxlength is a power of 2, never return a length 
+ * that crosses a maxlength boundary.  [This may seem strange at first,
+ * but it's what many drivers want.]
+ *
+ * Returns: SUCCESS/FAILURE
+ */
+int
+alenlist_get(	alenlist_t alenlist, 		/* in: get from this list */
+		alenlist_cursor_t cursorp, 	/* inout: which item to get */
+		size_t	maxlength,		/* in: at most this length */
+		alenaddr_t *addrp, 		/* out: address */
+		size_t *lengthp,		/* out: length */
+		unsigned flags)
+{
+	alen_t *alenp;
+	alenlist_chunk_t chunk;
+	unsigned int index;
+	size_t bcount;
+	size_t length;
+
+	/* 
+	 * If no cursor explicitly specified, use the Address/Length List's 
+	 * internal cursor.
+	 */
+	if (cursorp == NULL) {
+		if (alenlist->al_logical_size == 0)
+			return(ALENLIST_FAILURE);
+		cursorp = &alenlist->al_cursor;
+	}
+
+	chunk = cursorp->al_chunk;
+	index = cursorp->al_index;
+	bcount = cursorp->al_bcount;	/* bytes already consumed from this pair */
+
+	ASSERT(cursorp->al_alenlist == alenlist);
+	if (cursorp->al_alenlist != alenlist)
+		return(ALENLIST_FAILURE);
+
+	alenp = &chunk->alc_pair[index];
+	length = alenp->al_length - bcount;	/* bytes left in current pair */
+
+	/* Bump up to next pair, if we're done with this pair. */
+	/* NOTE(review): this advance writes cursorp->al_index/al_bcount even
+	 * when AL_LEAVE_CURSOR is set, and also when the end-of-list checks
+	 * below fail — confirm whether callers depend on the cursor being
+	 * untouched in those cases. */
+	if (length == 0) {
+		cursorp->al_bcount = bcount = 0;
+		cursorp->al_index = index = (index + 1) % ALEN_CHUNK_SZ;
+
+		/* Bump up to next chunk, if we're done with this chunk. */
+		if (index == 0) {
+			if (cursorp->al_chunk == alenlist->al_last_chunk)
+				return(ALENLIST_FAILURE);
+			chunk = chunk->alc_next;
+			ASSERT(chunk != NULL);
+		} else {
+			/* If in last chunk, don't go beyond end. */
+			if (cursorp->al_chunk == alenlist->al_last_chunk) {
+				int last_size = alenlist->al_logical_size % ALEN_CHUNK_SZ;
+				if (last_size && (index >= last_size))
+					return(ALENLIST_FAILURE);
+			}
+		}
+
+		alenp = &chunk->alc_pair[index];
+		length = alenp->al_length;
+	}
+
+	/* Constrain what we return according to maxlength */
+	if (maxlength) {
+		size_t maxlen1 = maxlength - 1;
+
+		/* Power-of-2 maxlength: additionally never cross a
+		 * maxlength-aligned boundary. */
+		if ((maxlength & maxlen1) == 0) /* power of 2 */
+			maxlength -= 
+			   ((alenp->al_addr + cursorp->al_bcount) & maxlen1);
+
+		length = MIN(maxlength, length);
+	}
+
+	/* Update the cursor, if desired. */
+	if (!(flags & AL_LEAVE_CURSOR)) {
+		cursorp->al_bcount += length;
+		cursorp->al_chunk = chunk;
+	}
+
+	*lengthp = length;
+	*addrp = alenp->al_addr + bcount;
+
+	return(ALENLIST_SUCCESS);
+}
+
+
+/*
+ * Return the number of pairs currently held in the specified
+ * Address/Length List.  For FIXED_SIZE Lists this is the logical
+ * size, not the pre-allocated capacity.
+ */
+int
+alenlist_size(alenlist_t alenlist)
+{
+	return(alenlist->al_logical_size);
+}
+
+
+/*
+ * Concatenate two Address/Length Lists: append every pair of "from"
+ * onto the end of "to", using a private cursor so the internal cursor
+ * of "from" is left alone.
+ *
+ * NOTE(review): alenlist_append failures (e.g. allocation failure while
+ * growing "to") are silently ignored here since the return type is
+ * void — confirm callers can tolerate a short copy.
+ */
+void
+alenlist_concat(alenlist_t from,
+		alenlist_t to)
+{
+	struct alenlist_cursor_s cursor;
+	alenaddr_t addr;
+	size_t length;
+
+	alenlist_cursor_init(from, 0, &cursor);
+
+	while(alenlist_get(from, &cursor, (size_t)0, &addr, &length, 0) == ALENLIST_SUCCESS)
+		alenlist_append(to, addr, length, 0);
+}
+
+/*
+ * Create a copy of a list.
+ * (Not all attributes of the old list are cloned.  For instance, if
+ * a FIXED_SIZE list is cloned, the resulting list is NOT FIXED_SIZE.)
+ * Returns NULL if the new list cannot be allocated.
+ */
+alenlist_t
+alenlist_clone(alenlist_t old_list, unsigned flags)
+{
+	alenlist_t copy;
+
+	copy = alenlist_create(flags);
+	if (copy == NULL)
+		return(NULL);
+
+	alenlist_concat(old_list, copy);
+	return(copy);
+}
+
+
+/* 
+ * Convert a kernel virtual address to a Physical Address/Length List.
+ * The range [kvaddr, kvaddr+length) is translated page by page via
+ * kvtophys and appended to "alenlist" (which is cleared first), or to
+ * a freshly created list when "alenlist" is NULL.  On append failure a
+ * list we created ourselves is destroyed, and NULL is returned.
+ */
+alenlist_t
+kvaddr_to_alenlist(alenlist_t alenlist, caddr_t kvaddr, size_t length, unsigned flags)
+{
+	alenaddr_t paddr;
+	long offset;
+	size_t piece_length;
+	int created_alenlist;
+
+	/* length is a size_t, so this only rejects length == 0. */
+	if (length <=0)
+		return(NULL);
+
+	/* If caller supplied a List, use it.  Otherwise, allocate one. */
+	if (alenlist == NULL) {
+		alenlist = alenlist_create(0);
+		created_alenlist = 1;
+	} else {
+		alenlist_clear(alenlist);
+		created_alenlist = 0;
+	}
+
+	paddr = kvtophys(kvaddr);
+	offset = poff(kvaddr);
+
+	/* Handle first page (possibly partial, up to the page boundary) */
+	piece_length = MIN(NBPP - offset, length);
+	if (alenlist_append(alenlist, paddr, piece_length, flags) == ALENLIST_FAILURE)
+		goto failure;
+	length -= piece_length;
+	kvaddr += piece_length;
+
+	/* Handle middle pages */
+	while (length >= NBPP) {
+		paddr = kvtophys(kvaddr);
+		if (alenlist_append(alenlist, paddr, NBPP, flags) == ALENLIST_FAILURE)
+			goto failure;
+		length -= NBPP;
+		kvaddr += NBPP;
+	}
+
+	/* Handle last page (partial) */
+	if (length) {
+		ASSERT(length < NBPP);
+		paddr = kvtophys(kvaddr);
+		if (alenlist_append(alenlist, paddr, length, flags) == ALENLIST_FAILURE)
+			goto failure;
+	}
+
+	/* Rewind the internal cursor before handing the list back. */
+	alenlist_cursor_init(alenlist, 0, NULL);
+	return(alenlist);
+
+failure:
+	if (created_alenlist)
+		alenlist_destroy(alenlist);
+	return(NULL);
+}
+
+
+#if DEBUG
+/*
+ * Kernel-debugger helper ("alenshow"): dump the header, internal
+ * cursor, and every pair of an Address/Length List via qprintf.
+ * Uses a private cursor so the list's internal cursor is untouched.
+ */
+static void
+alenlist_show(alenlist_t alenlist)
+{
+	struct alenlist_cursor_s cursor;
+	alenaddr_t addr;
+	size_t length;
+	int i = 0;
+
+	alenlist_cursor_init(alenlist, 0, &cursor);
+
+	qprintf("Address/Length List@0x%x:\n", alenlist);
+	qprintf("logical size=0x%x actual size=0x%x last_chunk at 0x%x\n", 
+		alenlist->al_logical_size, alenlist->al_actual_size, 
+		alenlist->al_last_chunk);
+	qprintf("cursor: chunk=0x%x index=%d offset=0x%x\n",
+		alenlist->al_cursor.al_chunk, 
+		alenlist->al_cursor.al_index,
+		alenlist->al_cursor.al_bcount);
+	while(alenlist_get(alenlist, &cursor, (size_t)0, &addr, &length, 0) == ALENLIST_SUCCESS)
+		qprintf("%d:\t0x%lx 0x%lx\n", ++i, addr, length);
+}
+#endif /* DEBUG */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/cdl.c linux/arch/ia64/sn/io/cdl.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/cdl.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/cdl.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,231 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <asm/sn/sgi.h>
+#include <asm/io.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/pci/bridge.h>
+#include "asm/sn/ioerror_handling.h"
+#include <asm/sn/xtalk/xbow.h>
+
+#ifdef BRINGUP
+/* these get called directly in cdl_add_connpt in fops bypass hack */
+extern int pcibr_attach(devfs_handle_t);
+extern int xbow_attach(devfs_handle_t);
+#endif /* BRINGUP */
+
+/*
+ *    cdl: Connection and Driver List
+ *
+ *	We are not porting this to Linux.  Devices are registered via 
+ *	the normal Linux PCI layer.  This is a very simplified version 
+ *	of cdl that will allow us to register and call our very own 
+ *	IO Infrastructure Drivers e.g. pcibr.
+ */
+
+/* One registry entry: widget part/manufacturer numbers plus the
+ * infrastructure driver's attach entry point. */
+struct cdl {
+    int		part_num;
+    int		mfg_num;
+    int (*attach) (devfs_handle_t);
+} dummy_reg;
+
+typedef struct cdl     *cdl_p;
+
+#define MAX_SGI_IO_INFRA_DRVR 4
+/* Static driver table consulted by cdl_add_connpt: Bridge/XBridge go
+ * to pcibr_attach, Xbow/XXbow to xbow_attach. */
+struct cdl sgi_infrastructure_drivers[MAX_SGI_IO_INFRA_DRVR] =
+{
+	{ XBRIDGE_WIDGET_PART_NUM, XBRIDGE_WIDGET_MFGR_NUM, pcibr_attach /* &pcibr_fops  */},
+	{ BRIDGE_WIDGET_PART_NUM,  BRIDGE_WIDGET_MFGR_NUM,  pcibr_attach /* &pcibr_fops */},
+	{ XXBOW_WIDGET_PART_NUM,   XXBOW_WIDGET_MFGR_NUM,   xbow_attach /* &xbow_fops */},
+	{ XBOW_WIDGET_PART_NUM,    XBOW_WIDGET_MFGR_NUM,    xbow_attach /* &xbow_fops */},
+};
+
+/*
+ * cdl_new:  Called by pciio and xtalk.
+ * There is no real connection/driver registry on Linux, so every
+ * caller is handed the one shared static dummy entry.
+ */
+cdl_p
+cdl_new(char *name, char *k1str, char *k2str)
+{
+	return((cdl_p)&dummy_reg);
+}
+
+/*
+ * cdl_del: Do nothing.
+ * Stub: only logs that the operation is unsupported on this port.
+ */
+void
+cdl_del(cdl_p reg)
+{
+	printk("SGI IO INFRASTRUCTURE - cdl_del not supported.\n");
+}
+
+/*
+ * cdl_add_driver: The driver part number and manufacturers number 
+ * are statically initialized above.
+ * 
+  Do nothing.  (Always reports success.)
+ */
+int
+cdl_add_driver(cdl_p reg, int key1, int key2, char *prefix, int flags)
+{
+    return 0;
+}
+
+/*
+ * cdl_del_driver: Not supported.
+ * Stub: only logs that the operation is unsupported on this port.
+ */
+void
+cdl_del_driver(cdl_p reg,
+	       char *prefix)
+{
+
+	printk("SGI IO INFRASTRUCTURE - cdl_del_driver not supported.\n");
+}
+
+/*
+ * cdl_add_connpt: We found a device and it's connect point.  Call the 
+ * attach routine of that driver.
+ *
+ * May need support for pciba registration here ...
+ *
+ * This routine used to create /hw/.id/pci/.../.. that links to 
+ * /hw/module/006c06/Pbrick/xtalk/15/pci/<slotnum> .. do we still need 
+ * it?  The specified driver attach routine does not reference these 
+ * vertices.
+ *
+ * Returns the attach routine's status for a matched widget, -1 when a
+ * matched entry has no attach routine (BRINGUP), and 0 when no table
+ * entry matches.
+ */
+int
+cdl_add_connpt(cdl_p reg, int part_num, int mfg_num, 
+	       devfs_handle_t connpt)
+{
+	int i;
+	
+	/*
+	 * Find the driver entry point and call the attach routine.
+	 * (FIX: the original loop contained statements after an
+	 * if/else in which every path returned or continued, leaving
+	 * a printk and return unreachable; they have been removed.)
+	 */
+	for (i = 0; i < MAX_SGI_IO_INFRA_DRVR; i++) {
+		if ( (part_num == sgi_infrastructure_drivers[i].part_num) &&
+		   ( mfg_num == sgi_infrastructure_drivers[i].mfg_num) ) {
+			/*
+			 * Call the device attach routines.
+			 */
+			if (sgi_infrastructure_drivers[i].attach) {
+			    return(sgi_infrastructure_drivers[i].attach(connpt));
+			}
+#ifdef BRINGUP
+			/*
+			 * XXX HACK ALERT bypassing fops for now..
+			 */
+			printk("cdl_add_connpt: NEED FOPS FOR OUR DRIVERS!!\n");
+			printk("cdl_add_connpt: part_num= 0x%x  mfg_num= 0x%x\n",
+				part_num, mfg_num);
+			return(-1);
+#endif /* BRINGUP */
+		}
+	}	
+
+	/* No table entry matched this widget. */
+	printk("**** cdl_add_connpt: Driver not found for part_num 0x%x mfg_num 0x%x ****\n", part_num, mfg_num);
+
+	return (0);
+}
+
+/*
+ * cdl_del_connpt: Not implemented.
+ * Stub: only logs that the operation is unsupported on this port.
+ */
+void
+cdl_del_connpt(cdl_p reg, int key1, int key2, devfs_handle_t connpt)
+{
+
+	printk("SGI IO INFRASTRUCTURE - cdl_del_cdl_del_connpt not supported.\n");
+}
+
+/*
+ *    cdl_iterate: Not Implemented.
+ * Stub: only logs that the operation is unsupported on this port.
+ */
+void
+cdl_iterate(cdl_p reg,
+	    char *prefix,
+	    cdl_iter_f * func)
+{
+
+	printk("SGI IO INFRASTRUCTURE - cdl_iterate not supported.\n");
+}
+
+/* async_attach_new: unsupported stub; returns a NULL handle. */
+async_attach_t 
+async_attach_new(void)
+{
+
+	printk("SGI IO INFRASTRUCTURE - async_attach_new not supported.\n");
+	return(0);
+}
+
+/* async_attach_free: unsupported stub; logs only. */
+void 
+async_attach_free(async_attach_t aa)
+{
+	printk("SGI IO INFRASTRUCTURE - async_attach_free not supported.\n");
+}
+
+/* async_attach_get_info: unsupported stub; returns a NULL handle. */
+async_attach_t 
+async_attach_get_info(devfs_handle_t vhdl)
+{
+
+	printk("SGI IO INFRASTRUCTURE - async_attach_get_info not supported.\n");
+	return(0);
+}
+
+/* async_attach_add_info: unsupported stub; logs only. */
+void            
+async_attach_add_info(devfs_handle_t vhdl, async_attach_t aa)
+{
+	printk("SGI IO INFRASTRUCTURE - async_attach_add_info not supported.\n");
+
+}
+
+/* async_attach_del_info: unsupported stub; logs only. */
+void            
+async_attach_del_info(devfs_handle_t vhdl)
+{
+
+	printk("SGI IO INFRASTRUCTURE - async_attach_del_info not supported.\n");
+
+}
+
+/* async_attach_signal_start: unsupported stub; logs only. */
+void async_attach_signal_start(async_attach_t aa)
+{
+
+	printk("SGI IO INFRASTRUCTURE - async_attach_signal_start not supported.\n");
+
+}
+
+/* async_attach_signal_done: unsupported stub; logs only. */
+void async_attach_signal_done(async_attach_t aa)
+{
+
+	printk("SGI IO INFRASTRUCTURE - async_attach_signal_done not supported.\n");
+
+}
+
+/* async_attach_waitall: unsupported stub; logs only. */
+void async_attach_waitall(async_attach_t aa)
+{
+
+	printk("SGI IO INFRASTRUCTURE - async_attach_waitall not supported.\n");
+
+}
+
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/devsupport.c linux/arch/ia64/sn/io/devsupport.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/devsupport.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/devsupport.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1292 @@
+#define ilvt_t int
+
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/config.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/iograph.h>
+
+/* 
+ * Interfaces in this file are all platform-independent AND IObus-independent.
+ * Be aware that there may be macro equivalents to each of these hiding in
+ * header files which supercede these functions.
+ */
+
+/* =====Generic iobus support===== */
+
+/* String table to hold names of interrupts. */
+#ifdef notyet
+static struct string_table device_desc_string_table;
+#endif
+
+/* One time initialization for device descriptor support.
+ * The real string-table setup is compiled out ("notyet"); for now this
+ * only emits the FIXME marker. */
+static void
+device_desc_init(void)
+{
+#ifdef notyet
+	string_table_init(&device_desc_string_table);
+#endif
+	FIXME("device_desc_init");
+}
+
+
+/* Drivers use these interfaces to manage device descriptors.
+ *
+ * Allocate a zeroed device descriptor.  The real implementation is
+ * compiled out ("notyet"); the live code always returns NULL, so all
+ * callers must tolerate a NULL descriptor. */
+static device_desc_t
+device_desc_alloc(void)
+{
+#ifdef notyet
+	device_desc_t device_desc;
+
+	device_desc = (device_desc_t)kmem_zalloc(sizeof(struct device_desc_s), 0);
+	device_desc->intr_target = GRAPH_VERTEX_NONE;
+
+	ASSERT(device_desc->intr_policy == 0);
+	device_desc->intr_swlevel = -1;
+	ASSERT(device_desc->intr_name == NULL);
+	ASSERT(device_desc->flags == 0);
+
+	ASSERT(!(device_desc->flags & D_IS_ASSOC));
+	return(device_desc);
+#else
+	FIXME("device_desc_alloc");
+	return((device_desc_t)0);
+#endif
+}
+
+/* Free a device descriptor unless it is associated with a vertex
+ * (D_IS_ASSOC).  The real body is compiled out ("notyet"); currently
+ * a FIXME-only stub. */
+void
+device_desc_free(device_desc_t device_desc)
+{
+#ifdef notyet
+	if (!(device_desc->flags & D_IS_ASSOC)) /* sanity */
+		kfree(device_desc);
+#endif
+	FIXME("device_desc_free");
+}
+
+/* Duplicate the default device descriptor of "dev" (or build one from
+ * the driver's thread priority when no default exists).  The real body
+ * is compiled out ("notyet"); the live code always returns NULL.
+ *
+ * NOTE(review): the notyet code uses `if (driver = ...)` — assignment
+ * in a condition — and the `ilvl_t` type; neither is visible in this
+ * file, so confirm both before enabling. */
+device_desc_t
+device_desc_dup(devfs_handle_t dev)
+{
+#ifdef notyet
+	device_desc_t orig_device_desc, new_device_desc;
+
+
+	new_device_desc = device_desc_alloc();
+	orig_device_desc = device_desc_default_get(dev);
+	if (orig_device_desc)
+		*new_device_desc = *orig_device_desc;/* small structure copy */
+	else {
+		device_driver_t		driver;
+		ilvl_t			pri;		
+		/* 
+		 * Use the driver's thread priority in 
+		 * case the device thread priority has not
+		 * been given.
+		 */
+		if (driver = device_driver_getbydev(dev)) {
+			pri = device_driver_thread_pri_get(driver);
+			device_desc_intr_swlevel_set(new_device_desc,pri);
+		}
+	}		
+	new_device_desc->flags &= ~D_IS_ASSOC;
+	return(new_device_desc);
+#else
+	FIXME("device_desc_dup");
+	return((device_desc_t)0);
+#endif
+}
+
+/* Look up the device descriptor attached to "dev" via the hwgraph
+ * INFO_LBL_DEVICE_DESC label.  The real body is compiled out
+ * ("notyet"); the live code always returns NULL. */
+device_desc_t	
+device_desc_default_get(devfs_handle_t dev)
+{
+#ifdef notyet
+	graph_error_t rc;
+	device_desc_t device_desc;
+
+	rc = hwgraph_info_get_LBL(dev, INFO_LBL_DEVICE_DESC, (arbitrary_info_t *)&device_desc);
+
+	if (rc == GRAPH_SUCCESS)
+		return(device_desc);
+	else
+		return(NULL);
+#else
+	FIXME("device_desc_default_get");
+	return((device_desc_t)0);
+#endif
+}
+
+/* Attach (or, with a NULL descriptor, detach) a device descriptor to
+ * "dev" under INFO_LBL_DEVICE_DESC, releasing any previously attached
+ * descriptor.  The real body is compiled out ("notyet"); currently a
+ * FIXME-only stub. */
+void		
+device_desc_default_set(devfs_handle_t dev, device_desc_t new_device_desc)
+{
+#ifdef notyet
+	graph_error_t rc;
+	device_desc_t old_device_desc = NULL;
+
+	if (new_device_desc) {
+		new_device_desc->flags |= D_IS_ASSOC;
+		rc = hwgraph_info_add_LBL(dev, INFO_LBL_DEVICE_DESC, 
+						(arbitrary_info_t)new_device_desc);
+		if (rc == GRAPH_DUP) {
+			rc = hwgraph_info_replace_LBL(dev, INFO_LBL_DEVICE_DESC, 
+				(arbitrary_info_t)new_device_desc, 
+				(arbitrary_info_t *)&old_device_desc);
+
+			ASSERT(rc == GRAPH_SUCCESS);
+		}
+		hwgraph_info_export_LBL(dev, INFO_LBL_DEVICE_DESC,
+					sizeof(struct device_desc_s));
+	} else {
+		rc = hwgraph_info_remove_LBL(dev, INFO_LBL_DEVICE_DESC,
+					(arbitrary_info_t *)&old_device_desc);
+	}
+
+	if (old_device_desc) {
+		ASSERT(old_device_desc->flags & D_IS_ASSOC);
+		old_device_desc->flags &= ~D_IS_ASSOC;
+		device_desc_free(old_device_desc);
+	}
+#endif
+	FIXME("device_desc_default_set");
+}
+
+/* Accessor: interrupt target vertex of a descriptor.  Real body is
+ * compiled out ("notyet"); live code returns a NULL handle. */
+devfs_handle_t
+device_desc_intr_target_get(device_desc_t device_desc)
+{
+#ifdef notyet
+	return(device_desc->intr_target);
+#else
+	FIXME("device_desc_intr_target_get");
+	return((devfs_handle_t)0);
+#endif
+}
+
+/* Accessor: interrupt policy.  Compiled out; live code returns 0. */
+int
+device_desc_intr_policy_get(device_desc_t device_desc)
+{
+#ifdef notyet
+	return(device_desc->intr_policy);
+#else
+	FIXME("device_desc_intr_policy_get");
+	return(0);
+#endif
+}
+
+/* Accessor: interrupt software level.  Compiled out; returns 0. */
+ilvl_t
+device_desc_intr_swlevel_get(device_desc_t device_desc)
+{
+#ifdef notyet
+	return(device_desc->intr_swlevel);
+#else
+	FIXME("device_desc_intr_swlevel_get");
+	return((ilvl_t)0);
+#endif
+}
+
+/* Accessor: interrupt name string.  Compiled out; returns NULL. */
+char *
+device_desc_intr_name_get(device_desc_t device_desc)
+{
+#ifdef notyet
+	return(device_desc->intr_name);
+#else
+	FIXME("device_desc_intr_name_get");
+	return(NULL);
+#endif
+}
+
+/* Accessor: descriptor flags.  Compiled out; returns 0. */
+int
+device_desc_flags_get(device_desc_t device_desc)
+{
+#ifdef notyet
+	return(device_desc->flags);
+#else
+	FIXME("device_desc_flags_get");
+	return(0);
+#endif
+}
+
+/* Setter: interrupt target vertex (ignores a NULL descriptor). */
+void
+device_desc_intr_target_set(device_desc_t device_desc, devfs_handle_t target)
+{
+	if (device_desc)
+		device_desc->intr_target = target;
+}
+
+/* Setter: interrupt policy (ignores a NULL descriptor). */
+void
+device_desc_intr_policy_set(device_desc_t device_desc, int policy)
+{
+	if (device_desc)
+		device_desc->intr_policy = policy;
+}
+
+/* Setter: interrupt software level (ignores a NULL descriptor). */
+void
+device_desc_intr_swlevel_set(device_desc_t device_desc, ilvl_t swlevel)
+{
+	if (device_desc)
+		device_desc->intr_swlevel = swlevel;
+}
+
+/* Setter: interrupt name — real body compiled out ("notyet");
+ * live code only emits the FIXME marker. */
+void
+device_desc_intr_name_set(device_desc_t device_desc, char *name)
+{
+#ifdef notyet
+	if (device_desc)
+		device_desc->intr_name = string_table_insert(&device_desc_string_table, name);
+#else
+	FIXME("device_desc_intr_name_set");
+#endif
+}
+
+/* Setter: descriptor flags (ignores a NULL descriptor). */
+void
+device_desc_flags_set(device_desc_t device_desc, int flags)
+{
+	if (device_desc)
+		device_desc->flags = flags;
+}
+
+
+
+/*============= device admin registry routines ===================== */
+
+/* Linked list of <admin-name,admin-val> pairs */
+typedef struct dev_admin_list_s {
+	struct dev_admin_list_s		*admin_next; 	/* next entry in the
+							 * list 
+							 */
+	char				*admin_name;	/* info label */
+	char				*admin_val;	/* actual info */
+} dev_admin_list_t;
+
+/* Device/Driver administration registry */
+typedef struct dev_admin_registry_s {
+	mrlock_t			reg_lock;	/* To allow
+							 * exclusive
+							 * access
+							 */
+	dev_admin_list_t		*reg_first;	/* first entry in 
+							 * the list
+							 */
+	dev_admin_list_t		**reg_last;	/* pointer to the
+							 * next to last entry
+							 * in the last which 
+							 * is also the place
+							 * where the new
+							 * entry gets
+							 * inserted
+							 */
+} dev_admin_registry_t;
+
+/*
+** device_driver_s associates a device driver prefix with device switch entries.
+*/
+struct device_driver_s {
+	struct device_driver_s	*dd_next;	/* next element on hash chain */
+	struct device_driver_s	*dd_prev;	/* previous element on hash chain */
+	char			*dd_prefix;	/* driver prefix string */
+	struct bdevsw		*dd_bdevsw;	/* driver's bdevsw */
+	struct cdevsw		*dd_cdevsw;	/* driver's cdevsw */
+	
+	/* driver administration specific data structures need to
+	 * maintain the list of <driver-paramater,value> pairs
+	 */
+	dev_admin_registry_t	dd_dev_admin_registry;
+	ilvl_t			dd_thread_pri;	/* default thread priority for
+						 *  all this driver's
+						 * threads.
+						 */
+
+};
+
+#define	NEW(_p)		(_p = kmalloc(sizeof(*_p), GFP_KERNEL))
+#define FREE(_p)	(kmem_free(_p))
+	
+/*
+ * helpful lock macros
+ */
+
+#define DEV_ADMIN_REGISTRY_INITLOCK(lockp,name)	mrinit(lockp,name)
+#define DEV_ADMIN_REGISTRY_RDLOCK(lockp)	mraccess(lockp)	       
+#define DEV_ADMIN_REGISTRY_WRLOCK(lockp)	mrupdate(lockp)	       
+#define DEV_ADMIN_REGISTRY_UNLOCK(lockp)	mrunlock(lockp)
+
+/* Initialize the registry: set up its lock and empty list.
+ * The real body is compiled out ("notyet"); the live code only emits
+ * the FIXME marker.
+ */
+static void
+dev_admin_registry_init(dev_admin_registry_t *registry)
+{
+#ifdef notyet
+	/*
+	 * FIX: the original omitted the opening brace of this if-body,
+	 * leaving an unmatched '}' — a latent compile error once the
+	 * "notyet" code is enabled.
+	 */
+	if ( registry != (dev_admin_registry_t *)0 ) {
+		DEV_ADMIN_REGISTRY_INITLOCK(&registry->reg_lock,
+				    "dev_admin_registry_lock");
+		registry->reg_first = NULL;
+		registry->reg_last = &registry->reg_first;
+	}
+#else
+	FIXME("dev_admin_registry_init");
+#endif
+}
+
+/*
+ * add an <name , value > entry to the dev admin registry.
+ * if the name already exists in the registry then change the
+ * value iff the new value differs from the old value.
+ * if the name doesn't exist a new list entry is created and put
+ * at the end.
+ *
+ * The real body is compiled out ("notyet"); the live code only emits
+ * the FIXME marker.
+ *
+ * NOTE(review): in the notyet code the results of NEW() and
+ * kern_calloc() are used without NULL checks — confirm before
+ * enabling.
+ */
+static void
+dev_admin_registry_add(dev_admin_registry_t	*registry,
+		       char			*name,
+		       char			*val)
+{
+#ifdef notyet
+	dev_admin_list_t	*reg_entry;
+	dev_admin_list_t	*scan = 0;
+
+	DEV_ADMIN_REGISTRY_WRLOCK(&registry->reg_lock);
+
+	/* check if the name already exists in the registry */
+	scan = registry->reg_first;
+
+	while (scan) {
+		if (strcmp(scan->admin_name,name) == 0) {
+			/* name is there in the registry */
+			if (strcmp(scan->admin_val,val)) {
+				/* old value != new value 
+				 * reallocate  memory and copy the new value
+				 */
+				FREE(scan->admin_val);
+				scan->admin_val = 
+					(char *)kern_calloc(1,strlen(val)+1);
+				strcpy(scan->admin_val,val);
+				goto out;
+			}
+			goto out;	/* old value == new value */
+		}
+		scan = scan->admin_next;
+	}
+
+	/* name is not there in the registry.
+	 * allocate memory for the new registry entry 
+	 */
+	NEW(reg_entry);
+	
+	reg_entry->admin_next   	= 0;
+	reg_entry->admin_name	= (char *)kern_calloc(1,strlen(name)+1);
+	strcpy(reg_entry->admin_name,name);
+	reg_entry->admin_val	= (char *)kern_calloc(1,strlen(val)+1);
+	strcpy(reg_entry->admin_val,val);
+
+	/* add the entry at the end of the registry */
+
+	*(registry->reg_last)	= reg_entry;
+	registry->reg_last	= &reg_entry->admin_next;
+
+out:	DEV_ADMIN_REGISTRY_UNLOCK(&registry->reg_lock);
+#endif
+	FIXME("dev_admin_registry_add");
+}
+/*
+ * check if there is an info corr. to a particular
+ * name starting from the cursor position in the 
+ * registry
+ * NOTE(review): despite the comment above, the scan starts at
+ * reg_first (the list head), not at any cursor.  The returned
+ * pointer aliases registry-owned storage and is handed back after
+ * the lock is dropped -- confirm callers tolerate concurrent
+ * dev_admin_registry_add() replacing the value.
+ */
+static char *
+dev_admin_registry_find(dev_admin_registry_t *registry,char *name)
+{
+#ifdef notyet
+	dev_admin_list_t	*scan = 0;
+	
+	DEV_ADMIN_REGISTRY_RDLOCK(&registry->reg_lock);
+	scan = registry->reg_first;
+
+	/* linear search for an exact name match */
+	while (scan) {
+		if (strcmp(scan->admin_name,name) == 0) {
+			DEV_ADMIN_REGISTRY_UNLOCK(&registry->reg_lock);
+			return scan->admin_val;
+		}
+		scan = scan->admin_next;
+	}
+	DEV_ADMIN_REGISTRY_UNLOCK(&registry->reg_lock);
+	return 0;
+#else
+	FIXME("dev_admin_registry_find");
+	return(NULL);
+#endif
+}
+/*============= MAIN DEVICE/ DRIVER ADMINISTRATION INTERFACE================ */
+/*
+ * return any labelled info associated with a device.
+ * called by any kernel code including device drivers.
+ * Returns NULL when the label is not present on the vertex.
+ */
+char *
+device_admin_info_get(devfs_handle_t	dev_vhdl,
+		      char		*info_lbl)
+{
+#ifdef notyet
+	char		*info = 0;
+
+	/* return value need not be GRAPH_SUCCESS as the labelled
+	 * info may not be present; on failure "info" simply stays NULL
+	 */
+	(void)hwgraph_info_get_LBL(dev_vhdl,info_lbl,
+				   (arbitrary_info_t *)&info);
+
+	
+	return info;
+#else
+	FIXME("device_admin_info_get");
+	return(NULL);
+#endif
+}
+
+/*
+ * set labelled info associated with a device.
+ * called by hwgraph infrastructure . may also be called
+ * by device drivers etc.
+ * Returns 0 unconditionally in the current (stubbed) build.
+ */
+int
+device_admin_info_set(devfs_handle_t	dev_vhdl,
+		      char		*dev_info_lbl,
+		      char		*dev_info_val)
+{
+#ifdef notyet
+	graph_error_t		rv;
+	arbitrary_info_t	old_info;
+
+	/* Handle the labelled info
+	 *		intr_target
+	 *		sw_level 
+	 * in a special way. These are part of device_desc_t
+	 * Right now this is the only case where we have 
+	 * a set of related device_admin attributes which 
+	 * are grouped together.
+	 * In case there is a need for another set we need to
+	 * take a more generic approach to solving this.
+	 * Basically a registry should be implemented. This
+	 * registry is initialized with the callbacks for the
+	 * attributes which need to handled in a special way
+	 * For example:
+	 * Consider
+	 * 		device_desc
+	 *			intr_target
+	 *			intr_swlevel
+	 * register "do_intr_target" for intr_target
+	 * register "do_intr_swlevel" for intr_swlevel.
+	 * When the device_admin interface layer gets an <attr,val> pair
+	 * it looks in the registry to see if there is a function registered to
+	 * handle "attr. If not follow the default path of setting the <attr,val>
+	 * as labelled information hanging off the vertex.
+	 * In the above example:
+	 * "do_intr_target" does what is being done below for the ADMIN_LBL_INTR_TARGET
+	 * case
+	 */		
+	if (!strcmp(dev_info_lbl,ADMIN_LBL_INTR_TARGET) ||
+	    !strcmp(dev_info_lbl,ADMIN_LBL_INTR_SWLEVEL)) {
+
+		device_desc_t	device_desc;
+		
+		/* Check if there is a default device descriptor
+		 * information for this vertex. If not dup one .
+		 */
+		if (!(device_desc = device_desc_default_get(dev_vhdl))) {
+			device_desc = device_desc_dup(dev_vhdl);
+			device_desc_default_set(dev_vhdl,device_desc);
+
+		}
+		if (!strcmp(dev_info_lbl,ADMIN_LBL_INTR_TARGET)) {
+			/* Check if a target cpu has been specified
+			 * for this device by a device administration
+			 * directive
+			 */
+#ifdef DEBUG	
+			printf(ADMIN_LBL_INTR_TARGET
+			       " dev = 0x%x "
+			       "dev_admin_info = %s"
+			       " target = 0x%x\n",
+			       dev_vhdl,
+			       dev_info_lbl,
+			       hwgraph_path_to_vertex(dev_info_val));
+#endif	
+
+			device_desc->intr_target = 
+				hwgraph_path_to_vertex(dev_info_val);
+		} else if (!strcmp(dev_info_lbl,ADMIN_LBL_INTR_SWLEVEL)) {
+			/* Check if the ithread priority level  has been 
+			 * specified for this device by a device administration
+			 * directive
+			 */
+#ifdef DEBUG	
+			printf(ADMIN_LBL_INTR_SWLEVEL
+			       " dev = 0x%x "
+			       "dev_admin_info = %s"
+			       " sw level = 0x%x\n",
+			       dev_vhdl,
+			       dev_info_lbl,
+			       atoi(dev_info_val));
+#endif	
+			device_desc->intr_swlevel = atoi(dev_info_val);
+		}
+
+	}
+	/* NOTE(review): the special-cased labels above are *also* stored
+	 * as plain labelled info below -- confirm that duplication is
+	 * intentional.  A NULL value means "remove the label".
+	 */
+	if (!dev_info_val)
+		rv = hwgraph_info_remove_LBL(dev_vhdl,
+					     dev_info_lbl,
+					     &old_info);
+	else {
+
+		rv = hwgraph_info_add_LBL(dev_vhdl,
+					  dev_info_lbl,
+					  (arbitrary_info_t)dev_info_val);
+	
+		/* label already present: replace its value in place */
+		if (rv == GRAPH_DUP)  {
+			rv = hwgraph_info_replace_LBL(dev_vhdl,
+					      dev_info_lbl,
+					      (arbitrary_info_t)dev_info_val,
+					      &old_info);
+		}
+	}
+	ASSERT(rv == GRAPH_SUCCESS);
+#endif
+	FIXME("device_admin_info_set");
+	return 0;
+}
+
+/*
+ * return labelled info associated with a device driver
+ * called by kernel code including device drivers
+ * NOTE(review): device_driver_get() can return NULL for an unknown
+ * prefix; "driver" is dereferenced unchecked below -- confirm all
+ * callers pass registered prefixes.
+ */
+char *
+device_driver_admin_info_get(char		*driver_prefix,
+			     char		*driver_info_lbl)
+{
+#ifdef notyet
+	device_driver_t driver;
+
+	driver = device_driver_get(driver_prefix);
+	return (dev_admin_registry_find(&driver->dd_dev_admin_registry,
+					driver_info_lbl));
+#else
+	FIXME("device_driver_admin_info_get");
+	return(NULL);
+#endif
+}
+
+/*
+ * set labelled info associated with a device driver.
+ * called by hwgraph infrastructure . may also be called
+ * from drivers etc.
+ * Always returns 0.
+ * NOTE(review): as in device_driver_admin_info_get(), a NULL result
+ * from device_driver_get() would be dereferenced -- confirm.
+ */
+int
+device_driver_admin_info_set(char		*driver_prefix,
+			     char		*driver_info_lbl,
+			     char		*driver_info_val)
+{
+#ifdef notyet
+	device_driver_t driver;
+
+	driver = device_driver_get(driver_prefix);
+	dev_admin_registry_add(&driver->dd_dev_admin_registry,	
+			       driver_info_lbl,
+			       driver_info_val);
+#endif
+	FIXME("device_driver_admin_info_set");
+	return 0;
+}
+/*================== device / driver  admin support routines================*/
+
+/* static tables created by lboot */
+extern dev_admin_info_t	dev_admin_table[];
+extern dev_admin_info_t	drv_admin_table[];
+extern int		dev_admin_table_size;
+extern int		drv_admin_table_size;
+
+/* Extend the device admin table to allow the kernel startup code to 
+ * provide some device specific administrative hints
+ */
+#define ADMIN_TABLE_CHUNK	100	/* fixed capacity of each extended table */
+static dev_admin_info_t extended_dev_admin_table[ADMIN_TABLE_CHUNK];	
+static int		extended_dev_admin_table_size = 0;	/* entries in use */
+static mrlock_t		extended_dev_admin_table_lock;	/* guards table + size */
+
+/* Initialize the extended device admin table: empty it and set up
+ * the lock that serializes updates to it.
+ */
+void
+device_admin_table_init(void)
+{
+#ifdef notyet
+	extended_dev_admin_table_size = 0;
+	mrinit(&extended_dev_admin_table_lock,
+	       "extended_dev_admin_table_lock");
+#endif
+	FIXME("device_admin_table_init");
+}
+/* Add <device-name , parameter-name , parameter-value> triple to
+ * the extended device administration info table. This is helpful
+ * for kernel startup code to put some hints before the hwgraph
+ * is setup 
+ * Entries are silently dropped once the fixed-size table is full
+ * (the ASSERT catches that in DEBUG builds only).
+ */
+void
+device_admin_table_update(char *name,char *label,char *value)
+{
+#ifdef notyet
+	dev_admin_info_t	*p;
+
+	mrupdate(&extended_dev_admin_table_lock);
+
+	/* Safety check that we haven't exceeded array limits */
+	ASSERT(extended_dev_admin_table_size < ADMIN_TABLE_CHUNK);
+
+	if (extended_dev_admin_table_size == ADMIN_TABLE_CHUNK)
+		goto out;
+	
+	/* Get the pointer to the entry in the table where we are
+	 * going to put the new information 
+	 */
+	p = &extended_dev_admin_table[extended_dev_admin_table_size++];
+
+	/* Allocate memory for the strings and copy them in */
+	/* NOTE(review): kern_calloc() results are used unchecked */
+	p->dai_name = (char *)kern_calloc(1,strlen(name)+1);
+	strcpy(p->dai_name,name);
+	p->dai_param_name = (char *)kern_calloc(1,strlen(label)+1);
+	strcpy(p->dai_param_name,label);
+	p->dai_param_val = (char *)kern_calloc(1,strlen(value)+1);
+	strcpy(p->dai_param_val,value);
+
+out:	mrunlock(&extended_dev_admin_table_lock);
+#endif
+	FIXME("device_admin_table_update");
+}
+/* Extend the device driver  admin table to allow the kernel startup code to 
+ * provide some device driver specific administrative hints
+ */
+
+static dev_admin_info_t extended_drv_admin_table[ADMIN_TABLE_CHUNK];	
+static int		extended_drv_admin_table_size = 0;	/* entries in use */
+/* NOTE(review): unlike its device-table twin this lock is not static --
+ * confirm whether external linkage is intended. */
+mrlock_t		extended_drv_admin_table_lock;
+
+/* Initialize the extended device driver admin table: empty it and
+ * set up the lock that serializes updates to it.
+ */
+void
+device_driver_admin_table_init(void)
+{
+#ifdef notyet
+	extended_drv_admin_table_size = 0;
+	mrinit(&extended_drv_admin_table_lock,
+	       "extended_drv_admin_table_lock");
+#endif
+	FIXME("device_driver_admin_table_init");
+}
+/* Add <device-driver prefix , parameter-name , parameter-value> triple to
+ * the extended device administration info table. This is helpful
+ * for kernel startup code to put some hints before the hwgraph
+ * is setup 
+ * Entries are silently dropped once the fixed-size table is full.
+ */
+void
+device_driver_admin_table_update(char *name,char *label,char *value)
+{
+#ifdef notyet
+	dev_admin_info_t	*p;
+
+	/* BUG FIX: this used to acquire extended_dev_admin_table_lock
+	 * (the *device* table's lock) while modifying the *driver*
+	 * table, and then mrunlock the driver-table lock below --
+	 * a lock/unlock mismatch.  Take the lock we actually release.
+	 */
+	mrupdate(&extended_drv_admin_table_lock);
+
+	/* Safety check that we haven't exceeded array limits */
+	ASSERT(extended_drv_admin_table_size < ADMIN_TABLE_CHUNK);
+
+	if (extended_drv_admin_table_size == ADMIN_TABLE_CHUNK)
+		goto out;
+	
+	/* Get the pointer to the entry in the table where we are
+	 * going to put the new information 
+	 */
+	p = &extended_drv_admin_table[extended_drv_admin_table_size++];
+
+	/* Allocate memory for the strings and copy them in */
+	p->dai_name = (char *)kern_calloc(1,strlen(name)+1);
+	strcpy(p->dai_name,name);
+	p->dai_param_name = (char *)kern_calloc(1,strlen(label)+1);
+	strcpy(p->dai_param_name,label);
+	p->dai_param_val = (char *)kern_calloc(1,strlen(value)+1);
+	strcpy(p->dai_param_val,value);
+
+out:	mrunlock(&extended_drv_admin_table_lock);
+#endif
+	FIXME("device_driver_admin_table_update");
+}
+/*
+ * keeps on adding the labelled info for each new (lbl,value) pair
+ * that it finds in the static dev admin table (  created by lboot)
+ * and the extended dev admin table ( created if at all by the kernel startup
+ *  code) corresponding to a device in the hardware graph.
+ */
+void
+device_admin_info_update(devfs_handle_t	dev_vhdl)
+{
+#ifdef notyet
+	int			i = 0;
+	dev_admin_info_t	*scan;
+	devfs_handle_t		scan_vhdl;
+	
+	/* Check the static device administration info table */
+	scan = dev_admin_table;
+	while (i < dev_admin_table_size) {
+		
+		/* resolve the entry's path; apply it only when it names
+		 * the vertex we are updating */
+		scan_vhdl = hwgraph_path_to_dev(scan->dai_name);
+		if (scan_vhdl == dev_vhdl) {
+			device_admin_info_set(dev_vhdl,
+					      scan->dai_param_name,
+					      scan->dai_param_val);
+		}
+		/* drop the reference taken by hwgraph_path_to_dev() */
+		if (scan_vhdl != NODEV)
+			hwgraph_vertex_unref(scan_vhdl);
+		scan++;i++;
+
+	}
+	i = 0;
+	/* Check the extended device administration info table */
+	scan = extended_dev_admin_table;
+	while (i < extended_dev_admin_table_size) {
+		scan_vhdl = hwgraph_path_to_dev(scan->dai_name);
+		if (scan_vhdl == dev_vhdl) {
+			device_admin_info_set(dev_vhdl,
+					      scan->dai_param_name,
+					      scan->dai_param_val);
+		}
+		if (scan_vhdl != NODEV)
+			hwgraph_vertex_unref(scan_vhdl);
+		scan++;i++;
+
+	}
+
+
+#endif
+	FIXME("device_admin_info_update");
+}
+
+/* looks up the static drv admin table ( created by the lboot) and the extended
+ * drv admin table (created if at all by the kernel startup code) 
+ * for this driver specific administration info and adds it to the admin info 
+ * associated with this device driver's object
+ */
+void
+device_driver_admin_info_update(device_driver_t	driver)
+{
+#ifdef notyet
+	int			i = 0;
+	dev_admin_info_t	*scan;
+
+	/* Check the static device driver administration info table */
+	scan = drv_admin_table;
+	while (i < drv_admin_table_size) {
+
+		/* match on the driver's prefix string */
+		if (strcmp(scan->dai_name,driver->dd_prefix) == 0) {
+			dev_admin_registry_add(&driver->dd_dev_admin_registry,
+						scan->dai_param_name,
+					 	scan->dai_param_val);
+		}
+		scan++;i++;
+	}
+	i = 0;
+	/* Check the extended device driver administration info table */
+	scan = extended_drv_admin_table;
+	while (i < extended_drv_admin_table_size) {
+
+		if (strcmp(scan->dai_name,driver->dd_prefix) == 0) {
+			dev_admin_registry_add(&driver->dd_dev_admin_registry,
+						scan->dai_param_name,
+					 	scan->dai_param_val);
+		}
+		scan++;i++;
+	}
+#endif
+	FIXME("device_driver_admin_info_update");
+}
+
+/* =====Device Driver Support===== */
+
+
+
+/*
+** Generic device driver support routines for use by kernel modules that
+** deal with device drivers (but NOT for use by the drivers themselves).
+** EVERY registered driver currently in the system -- static or loadable --
+** has an entry in the device_driver_hash table.  A pointer to such an entry
+** serves as a generic device driver handle.
+*/
+
+#define DEVICE_DRIVER_HASH_SIZE 32
+#ifdef notyet
+/* device_driver_lock[i] guards hash chain device_driver_hash[i] */
+lock_t device_driver_lock[DEVICE_DRIVER_HASH_SIZE];
+device_driver_t device_driver_hash[DEVICE_DRIVER_HASH_SIZE];
+/* interned copies of driver prefix strings (see device_driver_alloc) */
+static struct string_table driver_prefix_string_table;
+#endif
+
+/*
+** Initialize device driver infrastructure: bring up the subsystems it
+** depends on, reset the hash table, then register every statically
+** linked driver from the lboot-generated master.c table.
+*/
+void
+device_driver_init(void)
+{
+#ifdef notyet
+	int i;
+	extern void alenlist_init(void);
+	extern void hwgraph_init(void);
+	extern void device_desc_init(void);
+
+	ASSERT(DEVICE_DRIVER_NONE == NULL);
+	alenlist_init();
+	hwgraph_init();
+	device_desc_init();
+
+	string_table_init(&driver_prefix_string_table);
+
+	for (i=0; i<DEVICE_DRIVER_HASH_SIZE; i++) {
+		spinlock_init(&device_driver_lock[i], "devdrv");
+		device_driver_hash[i] = NULL;
+	}
+
+	/* Initialize static drivers from master.c table */
+	for (i=0; i<static_devsw_count; i++) {
+		device_driver_t driver;
+		static_device_driver_desc_t desc;
+		int pri;
+
+		desc = &static_device_driver_table[i];
+		/* allocate a handle only if the prefix isn't known yet */
+		driver = device_driver_get(desc->sdd_prefix);
+		if (!driver)
+			driver = device_driver_alloc(desc->sdd_prefix);
+		pri = device_driver_sysgen_thread_pri_get(desc->sdd_prefix);
+		device_driver_thread_pri_set(driver, pri);
+		device_driver_devsw_put(driver, desc->sdd_bdevsw, desc->sdd_cdevsw);
+	}
+#endif
+	FIXME("device_driver_init");
+}
+
+/*
+** Hash a prefix string into a hash table chain: XOR-fold the bytes of
+** the string, then reduce modulo the number of chains.
+*/
+static int
+driver_prefix_hash(char *prefix)
+{
+#ifdef notyet
+	int accum = 0;
+	char nextchar;
+
+	/* parenthesize the assignment-in-condition: same behavior,
+	 * but makes the "assign, then test against NUL" intent explicit
+	 * and silences the compiler's -Wparentheses warning */
+	while ((nextchar = *prefix++) != '\0')
+		accum = accum ^ nextchar;
+
+	return(accum % DEVICE_DRIVER_HASH_SIZE);
+#else
+	FIXME("driver_prefix_hash");
+	return(0);
+#endif
+}
+
+
+/*
+** Allocate a driver handle.
+** Returns the driver handle, or NULL if the driver prefix 
+** already has a handle.
+** 
+** Upper layers prevent races among device_driver_alloc,
+** device_driver_free, and device_driver_get*.
+** NOTE(review): the comment promises NULL on duplicate prefix, but the
+** code only ASSERTs in DEBUG builds -- in production a duplicate would
+** be inserted.  kern_calloc is also only ASSERTed non-NULL.
+*/
+device_driver_t
+device_driver_alloc(char *prefix)
+{
+#ifdef notyet
+	int which_hash;
+	device_driver_t new_driver;
+	int s;
+		
+	which_hash = driver_prefix_hash(prefix);
+
+	new_driver = kern_calloc(1, sizeof(*new_driver));
+	ASSERT(new_driver != NULL);
+	new_driver->dd_prev = NULL;
+	/* intern the prefix so all handles share one copy of the string */
+	new_driver->dd_prefix = string_table_insert(&driver_prefix_string_table, prefix);
+	new_driver->dd_bdevsw = NULL;
+	new_driver->dd_cdevsw = NULL;
+
+	/* pull in any lboot/startup admin hints for this prefix */
+	dev_admin_registry_init(&new_driver->dd_dev_admin_registry);
+	device_driver_admin_info_update(new_driver);
+
+	s = mutex_spinlock(&device_driver_lock[which_hash]);
+
+#if DEBUG
+	{
+		device_driver_t drvscan;
+
+		/* Make sure we haven't already added a driver with this prefix */
+		drvscan = device_driver_hash[which_hash];
+		while (drvscan && 
+		        strcmp(drvscan->dd_prefix, prefix)) {
+			drvscan = drvscan->dd_next;
+		}
+
+		ASSERT(!drvscan);
+	}
+#endif /* DEBUG */
+
+
+	/* Add new_driver to front of hash chain. */
+	new_driver->dd_next = device_driver_hash[which_hash];
+	if (new_driver->dd_next)
+		new_driver->dd_next->dd_prev = new_driver;
+	device_driver_hash[which_hash] = new_driver;
+
+	mutex_spinunlock(&device_driver_lock[which_hash], s);
+
+	return(new_driver);
+#else
+	FIXME("device_driver_alloc");
+	return((device_driver_t)0);
+#endif
+}
+
+/*
+** Free a driver handle.
+**
+** Statically loaded drivers should never device_driver_free.
+** Dynamically loaded drivers device_driver_free when either an
+** unloaded driver is unregistered, or when an unregistered driver
+** is unloaded.
+** Unlinks the handle from its hash chain, severs the devsw
+** back-pointers, and releases the handle's memory.
+*/
+void
+device_driver_free(device_driver_t driver)
+{
+#ifdef notyet
+	int which_hash;
+	int s;
+
+	if (!driver)
+		return;
+
+	which_hash = driver_prefix_hash(driver->dd_prefix);
+
+	s = mutex_spinlock(&device_driver_lock[which_hash]);
+
+#if DEBUG
+	{
+		device_driver_t drvscan;
+
+		/* Make sure we're dealing with the right list */
+		drvscan = device_driver_hash[which_hash];
+		while (drvscan && (drvscan != driver))
+			drvscan = drvscan->dd_next;
+
+		ASSERT(drvscan);
+	}
+#endif /* DEBUG */
+
+	/* unlink from the doubly-linked hash chain; the head pointer
+	 * needs updating when the victim is first on the chain */
+	if (driver->dd_next)
+		driver->dd_next->dd_prev = driver->dd_prev;
+
+	if (driver->dd_prev)
+		driver->dd_prev->dd_next = driver->dd_next;
+	else
+		device_driver_hash[which_hash] = driver->dd_next;
+
+	mutex_spinunlock(&device_driver_lock[which_hash], s);
+
+	driver->dd_next = NULL;		/* sanity */
+	driver->dd_prev = NULL;		/* sanity */
+	driver->dd_prefix = NULL;	/* sanity */
+
+	/* break the devsw <-> driver back-pointers before freeing */
+	if (driver->dd_bdevsw) {
+		driver->dd_bdevsw->d_driver = NULL;
+		driver->dd_bdevsw = NULL;
+	}
+
+	if (driver->dd_cdevsw) {
+		if (driver->dd_cdevsw->d_str) {
+			str_free_mux_node(driver);
+		}
+		driver->dd_cdevsw->d_driver = NULL;
+		driver->dd_cdevsw = NULL;
+	}
+
+	kern_free(driver);
+#endif
+	FIXME("device_driver_free");
+}
+
+
+/*
+** Given a device driver prefix, return a handle to the caller.
+** Returns NULL when the prefix is NULL or not registered.
+*/
+device_driver_t
+device_driver_get(char *prefix)
+{
+#ifdef notyet
+	int which_hash;
+	device_driver_t drvscan;
+	int s;
+
+	if (prefix == NULL)
+		return(NULL);
+		
+	which_hash = driver_prefix_hash(prefix);
+
+	s = mutex_spinlock(&device_driver_lock[which_hash]);
+
+	/* walk the chain until the prefix matches (or the chain ends) */
+	drvscan = device_driver_hash[which_hash];
+	while (drvscan && strcmp(drvscan->dd_prefix, prefix))
+		drvscan = drvscan->dd_next;
+
+	mutex_spinunlock(&device_driver_lock[which_hash], s);
+
+	return(drvscan);
+#else
+	FIXME("device_driver_get");
+	return((device_driver_t)0);
+#endif
+}
+
+
+/*
+** Given a block or char special file devfs_handle_t, find the 
+** device driver that controls it.
+** The char switch is consulted first; returns NULL when the device
+** has neither a cdevsw nor a bdevsw entry.
+*/
+device_driver_t
+device_driver_getbydev(devfs_handle_t device)
+{
+#ifdef notyet
+	struct bdevsw *my_bdevsw;
+	struct cdevsw *my_cdevsw;
+
+	my_cdevsw = get_cdevsw(device);
+	if (my_cdevsw != NULL)
+		return(my_cdevsw->d_driver);
+
+	my_bdevsw = get_bdevsw(device);
+	if (my_bdevsw != NULL)
+		return(my_bdevsw->d_driver);
+
+#endif
+	FIXME("device_driver_getbydev");
+	return((device_driver_t)0);
+}
+
+
+/*
+** Associate a driver with bdevsw/cdevsw pointers.
+**
+** Statically loaded drivers are permanently and automatically associated
+** with the proper bdevsw/cdevsw.  Dynamically loaded drivers associate
+** themselves when the driver is registered, and disassociate when the
+** driver unregisters.
+**
+** Returns 0 on success, -1 on failure (devsw already associated with driver)
+*/
+int
+device_driver_devsw_put(device_driver_t driver,
+			struct bdevsw *my_bdevsw,
+			struct cdevsw *my_cdevsw)
+{
+#ifdef notyet
+	int i;
+
+	if (!driver)
+		return(-1);
+
+	/* Trying to re-register data?  */
+	if (((my_bdevsw != NULL) && (driver->dd_bdevsw != NULL)) ||
+	    ((my_cdevsw != NULL) && (driver->dd_cdevsw != NULL)))
+		return(-1);
+
+	if (my_bdevsw != NULL) {
+		driver->dd_bdevsw = my_bdevsw;
+		my_bdevsw->d_driver = driver;
+		/* also tag the matching slot in the global bdevsw table;
+		 * NOTE(review): slots are matched by d_flags equality --
+		 * confirm d_flags is unique per driver here */
+		for (i = 0; i < bdevmax; i++) {
+			if (driver->dd_bdevsw->d_flags == bdevsw[i].d_flags) {
+				bdevsw[i].d_driver = driver;
+				break;
+			}
+		}
+	}
+
+	if (my_cdevsw != NULL) {
+		driver->dd_cdevsw = my_cdevsw;
+		my_cdevsw->d_driver = driver;
+		for (i = 0; i < cdevmax; i++) {
+			if (driver->dd_cdevsw->d_flags == cdevsw[i].d_flags) {
+				cdevsw[i].d_driver = driver;
+				break;
+			}
+		}
+	}
+#endif
+	FIXME("device_driver_devsw_put");
+	return(0);
+}
+
+
+/*
+** Given a driver, return the corresponding bdevsw and cdevsw pointers.
+** A NULL driver yields NULL for both switch pointers.
+*/
+void
+device_driver_devsw_get(	device_driver_t driver, 
+				struct bdevsw **bdevswp,
+				struct cdevsw **cdevswp)
+{
+	/* guard-clause form: no driver means no switch entries */
+	if (driver == NULL) {
+		*bdevswp = NULL;
+		*cdevswp = NULL;
+		return;
+	}
+	*bdevswp = driver->dd_bdevsw;
+	*cdevswp = driver->dd_cdevsw;
+}
+
+/*
+ * device_driver_thread_pri_set
+ *	Record the default thread priority for a driver's threads.
+ *	Returns 0 on success, -1 when no driver handle is supplied.
+ */ 
+int
+device_driver_thread_pri_set(device_driver_t driver,ilvl_t pri)
+{
+	if (driver == NULL)
+		return(-1);
+
+	driver->dd_thread_pri = pri;
+	return(0);
+}
+/*
+ * device_driver_thread_pri_get
+ * 	Given a driver return the driver thread priority.
+ * 	A NULL driver yields DRIVER_THREAD_PRI_INVALID.
+ */
+ilvl_t
+device_driver_thread_pri_get(device_driver_t driver)
+{
+	return(driver == NULL ? DRIVER_THREAD_PRI_INVALID
+			      : driver->dd_thread_pri);
+}
+/*
+** Given a device driver, return it's handle (prefix).
+** Copies at most "length" bytes of the prefix into "buffer".
+** NOTE(review): strncpy does not NUL-terminate when the prefix is
+** length bytes or longer -- confirm callers size the buffer
+** generously or terminate it themselves.
+*/
+void
+device_driver_name_get(device_driver_t driver, char *buffer, int length)
+{
+	if (driver == NULL)
+		return;
+
+	strncpy(buffer, driver->dd_prefix, length);
+}
+
+
+/*
+** Associate a pointer-sized piece of information with a device.
+** Stored as the vertex's "fastinfo"; retrieve with device_info_get().
+*/
+void 
+device_info_set(devfs_handle_t device, void *info)
+{
+#ifdef notyet
+	hwgraph_fastinfo_set(device, (arbitrary_info_t)info);
+#endif
+	FIXME("device_info_set");
+}
+
+
+/*
+** Retrieve a pointer-sized piece of information associated with a device.
+** Counterpart of device_info_set(); returns whatever was stored as
+** the vertex's "fastinfo".
+*/
+void *
+device_info_get(devfs_handle_t device)
+{
+#ifdef notyet
+	return((void *)hwgraph_fastinfo_get(device));
+#else
+	FIXME("device_info_get");
+	return(NULL);
+#endif
+}
+
+/*
+ * Find the thread priority for a device, from the various
+ * sysgen files.
+ * Resolution order: explicit admin "thread_pri" hint, then the
+ * device's "thread_class" mapped to a per-class default, then the
+ * global default.  The result is clamped to 0..255.
+ */
+int
+device_driver_sysgen_thread_pri_get(char *dev_prefix)
+{
+#ifdef notyet
+	int pri;
+	char *pri_s;
+	char *class;
+
+	/* NOTE(review): these externs rely on pre-C99 implicit int --
+	 * confirm their real declarations before enabling this code */
+	extern default_intr_pri;
+	extern disk_intr_pri;
+	extern serial_intr_pri;
+	extern parallel_intr_pri;
+	extern tape_intr_pri;
+	extern graphics_intr_pri;
+	extern network_intr_pri;
+	extern scsi_intr_pri;
+	extern audio_intr_pri;
+	extern video_intr_pri;
+	extern external_intr_pri;
+	extern tserialio_intr_pri;
+
+	/* Check if there is a thread priority specified for
+	 * this driver's thread thru admin hints. If so 
+	 * use that value. Otherwise set it to its default
+	 * class value, otherwise set it to the default
+	 * value.
+	 */
+
+	if (pri_s = device_driver_admin_info_get(dev_prefix,
+						ADMIN_LBL_THREAD_PRI)) {
+		pri = atoi(pri_s);
+	} else if (class = device_driver_admin_info_get(dev_prefix,
+						ADMIN_LBL_THREAD_CLASS)) {
+		/* map the class name to its configured priority */
+		if (strcmp(class, "disk") == 0)
+			pri = disk_intr_pri;
+		else if (strcmp(class, "serial") == 0)
+			pri = serial_intr_pri;
+		else if (strcmp(class, "parallel") == 0)
+			pri = parallel_intr_pri;
+		else if (strcmp(class, "tape") == 0)
+			pri = tape_intr_pri;
+		else if (strcmp(class, "graphics") == 0)
+			pri = graphics_intr_pri;
+		else if (strcmp(class, "network") == 0)
+			pri = network_intr_pri;
+		else if (strcmp(class, "scsi") == 0)
+			pri = scsi_intr_pri;
+		else if (strcmp(class, "audio") == 0)
+			pri = audio_intr_pri;
+		else if (strcmp(class, "video") == 0)
+			pri = video_intr_pri;
+		else if (strcmp(class, "external") == 0)
+			pri = external_intr_pri;
+		else if (strcmp(class, "tserialio") == 0)
+			pri = tserialio_intr_pri;
+		else
+			pri = default_intr_pri;
+	} else
+		pri = default_intr_pri;
+
+	/* clamp to the valid priority range */
+	if (pri > 255)
+		pri = 255;
+	else if (pri < 0)
+		pri = 0;
+	return pri;
+#else
+	FIXME("device_driver_sysgen_thread_pri_get");
+	return(-1);
+#endif
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/eeprom.c linux/arch/ia64/sn/io/eeprom.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/eeprom.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/eeprom.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1457 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+/*
+ * WARNING:     There is more than one copy of this file in different isms.
+ *              All copies must be kept exactly in sync.
+ *              Do not modify this file without also updating the following:
+ *
+ *              irix/kern/io/eeprom.c
+ *              stand/arcs/lib/libsk/ml/eeprom.c
+ *		stand/arcs/lib/libkl/io/eeprom.c
+ *
+ *      (from time to time they might not be in sync but that's due to bringup
+ *       activity - this comment is to remind us that they eventually have to
+ *       get back together)
+ *
+ * eeprom.c
+ *
+ * access to board-mounted EEPROMs via the L1 system controllers
+ *
+ */
+
+/**************************************************************************
+ *                                                                        *
+ *  Copyright (C) 1999 Silicon Graphics, Inc.                             *
+ *                                                                        *
+ *  These coded instructions, statements, and computer programs  contain  *
+ *  unpublished  proprietary  information of Silicon Graphics, Inc., and  *
+ *  are protected by Federal copyright law.  They  may  not be disclosed  *
+ *  to  third  parties  or copied or duplicated in any form, in whole or  *
+ *  in part, without the prior written consent of Silicon Graphics, Inc.  *
+ *                                                                        *
+ **************************************************************************
+ */
+
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/eeprom.h>
+#include <asm/sn/ksys/i2c.h>
+#include <asm/sn/cmn_err.h>
+/* #include <sys/SN/SN1/ip27log.h> */
+#include <asm/sn/router.h>
+#include <asm/sn/module.h>
+#include <asm/sn/ksys/l1.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/clksupport.h>
+
+#if defined(EEPROM_DEBUG)
+#define db_printf(x) printk x
+#else
+/* BUG FIX: the non-debug branch used to expand to "printk x" as well,
+ * which made the EEPROM_DEBUG guard a no-op; compile debug chatter
+ * out when EEPROM_DEBUG is not defined. */
+#define db_printf(x)
+#endif
+
+/* bcopy-compatible shim: BCOPY(src,dst,len) -- note the argument
+ * order is bcopy's (src first), mapped onto memcpy(dst,src,len). */
+#define BCOPY(x,y,z)	memcpy(y,x,z)
+
+#define UNDERSCORE	0	/* don't convert underscores to hyphens */
+#define HYPHEN		1	/* convert underscores to hyphens */
+
+void		copy_ascii_field( char *to, char *from, int length,
+			          int change_underscore );
+uint64_t	generate_unique_id( char *sn, int sn_len );
+uchar_t		char_to_base36( char c );
+int		nicify( char *dst, eeprom_brd_record_t *src );
+static void	int64_to_hex_string( char *out, uint64_t val );
+
+/* router locking is stubbed out on this port: the macros below expand
+ * to nothing because the lock/unlock calls are commented away */
+// extern int router_lock( net_vec_t, int, int );
+// extern int router_unlock( net_vec_t );
+#define ROUTER_LOCK(p) 	// router_lock(p, 10000, 3000000)
+#define ROUTER_UNLOCK(p) 	// router_unlock(p)
+
+#define IP27LOG_OVNIC           "OverrideNIC"
+
+
+/* the following function converts an EEPROM record to a close facsimile
+ * of the string returned by reading a Dallas Semiconductor NIC (see
+ * one of the many incarnations of nic.c for details on that driver)
+ * Always returns 1.
+ * NOTE(review): no bounds checking is done on "dst" -- the caller
+ * must supply a buffer large enough for all fields; confirm sizing.
+ */
+int nicify( char *dst, eeprom_brd_record_t *src )
+{
+    int field_len;
+    uint64_t unique_id;
+    char *cur_dst = dst;
+    eeprom_board_ia_t   *board;
+
+    board   = src->board_ia;
+    ASSERT( board );  /* there should always be a board info area */
+
+    /* copy part number */
+    strcpy( cur_dst, "Part:" );
+    cur_dst += strlen( cur_dst );
+    ASSERT( (board->part_num_tl & FIELD_FORMAT_MASK)
+	    == FIELD_FORMAT_ASCII );
+    field_len = board->part_num_tl & FIELD_LENGTH_MASK;
+    copy_ascii_field( cur_dst, board->part_num, field_len, HYPHEN );
+    cur_dst += field_len;
+
+    /* copy product name */
+    strcpy( cur_dst, ";Name:" );
+    cur_dst += strlen( cur_dst );
+    ASSERT( (board->product_tl & FIELD_FORMAT_MASK) == FIELD_FORMAT_ASCII );
+    field_len = board->product_tl & FIELD_LENGTH_MASK;
+    copy_ascii_field( cur_dst, board->product, field_len, UNDERSCORE );
+    cur_dst += field_len;
+
+    /* copy serial number */
+    strcpy( cur_dst, ";Serial:" );
+    cur_dst += strlen( cur_dst );
+    ASSERT( (board->serial_num_tl & FIELD_FORMAT_MASK)
+	    == FIELD_FORMAT_ASCII );
+    field_len = board->serial_num_tl & FIELD_LENGTH_MASK;
+    copy_ascii_field( cur_dst, board->serial_num, field_len,
+		      HYPHEN);
+
+    cur_dst += field_len;
+
+    /* copy revision */
+    strcpy( cur_dst, ";Revision:");
+    cur_dst += strlen( cur_dst );
+    ASSERT( (board->board_rev_tl & FIELD_FORMAT_MASK)
+	    == FIELD_FORMAT_ASCII );
+    field_len = board->board_rev_tl & FIELD_LENGTH_MASK;
+    copy_ascii_field( cur_dst, board->board_rev, field_len, HYPHEN );
+    cur_dst += field_len;
+
+    /* EEPROMs don't have equivalents for the Group, Capability and
+     * Variety fields, so we pad these with 0's
+     */
+    strcpy( cur_dst, ";Group:ff;Capability:ffffffff;Variety:ff" );
+    cur_dst += strlen( cur_dst );
+
+    /* use the board serial number to "fake" a laser id */
+    strcpy( cur_dst, ";Laser:" );
+    cur_dst += strlen( cur_dst );
+    unique_id = generate_unique_id( board->serial_num,
+				    board->serial_num_tl & FIELD_LENGTH_MASK );
+    int64_to_hex_string( cur_dst, unique_id );
+    strcat( dst, ";" );
+
+    return 1;
+}
+
+
+/* These functions borrow heavily from chars2* in nic.c
+ * Copy "length" bytes from "from" to "to", sanitizing as we go; the
+ * result is always NUL-terminated (a lone space when length == 0).
+ */
+void copy_ascii_field( char *to, char *from, int length,
+		       int change_underscore )
+{
+    int i;
+    for( i = 0; i < length; i++ ) {
+
+	/* change underscores to hyphens if requested */
+	if( from[i] == '_' && change_underscore == HYPHEN )
+	    to[i] = '-';
+
+	/* : and ; are separators, so mustn't appear within
+	 * a field */
+	else if( from[i] == ':' || from[i] == ';' )
+	    to[i] = '?';
+
+	/* I'm not sure why or if ASCII character 0xff would
+	 * show up in an EEPROM field, but the NIC parsing
+	 * routines wouldn't like it if it did... so we
+	 * get rid of it, just in case. */
+	else if( (unsigned char)from[i] == (unsigned char)0xff )
+	    to[i] = ' ';
+	
+	/* unprintable characters are replaced with . */
+	else if( from[i] < ' ' || from[i] >= 0x7f )
+	    to[i] = '.';
+
+	/* otherwise, just copy the character */
+	else
+	    to[i] = from[i];
+    }
+
+    /* empty field: emit a single space so the field isn't zero-width */
+    if( i == 0 ) {
+	to[i] = ' '; /* return at least a space... */
+	i++;
+    }
+    to[i] = 0;	     /* terminating null */
+}
+
+/* Render a uint64_t as 16 lowercase hex digits, most significant
+ * nibble first, NUL-terminated.
+ *
+ * BUG FIX: the old code walked the bytes of "val" in memory order,
+ * which is only correct on big-endian machines; the little-endian
+ * branch was an unimplemented stub that just printk'ed a complaint
+ * (fatal on this little-endian ia64 target).  Extracting nibbles by
+ * shifting the value makes the result identical on either endianness.
+ */
+static void int64_to_hex_string( char *out, uint64_t val )
+{
+    int i;
+    uchar_t table[] = "0123456789abcdef";
+
+    for( i = 0; i < (int)(2 * sizeof(uint64_t)); i++ ) {
+	/* nibble i counts down from the most significant (bits 63..60) */
+	out[i] = table[ (val >> (60 - 4 * i)) & 0x0f ];
+    }
+    out[i] = '\0';
+}
+
+/* Convert a standard ASCII serial number to a unique integer
+ * id number by treating the serial number string as though
+ * it were a base 36 number.  Characters outside [0-9A-Za-z] are
+ * skipped; an id of zero falls back to the real-time clock so the
+ * result is still (probabilistically) unique.
+ */
+uint64_t generate_unique_id( char *sn, int sn_len )
+{
+    /* BUG FIX: the accumulator was declared "int" although the
+     * function returns uint64_t -- a base-36 accumulation overflows
+     * 32 bits (signed overflow, undefined behavior) after only ~6
+     * serial-number characters.  Use the full 64-bit width.
+     */
+    uint64_t uid = 0;
+    int i;
+
+    #define VALID_BASE36(c)	((c >= '0' && c <='9') \
+			    ||   (c >= 'A' && c <='Z') \
+			    ||   (c >= 'a' && c <='z'))
+
+    for( i = 0; i < sn_len; i++ ) {
+	if( !VALID_BASE36(sn[i]) )
+	    continue;
+	uid *= 36;
+	uid += char_to_base36( sn[i] );
+    }
+
+    if( uid == 0 )
+	return rtc_time();
+
+    return uid;
+}
+
+uchar_t char_to_base36( char c )
+{
+    uchar_t val;
+
+    if( c >= '0' && c <= '9' )
+	val = (c - '0');
+
+    else if( c >= 'A' && c <= 'Z' )
+	val = (c - 'A' + 10);
+
+    else if( c >= 'a' && c <= 'z' )
+	val = (c - 'a' + 10);
+
+    else val = 0;
+
+    return val;
+}
+
+
+/* given a pointer to the three-byte little-endian EEPROM representation
+ * of date-of-manufacture, this function translates to a big-endian
+ * integer format
+ */
+int eeprom_xlate_board_mfr_date( uchar_t *src )
+{
+    int rval = 0;
+    rval += *src; src++;
+    rval += ((int)(*src) << 8); src ++;
+    rval += ((int)(*src) << 16);
+    return rval;
+}
+
+
+int eeprom_str( char *nic_str, nasid_t nasid, int component )
+{
+    eeprom_brd_record_t eep;
+    eeprom_board_ia_t board;
+    eeprom_chassis_ia_t chassis;
+    int r;
+
+    if( (component & C_DIMM) == C_DIMM ) {
+	/* this function isn't applicable to DIMMs */
+	return EEP_PARAM;
+    }
+    else {
+	eep.board_ia = &board;
+	eep.spd = NULL;
+	if( !(component & SUBORD_MASK) )
+	    eep.chassis_ia = &chassis;  /* only main boards have a chassis
+					 * info area */
+	else
+	    eep.chassis_ia = NULL;
+    }
+    
+    switch( component & BRICK_MASK ) {
+      case C_BRICK:
+	r = cbrick_eeprom_read( &eep, nasid, component );
+	break;
+      case IO_BRICK:
+	r = iobrick_eeprom_read( &eep, nasid, component );
+	break;
+      default:
+	return EEP_PARAM;  /* must be an invalid component */
+    }
+    if( r )
+	return r;
+    if( !nicify( nic_str, &eep ) )
+	return EEP_NICIFY;
+
+    return EEP_OK;
+}
+
+int vector_eeprom_str( char *nic_str, nasid_t nasid,
+		       int component, net_vec_t path )
+{
+    eeprom_brd_record_t eep;
+    eeprom_board_ia_t board;
+    eeprom_chassis_ia_t chassis;
+    int r;
+
+    eep.board_ia = &board;
+    if( !(component & SUBORD_MASK) )
+        eep.chassis_ia = &chassis;  /* only main boards have a chassis
+                                     * info area */
+    else
+        eep.chassis_ia = NULL;
+
+    if( !(component & VECTOR) )
+	return EEP_PARAM;
+
+    if( (r = vector_eeprom_read( &eep, nasid, path, component )) )
+	return r;
+
+    if( !nicify( nic_str, &eep ) )
+        return EEP_NICIFY;
+
+    return EEP_OK;
+}
+
+
+int is_iobrick( int nasid, int widget_num )
+{
+    uint32_t wid_reg;
+    int part_num, mfg_num;
+
+    /* Read the widget's WIDGET_ID register to get
+     * its part number and mfg number
+     */
+    wid_reg = *(volatile int32_t *)
+        (NODE_SWIN_BASE( nasid, widget_num ) + WIDGET_ID);
+
+    part_num = (wid_reg & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
+    mfg_num = (wid_reg & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT;
+
+    /* Is this the "xbow part" of an XBridge?  If so, this
+     * widget is definitely part of an I/O brick.
+     */
+    if( part_num == XXBOW_WIDGET_PART_NUM &&
+	mfg_num == XXBOW_WIDGET_MFGR_NUM )
+
+	return 1;
+
+    /* Is this a "bridge part" of an XBridge?  If so, once
+     * again, we know this widget is part of an I/O brick.
+     */
+    if( part_num == XBRIDGE_WIDGET_PART_NUM &&
+	mfg_num == XBRIDGE_WIDGET_MFGR_NUM )
+
+	return 1;
+
+    return 0;
+}
+
+
+int cbrick_uid_get( nasid_t nasid, uint64_t *uid )
+{
+#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+    return EEP_L1;
+#else
+    char uid_str[32];
+    char msg[BRL1_QSIZE];
+    int subch, len;
+    l1sc_t sc;
+    l1sc_t *scp;
+    int local = (nasid == get_nasid());
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+	return EEP_L1;
+
+    /* If the promlog variable pointed to by IP27LOG_OVNIC is set,
+     * use that value for the cbrick UID rather than the EEPROM
+     * serial number.
+     */
+#ifdef LOG_GETENV
+    if( ip27log_getenv( nasid, IP27LOG_OVNIC, uid_str, NULL, 0 ) >= 0 )
+    {
+	/* We successfully read IP27LOG_OVNIC, so return it as the UID. */
+	db_printf(( "cbrick_uid_get:"
+		    "Overriding UID with environment variable %s\n", 
+		    IP27LOG_OVNIC ));
+	*uid = strtoull( uid_str, NULL, 0 );
+	return EEP_OK;
+    }
+#endif
+
+    /* If this brick is retrieving its own uid, use the local l1sc_t to
+     * arbitrate access to the l1; otherwise, set up a new one.
+     */
+    if( local ) {
+	scp = get_l1sc();
+    }
+    else {
+	scp = &sc;
+	sc_init( &sc, nasid, BRL1_LOCALUART );
+    }
+
+    /* fill in msg with the opcode & params */
+    BZERO( msg, BRL1_QSIZE );
+    if( (subch = sc_open( scp, L1_ADDR_LOCAL )) < 0 )
+	return EEP_L1;
+
+    if( (len = sc_construct_msg( scp, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_SER_NUM, 0 )) < 0 )
+    {
+	sc_close( scp, subch );
+	return( EEP_L1 );
+    }
+
+    /* send the request to the L1 */
+    if( sc_command( scp, subch, msg, msg, &len ) ) {
+	sc_close( scp, subch );
+	return( EEP_L1 );
+    }
+
+    /* free up subchannel */
+    sc_close(scp, subch);
+
+    /* check response */
+    if( sc_interpret_resp( msg, 2, L1_ARG_ASCII, uid_str ) < 0 )
+    {
+	return( EEP_L1 );
+    }
+
+    *uid = generate_unique_id( uid_str, strlen( uid_str ) );
+
+    return EEP_OK;
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+
+int rbrick_uid_get( nasid_t nasid, net_vec_t path, uint64_t *uid )
+{
+#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+    return EEP_L1;
+#else
+    char uid_str[32];
+    char msg[BRL1_QSIZE];
+    int subch, len;
+    l1sc_t sc;
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+	return EEP_L1;
+
+#ifdef BRINGUP
+#define FAIL								\
+    {									\
+	*uid = rtc_time();						\
+	printk( "rbrick_uid_get failed; using current time as uid\n" );	\
+	return EEP_OK;							\
+    }
+#endif /* BRINGUP */
+
+    ROUTER_LOCK(path);
+    sc_init( &sc, nasid, path );
+
+    /* fill in msg with the opcode & params */
+    BZERO( msg, BRL1_QSIZE );
+    if( (subch = sc_open( &sc, L1_ADDR_LOCAL )) < 0 ) {
+	ROUTER_UNLOCK(path);
+	FAIL;
+    }
+
+    if( (len = sc_construct_msg( &sc, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_SER_NUM, 0 )) < 0 )
+    {
+	ROUTER_UNLOCK(path);
+	sc_close( &sc, subch );
+	FAIL;
+    }
+
+    /* send the request to the L1 */
+    if( sc_command( &sc, subch, msg, msg, &len ) ) {
+	ROUTER_UNLOCK(path);
+	sc_close( &sc, subch );
+	FAIL;
+    }
+
+    /* free up subchannel */
+    ROUTER_UNLOCK(path);
+    sc_close(&sc, subch);
+
+    /* check response */
+    if( sc_interpret_resp( msg, 2, L1_ARG_ASCII, uid_str ) < 0 )
+    {
+	FAIL;
+    }
+
+    *uid = generate_unique_id( uid_str, strlen( uid_str ) );
+
+    return EEP_OK;
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+int iobrick_uid_get( nasid_t nasid, uint64_t *uid )
+{
+    eeprom_brd_record_t eep;
+    eeprom_board_ia_t board;
+    eeprom_chassis_ia_t chassis;
+    int r;
+
+    eep.board_ia = &board;
+    eep.chassis_ia = &chassis;
+    eep.spd = NULL;
+
+    r = iobrick_eeprom_read( &eep, nasid, IO_BRICK );
+    if( r != EEP_OK ) {
+        *uid = rtc_time();
+        return r;
+    }
+
+    *uid = generate_unique_id( board.serial_num,
+                               board.serial_num_tl & FIELD_LENGTH_MASK );
+
+    return EEP_OK;
+}
+
+
+int ibrick_mac_addr_get( nasid_t nasid, char *eaddr )
+{
+    eeprom_brd_record_t eep;
+    eeprom_board_ia_t board;
+    eeprom_chassis_ia_t chassis;
+    int r;
+    char *tmp;
+
+    eep.board_ia = &board;
+    eep.chassis_ia = &chassis;
+    eep.spd = NULL;
+
+    r = iobrick_eeprom_read( &eep, nasid, IO_BRICK );
+    if( (r != EEP_OK) || (board.mac_addr[0] == '\0') ) {
+	db_printf(( "ibrick_mac_addr_get: "
+		    "Couldn't read MAC address from EEPROM\n" ));
+	return EEP_L1;
+    }
+    else {
+	/* successfully read info area */
+	int ix;
+	tmp = board.mac_addr;
+	for( ix = 0; ix < (board.mac_addr_tl & FIELD_LENGTH_MASK); ix++ )
+	{
+	    *eaddr++ = *tmp++;
+	}
+	*eaddr = '\0';
+    }
+
+    return EEP_OK;
+}
+
+
+/* 
+ * eeprom_vertex_info_set
+ *
+ * Given a vertex handle, a component designation, a starting nasid
+ * and (in the case of a router) a vector path to the component, this
+ * function will read the EEPROM and attach the resulting information
+ * to the vertex in the same string format as that provided by the
+ * Dallas Semiconductor NIC drivers.  If the vertex already has the
+ * string, this function just returns the string.
+ */
+
+extern char *nic_vertex_info_get( devfs_handle_t );
+extern void nic_vmc_check( devfs_handle_t, char * );
+#ifdef BRINGUP
+/* the following were lifted from nic.c - change later? */
+#define MAX_INFO 2048
+#define NEWSZ(ptr,sz)   ((ptr) = kern_malloc((sz)))
+#define DEL(ptr) (kern_free((ptr)))
+#endif /* BRINGUP */
+
+char *eeprom_vertex_info_set( int component, int nasid, devfs_handle_t v,
+                              net_vec_t path )
+{
+        char *info_tmp;
+        int info_len;
+        char *info;
+
+        /* see if this vertex is already marked */
+        info_tmp = nic_vertex_info_get(v);
+        if (info_tmp) return info_tmp;
+
+        /* get a temporary place for the data */
+        NEWSZ(info_tmp, MAX_INFO);
+        if (!info_tmp) return NULL;
+
+        /* read the EEPROM */
+	if( component & R_BRICK ) {
+	    if( RBRICK_EEPROM_STR( info_tmp, nasid, path ) != EEP_OK )
+		return NULL;
+	}
+	else {
+            if( eeprom_str( info_tmp, nasid, component ) != EEP_OK )
+	        return NULL;
+	}
+
+        /* allocate a smaller final place */
+        info_len = strlen(info_tmp)+1;
+        NEWSZ(info, info_len);
+        if (info) {
+                strcpy(info, info_tmp);
+                DEL(info_tmp);
+        } else {
+                info = info_tmp;
+        }
+
+        /* add info to the vertex */
+        hwgraph_info_add_LBL(v, INFO_LBL_NIC,
+                             (arbitrary_info_t) info);
+
+        /* see if someone else got there first */
+        info_tmp = nic_vertex_info_get(v);
+        if (info != info_tmp) {
+            DEL(info);
+            return info_tmp;
+        }
+
+        /* export the data */
+        hwgraph_info_export_LBL(v, INFO_LBL_NIC, info_len);
+
+        /* trigger all matching callbacks */
+        nic_vmc_check(v, info);
+
+        return info;
+}
+
+
+/*********************************************************************
+ *
+ * stubs for use until the Bedrock/L1 link is available
+ *
+ */
+
+#include <asm/sn/nic.h>
+
+/* #define EEPROM_TEST */
+
+/* fake eeprom reading functions (replace when the BR/L1 communication
+ * channel is in working order)
+ */
+
+
+/* generate a character in [0-9A-Z]; if an "extra" character is
+ * specified (such as '_'), include it as one of the possibilities.
+ */
+char random_eeprom_ch( char extra ) 
+{
+    char ch;
+    int modval = 36;
+    if( extra )
+	modval++;
+    
+    ch = rtc_time() % modval;
+
+    if( ch < 10 )
+        ch += '0';
+    else if( ch >= 10 && ch < 36 )
+	ch += ('A' - 10);
+    else
+	ch = extra;
+
+    return ch;
+}
+
+/* create a part number of the form xxx-xxxx-xxx.
+ * It may be important later to generate different
+ * part numbers depending on the component we're
+ * supposed to be "reading" from, so the component
+ * parameter is provided.
+ */
+void fake_a_part_number( char *buf, int component )
+{
+    int i;
+    switch( component ) {
+
+    /* insert component-specific routines here */
+
+    case C_BRICK:
+	strcpy( buf, "030-1266-001" );
+	break;
+    default:
+        for( i = 0; i < 12; i++ ) {
+	    if( i == 3 || i == 8 )
+	        buf[i] = '-';
+	    else
+	        buf[i] = random_eeprom_ch(0);
+        }
+    }
+}
+
+
+/* create a six-character serial number */
+void fake_a_serial_number( char *buf, uint64_t ser )
+{
+    int i;
+    static const char hexchars[] = "0123456789ABCDEF";
+
+    if (ser) {
+	for( i = 5; i >=0; i-- ) {
+	    buf[i] = hexchars[ser & 0xf];
+	    ser >>= 4;
+	}
+    }
+    else {
+	for( i = 0; i < 6; i++ )
+	    buf[i] = random_eeprom_ch(0);
+    }
+}
+
+
+void fake_a_product_name( uchar_t *format, char* buf, int component )
+{
+    switch( component & BRICK_MASK ) {
+
+    case C_BRICK:
+	if( component & SUBORD_MASK ) {
+	    strcpy( buf, "C_BRICK_SUB" );
+	    *format = 0xCB;
+	}
+	else {
+	    strcpy( buf, "IP35" );
+	    *format = 0xC4;
+	}
+	break;
+
+    case R_BRICK:
+        if( component & SUBORD_MASK ) {
+            strcpy( buf, "R_BRICK_SUB" );
+            *format = 0xCB;
+        }
+        else {
+            strcpy( buf, "R_BRICK" );
+            *format = 0xC7;
+        }
+        break;
+
+    case IO_BRICK:
+        if( component & SUBORD_MASK ) {
+            strcpy( buf, "IO_BRICK_SUB" );
+            *format = 0xCC;
+        }
+        else {
+            strcpy( buf, "IO_BRICK" );
+            *format = 0xC8;
+        }
+        break;
+
+    default:
+	strcpy( buf, "UNK_DEVICE" );
+	*format = 0xCA;
+    }
+}
+
+
+
+int fake_an_eeprom_record( eeprom_brd_record_t *buf, int component, 
+			   uint64_t ser )
+{
+    eeprom_board_ia_t *board;
+    eeprom_chassis_ia_t *chassis;
+    int i, cs;
+
+    board = buf->board_ia;
+    chassis = buf->chassis_ia;
+
+    if( !(component & SUBORD_MASK) ) {
+	if( !chassis )
+	    return EEP_PARAM;
+	chassis->format = 0;
+	chassis->length = 5;
+	chassis->type = 0x17;
+
+	chassis->part_num_tl = 0xCC;
+	fake_a_part_number( chassis->part_num, component );
+	chassis->serial_num_tl = 0xC6;
+	fake_a_serial_number( chassis->serial_num, ser );
+
+	cs = chassis->format + chassis->length + chassis->type
+	    + chassis->part_num_tl + chassis->serial_num_tl;
+	for( i = 0; i < (chassis->part_num_tl & FIELD_LENGTH_MASK); i++ )
+	    cs += chassis->part_num[i];
+	for( i = 0; i < (chassis->serial_num_tl & FIELD_LENGTH_MASK); i++ )
+	    cs += chassis->serial_num[i];
+	chassis->checksum = 256 - (cs % 256);
+    }
+
+    if( !board )
+	return EEP_PARAM;
+    board->format = 0;
+    board->length = 10;
+    board->language = 0;
+    board->mfg_date = 1789200; /* noon, 5/26/99 */
+    board->manuf_tl = 0xC3;
+    strcpy( board->manuf, "SGI" );
+
+    fake_a_product_name( &(board->product_tl), board->product, component );
+
+    board->serial_num_tl = 0xC6;
+    fake_a_serial_number( board->serial_num, ser );
+
+    board->part_num_tl = 0xCC;
+    fake_a_part_number( board->part_num, component );
+
+    board->board_rev_tl = 0xC2;
+    board->board_rev[0] = '0';
+    board->board_rev[1] = '1';
+
+    board->eeprom_size_tl = 0x01;
+    board->eeprom_size = 1;
+
+    board->temp_waiver_tl = 0xC2;
+    board->temp_waiver[0] = '0';
+    board->temp_waiver[1] = '1';
+
+    cs = board->format + board->length + board->language
+	+ (board->mfg_date & 0xFF)
+	+ (board->mfg_date & 0xFF00)
+	+ (board->mfg_date & 0xFF0000)
+	+ board->manuf_tl + board->product_tl + board->serial_num_tl
+	+ board->part_num_tl + board->board_rev_tl
+	+ board->board_rev[0] + board->board_rev[1]
+	+ board->eeprom_size_tl + board->eeprom_size + board->temp_waiver_tl
+	+ board->temp_waiver[0] + board->temp_waiver[1];
+    for( i = 0; i < (board->manuf_tl & FIELD_LENGTH_MASK); i++ )
+	cs += board->manuf[i];
+    for( i = 0; i < (board->product_tl & FIELD_LENGTH_MASK); i++ )
+	cs += board->product[i];
+    for( i = 0; i < (board->serial_num_tl & FIELD_LENGTH_MASK); i++ )
+	cs += board->serial_num[i];
+    for( i = 0; i < (board->part_num_tl & FIELD_LENGTH_MASK); i++ )
+	cs += board->part_num[i];
+    
+    board->checksum = 256 - (cs % 256);
+
+    return EEP_OK;
+}
+
+#define EEPROM_CHUNKSIZE	64
+
+#if defined(EEPROM_DEBUG)
+#define RETURN_ERROR							\
+{									\
+    printk( "read_ia error return, component 0x%x, line %d"		\
+	    ", address 0x%x, ia code 0x%x\n",				\
+	    l1_compt, __LINE__, sc->subch[subch].target, ia_code );	\
+    return EEP_L1;							\
+}
+
+#else
+#define RETURN_ERROR	return(EEP_L1)
+#endif
+
+int read_ia( l1sc_t *sc, int subch, int l1_compt, 
+	     int ia_code, char *eep_record )
+{
+#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+    return EEP_L1;
+#else
+    char msg[BRL1_QSIZE]; 	   /* message buffer */
+    int len;              	   /* number of bytes used in message buffer */
+    int ia_len = EEPROM_CHUNKSIZE; /* remaining bytes in info area */
+    int offset = 0;                /* current offset into info area */
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+	return EEP_L1;
+
+    BZERO( msg, BRL1_QSIZE );
+
+    /* retrieve EEPROM data in 64-byte chunks
+     */
+
+    while( ia_len )
+    {
+	/* fill in msg with opcode & params */
+	if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
+				     L1_ADDR_TASK_GENERAL,
+				     L1_REQ_EEPROM, 8,
+				     L1_ARG_INT, l1_compt,
+				     L1_ARG_INT, ia_code,
+				     L1_ARG_INT, offset,
+				     L1_ARG_INT, ia_len )) < 0 )
+	{
+	    RETURN_ERROR;
+	}
+
+	/* send the request to the L1 */
+
+	if( sc_command( sc, subch, msg, msg, &len ) ) {
+	    RETURN_ERROR;
+	}
+
+	/* check response */
+	if( sc_interpret_resp( msg, 5, 
+			       L1_ARG_INT, &ia_len,
+			       L1_ARG_UNKNOWN, &len, eep_record ) < 0 )
+	{
+	    RETURN_ERROR;
+	}
+
+	if( ia_len > EEPROM_CHUNKSIZE )
+	    ia_len = EEPROM_CHUNKSIZE;
+
+	eep_record += EEPROM_CHUNKSIZE;
+	offset += EEPROM_CHUNKSIZE;
+    }
+
+    return EEP_OK;
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+
+int read_spd( l1sc_t *sc, int subch, int l1_compt,
+	      eeprom_spd_u *spd )
+{
+#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+    return EEP_L1;
+#else
+    char msg[BRL1_QSIZE]; 	    /* message buffer */
+    int len;              	    /* number of bytes used in message buffer */
+    int spd_len = EEPROM_CHUNKSIZE; /* remaining bytes in spd record */
+    int offset = 0;		    /* current offset into spd record */
+    char *spd_p = spd->bytes;	    /* "thumb" for writing to spd */
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+	return EEP_L1;
+
+    BZERO( msg, BRL1_QSIZE );
+
+    /* retrieve EEPROM data in 64-byte chunks
+     */
+
+    while( spd_len )
+    {
+	/* fill in msg with opcode & params */
+	if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
+				     L1_ADDR_TASK_GENERAL,
+				     L1_REQ_EEPROM, 8,
+				     L1_ARG_INT, l1_compt,
+				     L1_ARG_INT, L1_EEP_SPD,
+				     L1_ARG_INT, offset,
+				     L1_ARG_INT, spd_len )) < 0 )
+	{
+	    return( EEP_L1 );
+	}
+
+	/* send the request to the L1 */
+	if( sc_command( sc, subch, msg, msg, &len ) ) {
+	    return( EEP_L1 );
+	}
+
+	/* check response */
+	if( sc_interpret_resp( msg, 5, 
+			       L1_ARG_INT, &spd_len,
+			       L1_ARG_UNKNOWN, &len, spd_p ) < 0 )
+	{
+	    return( EEP_L1 );
+	}
+
+	if( spd_len > EEPROM_CHUNKSIZE )
+	    spd_len = EEPROM_CHUNKSIZE;
+
+	spd_p += EEPROM_CHUNKSIZE;
+	offset += EEPROM_CHUNKSIZE;
+    }
+    return EEP_OK;
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+
+int read_chassis_ia( l1sc_t *sc, int subch, int l1_compt,
+		     eeprom_chassis_ia_t *ia )
+{
+    char eep_record[512];          /* scratch area for building up info area */
+    char *eep_rec_p = eep_record;  /* thumb for moving through eep_record */
+    int checksum = 0;              /* use to verify eeprom record checksum */
+    int i;
+
+    /* Read in info area record from the L1.
+     */
+    if( read_ia( sc, subch, l1_compt, L1_EEP_CHASSIS, eep_record )
+	!= EEP_OK )
+    {
+	return EEP_L1;
+    }
+
+    /* Now we've got the whole info area.  Transfer it to the data structure.
+     */
+
+    eep_rec_p = eep_record;
+    ia->format = *eep_rec_p++;
+    ia->length = *eep_rec_p++;
+    if( ia->length == 0 ) {
+	/* since we're using 8*ia->length-1 as an array index later, make
+	 * sure it's sane.
+	 */
+	db_printf(( "read_chassis_ia: eeprom length byte of ZERO\n" ));
+	return EEP_L1;
+    }
+    ia->type = *eep_rec_p++;
+   
+    ia->part_num_tl = *eep_rec_p++;
+
+    (void)BCOPY( eep_rec_p, ia->part_num, (ia->part_num_tl & FIELD_LENGTH_MASK) );
+    eep_rec_p += (ia->part_num_tl & FIELD_LENGTH_MASK);
+
+    ia->serial_num_tl = *eep_rec_p++;
+
+    BCOPY( eep_rec_p, ia->serial_num, 
+	   (ia->serial_num_tl & FIELD_LENGTH_MASK) );
+    eep_rec_p += (ia->serial_num_tl & FIELD_LENGTH_MASK);
+
+    ia->checksum = eep_record[(8 * ia->length) - 1];
+
+    /* verify checksum */
+    eep_rec_p = eep_record;
+    checksum = 0;
+    for( i = 0; i < (8 * ia->length); i++ ) {
+	checksum += *eep_rec_p++;
+    }
+
+    if( (checksum & 0xff) != 0 )
+    {
+	db_printf(( "read_chassis_ia: bad checksum\n" ));
+	db_printf(( "read_chassis_ia: target 0x%x  uart 0x%x\n",
+			   sc->subch[subch].target, sc->uart ));
+	return EEP_BAD_CHECKSUM;
+    }
+
+    return EEP_OK;
+}
+
+
+int read_board_ia( l1sc_t *sc, int subch, int l1_compt,
+		   eeprom_board_ia_t *ia )
+{
+    char eep_record[512];          /* scratch area for building up info area */
+    char *eep_rec_p = eep_record;  /* thumb for moving through eep_record */
+    int checksum = 0;              /* running checksum total */
+    int i;
+
+    BZERO( ia, sizeof( eeprom_board_ia_t ) );
+
+    /* Read in info area record from the L1.
+     */
+    if( read_ia( sc, subch, l1_compt, L1_EEP_BOARD, eep_record )
+	!= EEP_OK )
+    {
+	db_printf(( "read_board_ia: error reading info area from L1\n" ));
+	return EEP_L1;
+    }
+
+     /* Now we've got the whole info area.  Transfer it to the data structure.
+      */
+
+    eep_rec_p = eep_record;
+    ia->format = *eep_rec_p++;
+    ia->length = *eep_rec_p++;
+    if( ia->length == 0 ) {
+	/* since we're using 8*ia->length-1 as an array index later, make
+	 * sure it's sane.
+	 */
+	db_printf(( "read_board_ia: eeprom length byte of ZERO\n" ));
+	return EEP_L1;
+    }
+    ia->language = *eep_rec_p++;
+    
+    ia->mfg_date = eeprom_xlate_board_mfr_date( (uchar_t *)eep_rec_p );
+    eep_rec_p += 3;
+
+    ia->manuf_tl = *eep_rec_p++;
+    
+    BCOPY( eep_rec_p, ia->manuf, (ia->manuf_tl & FIELD_LENGTH_MASK) );
+    eep_rec_p += (ia->manuf_tl & FIELD_LENGTH_MASK);
+
+    ia->product_tl = *eep_rec_p++;
+    
+    BCOPY( eep_rec_p, ia->product, (ia->product_tl & FIELD_LENGTH_MASK) );
+    eep_rec_p += (ia->product_tl & FIELD_LENGTH_MASK);
+
+    ia->serial_num_tl = *eep_rec_p++;
+    
+    BCOPY(eep_rec_p, ia->serial_num, (ia->serial_num_tl & FIELD_LENGTH_MASK));
+    eep_rec_p += (ia->serial_num_tl & FIELD_LENGTH_MASK);
+
+    ia->part_num_tl = *eep_rec_p++;
+
+    BCOPY( eep_rec_p, ia->part_num, (ia->part_num_tl & FIELD_LENGTH_MASK) );
+    eep_rec_p += (ia->part_num_tl & FIELD_LENGTH_MASK);
+
+    eep_rec_p++; /* we do not use the FRU file id */
+    
+    ia->board_rev_tl = *eep_rec_p++;
+    
+    BCOPY( eep_rec_p, ia->board_rev, (ia->board_rev_tl & FIELD_LENGTH_MASK) );
+    eep_rec_p += (ia->board_rev_tl & FIELD_LENGTH_MASK);
+
+    ia->eeprom_size_tl = *eep_rec_p++;
+    ia->eeprom_size = *eep_rec_p++;
+
+    ia->temp_waiver_tl = *eep_rec_p++;
+    
+    BCOPY( eep_rec_p, ia->temp_waiver, 
+	   (ia->temp_waiver_tl & FIELD_LENGTH_MASK) );
+    eep_rec_p += (ia->temp_waiver_tl & FIELD_LENGTH_MASK);
+
+    /* if there's more, we must be reading a main board; get
+     * additional fields
+     */
+    if( ((unsigned char)*eep_rec_p != (unsigned char)EEPROM_EOF) ) {
+
+	ia->ekey_G_tl = *eep_rec_p++;
+	BCOPY( eep_rec_p, (char *)&ia->ekey_G, 
+	       ia->ekey_G_tl & FIELD_LENGTH_MASK );
+	eep_rec_p += (ia->ekey_G_tl & FIELD_LENGTH_MASK);
+	
+	ia->ekey_P_tl = *eep_rec_p++;
+	BCOPY( eep_rec_p, (char *)&ia->ekey_P, 
+	       ia->ekey_P_tl & FIELD_LENGTH_MASK );
+	eep_rec_p += (ia->ekey_P_tl & FIELD_LENGTH_MASK);
+	
+	ia->ekey_Y_tl = *eep_rec_p++;
+	BCOPY( eep_rec_p, (char *)&ia->ekey_Y, 
+	       ia->ekey_Y_tl & FIELD_LENGTH_MASK );
+	eep_rec_p += (ia->ekey_Y_tl & FIELD_LENGTH_MASK);
+	
+	/* 
+	 * need to get a couple more fields if this is an I brick 
+	 */
+	if( ((unsigned char)*eep_rec_p != (unsigned char)EEPROM_EOF) ) {
+
+	    ia->mac_addr_tl = *eep_rec_p++;
+	    BCOPY( eep_rec_p, ia->mac_addr, 
+		   ia->mac_addr_tl & FIELD_LENGTH_MASK );
+	    eep_rec_p += (ia->mac_addr_tl & FIELD_LENGTH_MASK);
+	    
+	    ia->ieee1394_cfg_tl = *eep_rec_p++;
+	    BCOPY( eep_rec_p, ia->ieee1394_cfg,
+		   ia->ieee1394_cfg_tl & FIELD_LENGTH_MASK );
+	    
+	}
+    }
+
+    ia->checksum = eep_record[(ia->length * 8) - 1];
+
+    /* verify checksum */
+    eep_rec_p = eep_record;
+    checksum = 0;
+    for( i = 0; i < (8 * ia->length); i++ ) {
+	checksum += *eep_rec_p++;
+    }
+
+    if( (checksum & 0xff) != 0 )
+    {
+	db_printf(( "read_board_ia: bad checksum\n" ));
+	db_printf(( "read_board_ia: target 0x%x  uart 0x%x\n",
+		    sc->subch[subch].target, sc->uart ));
+	return EEP_BAD_CHECKSUM;
+    }
+
+    return EEP_OK;
+}
+
+
+int _cbrick_eeprom_read( eeprom_brd_record_t *buf, l1sc_t *scp,
+			 int component )
+{
+#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+    return EEP_L1;
+#else
+    int r;
+    uint64_t uid = 0;
+    char uid_str[32];
+    int l1_compt, subch;
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+	return EEP_L1;
+
+    /* make sure we're targeting a cbrick */
+    if( !(component & C_BRICK) )
+	return EEP_PARAM;
+
+    /* If the promlog variable pointed to by IP27LOG_OVNIC is set,
+     * use that value for the cbrick UID rather than the EEPROM
+     * serial number.
+     */
+#ifdef LOG_GETENV
+    if( ip27log_getenv( scp->nasid, IP27LOG_OVNIC, uid_str, "0", 0 ) >= 0 )
+    {
+	db_printf(( "_cbrick_eeprom_read: "
+		    "Overriding UID with environment variable %s\n", 
+		    IP27LOG_OVNIC ));
+	uid = strtoull( uid_str, NULL, 0 );
+    }
+#endif
+
+    if( (subch = sc_open( scp, L1_ADDR_LOCAL )) < 0 )
+	return EEP_L1;
+
+    switch( component )
+    {
+      case C_BRICK:
+	/* c-brick motherboard */
+	l1_compt = L1_EEP_NODE;
+	r = read_chassis_ia( scp, subch, l1_compt, buf->chassis_ia );
+	if( r != EEP_OK ) {
+	    sc_close( scp, subch );
+	    db_printf(( "_cbrick_eeprom_read: using a fake eeprom record\n" ));
+	    return fake_an_eeprom_record( buf, component, uid );
+	}
+	if( uid ) {
+	    /* If IP27LOG_OVNIC is set, we want to put that value
+	     * in as our UID. */
+	    fake_a_serial_number( buf->chassis_ia->serial_num, uid );
+	    buf->chassis_ia->serial_num_tl = 6;
+	}
+	break;
+
+      case C_PIMM:
+	/* one of the PIMM boards */
+	l1_compt = L1_EEP_PIMM( component & COMPT_MASK );
+	break;
+
+      case C_DIMM:
+	/* one of the DIMMs */
+	l1_compt = L1_EEP_DIMM( component & COMPT_MASK );
+	r = read_spd( scp, subch, l1_compt, buf->spd );
+	sc_close( scp, subch );
+	return r;
+
+      default:
+	/* unsupported board type */
+	sc_close( scp, subch );
+	return EEP_PARAM;
+    }
+	      
+    r = read_board_ia( scp, subch, l1_compt, buf->board_ia );
+    sc_close( scp, subch );
+    if( r != EEP_OK ) 
+    {
+	db_printf(( "_cbrick_eeprom_read: using a fake eeprom record\n" ));
+	return fake_an_eeprom_record( buf, component, uid );
+    }
+    return EEP_OK;
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+
+int cbrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
+    		        int component )
+{
+#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+    return EEP_L1;
+#else
+    l1sc_t *scp;
+    int local = (nasid == get_nasid());
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+	return EEP_L1;
+
+    /* If this brick is retrieving its own uid, use the local l1sc_t to
+     * arbitrate access to the l1; otherwise, set up a new one (prom) or
+     * use an existing remote l1sc_t (kernel)
+     */
+    if( local ) {
+	scp = get_l1sc();
+    }
+    else {
+	elsc_t *get_elsc(void);
+	scp = get_elsc();
+    }
+
+    return _cbrick_eeprom_read( buf, scp, component );
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+
+int iobrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
+			 int component )
+{
+#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+    return EEP_L1;
+#else
+    int r;
+    int l1_compt, subch;
+    l1sc_t *scp;
+    int local = (nasid == get_nasid());
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+	return EEP_L1;
+
+    /* make sure we're talking to an applicable brick */
+    if( !(component & IO_BRICK) ) {
+	return EEP_PARAM;
+    }
+
+    /* If we're talking to this c-brick's attached io brick, use
+     * the local l1sc_t; otherwise, set up a new one (prom) or
+     * use an existing remote l1sc_t (kernel)
+     */
+    if( local ) {
+	scp = get_l1sc();
+    }
+    else {
+	elsc_t *get_elsc(void);
+	scp = get_elsc();
+    }
+
+    if( (subch = sc_open( scp, L1_ADDR_LOCALIO )) < 0 )
+	return EEP_L1;
+
+
+    switch( component )
+    {
+      case IO_BRICK:
+	/* IO brick motherboard */
+	l1_compt = L1_EEP_LOGIC;
+	r = read_chassis_ia( scp, subch, l1_compt, buf->chassis_ia );
+
+	if( r != EEP_OK ) {
+	    sc_close( scp, subch );
+#ifdef BRINGUP /* Once EEPROMs are universally available, remove this */
+	    r = fake_an_eeprom_record( buf, component, rtc_time() );
+#endif /* BRINGUP */
+	    return r;
+	}
+	break;
+
+      case IO_POWER:
+	/* IO brick power board */
+	l1_compt = L1_EEP_POWER;
+	break;
+
+      default:
+	/* unsupported board type */
+	sc_close( scp, subch );
+	return EEP_PARAM;
+    }
+
+    r = read_board_ia( scp, subch, l1_compt, buf->board_ia );
+    sc_close( scp, subch );
+    if( r != EEP_OK ) {
+	return r;
+    }
+    return EEP_OK;
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */    
+}
+
+
+int vector_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
+			net_vec_t path, int component )
+{
+#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+    return EEP_L1;
+#else
+    int r;
+    uint64_t uid = 0;
+    int l1_compt, subch;
+    l1sc_t sc;
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+	return EEP_L1;
+
+    /* make sure we're targeting an applicable brick */
+    if( !(component & VECTOR) )
+	return EEP_PARAM;
+
+    switch( component & BRICK_MASK )
+    {
+      case R_BRICK:
+	ROUTER_LOCK( path );
+	sc_init( &sc, nasid, path );
+
+	if( (subch = sc_open( &sc, L1_ADDR_LOCAL )) < 0 )
+	{
+	    db_printf(( "vector_eeprom_read: couldn't open subch\n" ));
+	    ROUTER_UNLOCK(path);
+	    return EEP_L1;
+	}
+	switch( component )
+	{
+	  case R_BRICK:
+	    /* r-brick motherboard */
+	    l1_compt = L1_EEP_LOGIC;
+    	    r = read_chassis_ia( &sc, subch, l1_compt, buf->chassis_ia );
+	    if( r != EEP_OK ) {
+		sc_close( &sc, subch );
+		ROUTER_UNLOCK( path );
+		printk( "vector_eeprom_read: couldn't get rbrick eeprom info;"
+			" using current time as uid\n" );
+		uid = rtc_time();
+		db_printf(("vector_eeprom_read: using a fake eeprom record\n"));
+		return fake_an_eeprom_record( buf, component, uid );
+	    }
+	    break;
+
+	  case R_POWER:
+	    /* r-brick power board */
+	    l1_compt = L1_EEP_POWER;
+	    break;
+
+	  default:
+	    /* unsupported board type */
+	    sc_close( &sc, subch );
+	    ROUTER_UNLOCK( path );
+	    return EEP_PARAM;
+	}
+	r = read_board_ia( &sc, subch, l1_compt, buf->board_ia );
+	sc_close( &sc, subch );
+	ROUTER_UNLOCK( path );
+	if( r != EEP_OK ) {
+	    db_printf(( "vector_eeprom_read: using a fake eeprom record\n" ));
+	    return fake_an_eeprom_record( buf, component, uid );
+	}
+	return EEP_OK;
+
+      case C_BRICK:
+	sc_init( &sc, nasid, path );
+	return _cbrick_eeprom_read( buf, &sc, component );
+
+      default:
+	/* unsupported brick type */
+	return EEP_PARAM;
+    }
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/hcl.c linux/arch/ia64/sn/io/hcl.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/hcl.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/hcl.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1506 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ *  hcl - SGI's Hardware Graph compatibility layer.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/sn/sgi.h>
+#include <linux/devfs_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+#define HCL_NAME "SGI-HWGRAPH COMPATIBILITY DRIVER"
+#define HCL_TEMP_NAME "HCL_TEMP_NAME_USED_FOR_HWGRAPH_VERTEX_CREATE"
+#define HCL_TEMP_NAME_LEN 44 
+#define HCL_VERSION "1.0"
+devfs_handle_t hwgraph_root = NULL;
+
+/*
+ * Debug flag definition.
+ */
+#define OPTION_NONE             0x00
+#define HCL_DEBUG_NONE 0x00000
+#define HCL_DEBUG_ALL  0x0ffff
+#if defined(CONFIG_HCL_DEBUG)
+static unsigned int hcl_debug_init __initdata = HCL_DEBUG_NONE;
+#endif
+static unsigned int hcl_debug = HCL_DEBUG_NONE;
+static unsigned int boot_options = OPTION_NONE;
+
+/*
+ * Some Global definitions.
+ */
+spinlock_t hcl_spinlock;
+devfs_handle_t hcl_handle = NULL;
+
+/*
+ * HCL device driver.
+ * The purpose of this device driver is to provide a facility 
+ * for User Level Apps e.g. hinv, ioconfig etc. an ioctl path 
+ * to manipulate label entries without having to implement
+ * system call interfaces.  This methodology will enable us to 
+ * make this feature module loadable.
+ */
+static int hcl_open(struct inode * inode, struct file * filp)
+{
+	if (hcl_debug) {
+        	printk("HCL: hcl_open called.\n");
+	}
+
+        return(0);
+
+}
+
+static int hcl_close(struct inode * inode, struct file * filp)
+{
+
+	if (hcl_debug) {
+        	printk("HCL: hcl_close called.\n");
+	}
+
+        return(0);
+
+}
+
+static int hcl_ioctl(struct inode * inode, struct file * file,
+        unsigned int cmd, unsigned long arg)
+{
+
+	if (hcl_debug) {
+		printk("HCL: hcl_ioctl called.\n");
+	}
+
+	switch (cmd) {
+		default:
+			if (hcl_debug) {
+				printk("HCL: hcl_ioctl cmd = 0x%x\n", cmd);
+			}
+	}
+
+	return(0);
+
+}
+
+struct file_operations hcl_fops = {
+	NULL,		/* lseek - default */
+	NULL,		/* read - general block-dev read */
+	NULL,		/* write - general block-dev write */
+	NULL,		/* readdir - bad */
+	NULL,		/* poll */
+	hcl_ioctl,      /* ioctl */
+	NULL,		/* mmap */
+	hcl_open,	/* open */
+	NULL,		/* flush */
+	hcl_close,	/* release */
+	NULL,		/* fsync */
+	NULL,		/* fasync */
+	NULL,		/* check_media_change */
+	NULL,		/* revalidate */
+	NULL		/* lock */
+};
+
+
+/*
+ * init_hcl() - Boot time initialization.  Ensure that it is called 
+ *	after devfs has been initialized.
+ *
+ * For now this routine is being called out of devfs/base.c.  Actually 
+ * Not a bad place to be ..
+ *
+ */
+#ifdef MODULE
+int init_module (void)
+#else
+int __init init_hcl(void)
+#endif
+{
+	extern void string_table_init(struct string_table *);
+	extern struct string_table label_string_table;
+	int rv = 0;
+
+	printk ("\n%s: v%s Colin Ngam (cngam@sgi.com)\n",
+		HCL_NAME, HCL_VERSION);
+#if defined(CONFIG_HCL_DEBUG) && !defined(MODULE)
+	hcl_debug = hcl_debug_init;
+	printk ("%s: hcl_debug: 0x%0x\n", HCL_NAME, hcl_debug);
+#endif
+	printk ("\n%s: boot_options: 0x%0x\n", HCL_NAME, boot_options);
+	spin_lock_init(&hcl_spinlock);
+
+	/*
+	 * Create the hwgraph_root on devfs.
+	 */
+	rv = hwgraph_path_add(NULL, "hw", &hwgraph_root);
+	if (rv)
+		printk ("init_hcl: Failed to create hwgraph_root. Error = %d.\n", rv);
+
+	/*
+	 * Create the hcl driver to support inventory entry manipulations.
+	 * By default, it is expected that devfs is mounted on /dev.
+	 *
+	 */
+	hcl_handle = hwgraph_register(hwgraph_root, ".hcl",
+			0, DEVFS_FL_AUTO_DEVNUM,
+			0, 0,
+			S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+			&hcl_fops, NULL);
+
+	if (hcl_handle == NULL) {
+		panic("HCL: Unable to create HCL Driver in init_hcl().\n");
+		return(0);
+	}
+
+	/*
+	 * Initialize the HCL string table.
+	 */
+	string_table_init(&label_string_table);
+
+	return(0);
+
+}
+
+
+/*
+ * hcl_setup() - Process boot time parameters if given.
+ *	"hcl="
+ *	This routine gets called only if "hcl=" is given in the 
+ *	boot line and before init_hcl().
+ *
+ *	We currently do not have any boot options .. when we do, 
+ *	functionalities can be added here.
+ *
+ */
+static int __init hcl_setup(char *str)
+{
+    while ( (*str != '\0') && !isspace (*str) )
+    {
+	printk("HCL: Boot time parameter %s\n", str);
+#ifdef CONFIG_HCL_DEBUG
+        if (strncmp (str, "all", 3) == 0) {
+            hcl_debug_init |= HCL_DEBUG_ALL;
+            str += 3;
+        } else 
+        	return 0;
+#endif
+        if (*str != ',') return 0;
+        ++str;
+    }
+
+    return 1;
+
+}
+
+__setup("hcl=", hcl_setup);
+
+
+/*
+ * Set device specific "fast information".
+ *
+ */
+void
+hwgraph_fastinfo_set(devfs_handle_t de, arbitrary_info_t fastinfo)
+{
+
+	if (hcl_debug) {
+		printk("HCL: hwgraph_fastinfo_set handle 0x%p fastinfo %ld\n",
+			de, fastinfo);
+	}
+		
+	labelcl_info_replace_IDX(de, HWGRAPH_FASTINFO, fastinfo, NULL);
+
+}
+
+
+/*
+ * Get device specific "fast information".
+ *
+ */
+arbitrary_info_t
+hwgraph_fastinfo_get(devfs_handle_t de)
+{
+	arbitrary_info_t fastinfo;
+	int rv;
+
+	if (!de) {
+		printk(KERN_WARNING "HCL: hwgraph_fastinfo_get handle given is NULL.\n");
+		return(-1);
+	}
+
+	rv = labelcl_info_get_IDX(de, HWGRAPH_FASTINFO, &fastinfo);
+	if (rv == 0)
+		return(fastinfo);
+
+	return(0);
+}
+
+
+/*
+ * hwgraph_connectpt_set - Sets the connect point handle in de to the 
+ *	given connect_de handle.  By default, the connect point of the 
+ *	devfs node is the parent.  This effectively changes this assumption.
+ */
+int
+hwgraph_connectpt_set(devfs_handle_t de, devfs_handle_t connect_de)
+{
+	int rv;
+
+	if (!de)
+		return(-1);
+
+	rv = labelcl_info_connectpt_set(de, connect_de);
+
+	return(rv);
+}
+
+
+/*
+ * hwgraph_connectpt_get: Returns the entry's connect point  in the devfs 
+ *	tree.
+ */
+devfs_handle_t
+hwgraph_connectpt_get(devfs_handle_t de)
+{
+	int rv;
+	arbitrary_info_t info;
+	devfs_handle_t connect;
+
+	rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
+	if (rv != 0) {
+		return(NULL);
+	}
+
+	connect = (devfs_handle_t)info;
+	return(connect);
+
+}
+
+
+/*
+ * hwgraph_mk_dir - Creates a directory entry with devfs.
+ *	Note that a directory entry in devfs can have children 
+ *	but it cannot be a char|block special file.
+ */
+devfs_handle_t
+hwgraph_mk_dir(devfs_handle_t de, const char *name,
+                unsigned int namelen, void *info)
+{
+
+	int rv;
+	labelcl_info_t *labelcl_info = NULL;
+	devfs_handle_t new_devfs_handle = NULL;
+	devfs_handle_t parent = NULL;
+
+	/*
+	 * Create the device info structure for hwgraph compatiblity support.
+	 */
+	labelcl_info = labelcl_info_create();
+	if (!labelcl_info)
+		return(NULL);
+
+	/*
+	 * Create a devfs entry.
+	 */
+	new_devfs_handle = devfs_mk_dir(de, name, (void *)labelcl_info);
+	if (!new_devfs_handle) {
+		labelcl_info_destroy(labelcl_info);
+		return(NULL);
+	}
+
+	/*
+	 * Get the parent handle.
+	 */
+	parent = devfs_get_parent (new_devfs_handle);
+
+	/*
+	 * To provide the same semantics as the hwgraph, set the connect point.
+	 */
+	rv = hwgraph_connectpt_set(new_devfs_handle, parent);
+	if (rv) {
+		/*
+		 * We need to clean up!
+		 */
+	}
+
+	/*
+	 * If the caller provides a private data pointer, save it in the 
+	 * labelcl info structure(fastinfo).  This can be retrieved via
+	 * hwgraph_fastinfo_get()
+	 */
+	if (info)
+		hwgraph_fastinfo_set(new_devfs_handle, (arbitrary_info_t)info);
+		
+	return(new_devfs_handle);
+
+}
+
+/*
+ * hwgraph_vertex_create - Create a vertex by giving it a temp name.
+ */
+
+/*
+ * hwgraph_path_add - Create a directory node with the given path starting 
+ * from the given devfs_handle_t.
+ */
+extern char * dev_to_name(devfs_handle_t, char *, uint);
+int
+hwgraph_path_add(devfs_handle_t  fromv,
+		 char *path,
+		 devfs_handle_t *new_de)
+{
+
+	unsigned int	namelen = strlen(path);
+	int		rv;
+
+	/*
+	 * We need to handle the case when fromv is NULL ..
+	 * in this case we need to create the path from the 
+	 * hwgraph root!
+	 */
+	if (fromv == NULL)
+		fromv = hwgraph_root;
+
+	/*
+	 * check the entry doesn't already exist, if it does
+	 * then we simply want new_de to point to it (otherwise
+	 * we'll overwrite the existing labelcl_info struct)
+	 */
+	rv = hwgraph_edge_get(fromv, path, new_de);
+	if (rv)	{	/* couldn't find entry so we create it */
+		*new_de = hwgraph_mk_dir(fromv, path, namelen, NULL);
+		if (new_de == NULL)
+			return(-1);
+		else
+			return(0);
+	}
+	else 
+ 		return(0);
+
+}
+
+/*
+ * hwgraph_register  - Creates a file entry with devfs.
+ *	Note that a file entry cannot have children .. it is like a 
+ *	char|block special vertex in hwgraph.
+ */
+devfs_handle_t
+hwgraph_register(devfs_handle_t de, const char *name,
+                unsigned int namelen, unsigned int flags, 
+		unsigned int major, unsigned int minor,
+                umode_t mode, uid_t uid, gid_t gid, 
+		struct file_operations *fops,
+                void *info)
+{
+
+	int rv;
+        void *labelcl_info = NULL;
+        devfs_handle_t new_devfs_handle = NULL;
+	devfs_handle_t parent = NULL;
+
+        /*
+         * Create the labelcl info structure for hwgraph compatiblity support.
+         */
+        labelcl_info = labelcl_info_create();
+        if (!labelcl_info)
+                return(NULL);
+
+        /*
+         * Create a devfs entry.
+         */
+        new_devfs_handle = devfs_register(de, name, flags, major,
+				minor, mode, fops, labelcl_info);
+        if (!new_devfs_handle) {
+                labelcl_info_destroy((labelcl_info_t *)labelcl_info);
+                return(NULL);
+        }
+
+	/*
+	 * Get the parent handle.
+	 */
+	if (de == NULL)
+		parent = devfs_get_parent (new_devfs_handle);
+	else
+		parent = de;
+		
+	/*
+	 * To provide the same semantics as the hwgraph, set the connect point.
+	 */
+	rv = hwgraph_connectpt_set(new_devfs_handle, parent);
+	if (rv) {
+		/*
+		 * We need to clean up!
+		 */
+		printk("HCL: Unable to set the connect point to it's parent 0x%p\n",
+			new_devfs_handle);
+	}
+
+        /*
+         * If the caller provides a private data pointer, save it in the 
+         * labelcl info structure(fastinfo).  This can be retrieved via
+         * hwgraph_fastinfo_get()
+         */
+        if (info)
+                hwgraph_fastinfo_set(new_devfs_handle, (arbitrary_info_t)info);
+
+        return(new_devfs_handle);
+
+}
+
+
+/*
+ * hwgraph_mk_symlink - Create a symbolic link.
+ */
+int
+hwgraph_mk_symlink(devfs_handle_t de, const char *name, unsigned int namelen,
+                unsigned int flags, const char *link, unsigned int linklen, 
+		devfs_handle_t *handle, void *info)
+{
+
+	void *labelcl_info = NULL;
+	int status = 0;
+	devfs_handle_t new_devfs_handle = NULL;
+
+	/*
+	 * Create the labelcl info structure for hwgraph compatiblity support.
+	 */
+	labelcl_info = labelcl_info_create();
+	if (!labelcl_info)
+		return(-1);
+
+	/*
+	 * Create a symbolic link devfs entry.
+	 */
+	status = devfs_mk_symlink(de, name, flags, link,
+				&new_devfs_handle, labelcl_info);
+	if ( (!new_devfs_handle) || (!status) ){
+		labelcl_info_destroy((labelcl_info_t *)labelcl_info);
+		return(-1);
+	}
+
+	/*
+	 * If the caller provides a private data pointer, save it in the 
+	 * labelcl info structure(fastinfo).  This can be retrieved via
+	 * hwgraph_fastinfo_get()
+	 */
+	if (info)
+		hwgraph_fastinfo_set(new_devfs_handle, (arbitrary_info_t)info);
+
+	*handle = new_devfs_handle;
+	return(0);
+
+}
+
+/*
+ * hwgraph_vertex_get_next - this routine returns the next sibbling for the 
+ *	device entry given in de.  If there are no more sibbling, NULL 
+ * 	is returned in next_sibbling.
+ *
+ *	Currently we do not have any protection against de being deleted 
+ *	while it's handle is being held.
+ */
+int
+hwgraph_vertex_get_next(devfs_handle_t *next_sibbling, devfs_handle_t *de)
+{
+	*next_sibbling = devfs_get_next_sibling (*de);
+
+	if (*next_sibbling != NULL)
+		*de = *next_sibbling;
+	return (0);
+}
+
+
+/*
+ * hwgraph_vertex_destroy - Destroy the devfs entry
+ */
+int
+hwgraph_vertex_destroy(devfs_handle_t de)
+{
+
+	void *labelcl_info = NULL;
+
+	labelcl_info = devfs_get_info(de);
+	devfs_unregister(de);
+
+	if (labelcl_info)
+		labelcl_info_destroy((labelcl_info_t *)labelcl_info);
+
+	return(0);
+}
+
+/*
+** See if a vertex has an outgoing edge with a specified name.
+** Vertices in the hwgraph *implicitly* contain these edges:
+**	"." 	refers to "current vertex"
+**	".." 	refers to "connect point vertex"
+**	"char"	refers to current vertex (character device access)
+**	"block"	refers to current vertex (block device access)
+*/
+
+/*
+ * hwgraph_edge_add - This routines has changed from the original conext.
+ * All it does now is to create a symbolic link from "from" to "to".
+ */
+/* ARGSUSED */
+int
+hwgraph_edge_add(devfs_handle_t from, devfs_handle_t to, char *name)
+{
+
+	char *path;
+	int name_start;
+	devfs_handle_t handle = NULL;
+	int rv;
+
+	path = kmalloc(1024, GFP_KERNEL);
+	name_start = devfs_generate_path (to, path, 1024);
+
+	/*
+	 * Otherwise, just create a symlink to the vertex.
+	 * In this case the vertex was previous created with a REAL pathname.
+	 */
+	rv = devfs_mk_symlink (from, (const char *)name, 
+			       DEVFS_FL_DEFAULT, (const char *)&path[name_start],
+			       &handle, NULL);
+
+	name_start = devfs_generate_path (handle, path, 1024);
+	return(rv);
+
+	
+}
+/* ARGSUSED */
+int
+hwgraph_edge_get(devfs_handle_t from, char *name, devfs_handle_t *toptr)
+{
+
+	int namelen = 0;
+	devfs_handle_t target_handle = NULL;
+
+	if (name == NULL)
+		return(-1);
+
+	if (toptr == NULL)
+		return(-1);
+
+	/*
+	 * If the name is "." just return the current devfs entry handle.
+	 */
+	if (!strcmp(name, HWGRAPH_EDGELBL_DOT)) {
+		if (toptr) {
+			*toptr = from;
+		}
+	} else if (!strcmp(name, HWGRAPH_EDGELBL_DOTDOT)) {
+		/*
+		 * Hmmm .. should we return the connect point or parent ..
+		 * see in hwgraph, the concept of parent is the connectpt!
+		 *
+		 * Maybe we should see whether the connectpt is set .. if 
+		 * not just return the parent!
+		 */
+		target_handle = hwgraph_connectpt_get(from);
+		if (target_handle) {
+			/*
+			 * Just return the connect point.
+			 */
+			*toptr = target_handle;
+			return(0);
+		}
+		target_handle = devfs_get_parent(from);
+		*toptr = target_handle;
+
+	} else {
+		/*
+		 * Call devfs to get the devfs entry.
+		 */
+		namelen = (int) strlen(name);
+		target_handle = devfs_find_handle (from, name, 0, 0,
+					0, 1); /* Yes traverse symbolic links */
+		if (target_handle == NULL)
+			return(-1);
+		else
+		*toptr = target_handle;
+	}
+
+	return(0);
+}
+
+
+/*
+ * hwgraph_edge_get_next - Retrieves the next sibbling given the current
+ *	entry number "placeptr".
+ *
+ * 	Allow the caller to retrieve walk through the sibblings of "source" 
+ * 	devfs_handle_t.  The implicit edges "." and ".." is returned first 
+ * 	followed by each of the real children.
+ *
+ *	We may end up returning garbage if another thread perform any deletion 
+ *	in this directory before "placeptr".
+ *
+ */
+/* ARGSUSED */
+int
+hwgraph_edge_get_next(devfs_handle_t source, char *name, devfs_handle_t *target,
+                              uint *placeptr)
+
+{
+
+        uint which_place;
+	unsigned int namelen = 0;
+	const char *tempname = NULL;
+
+        if (placeptr == NULL)
+                return(-1);
+
+        which_place = *placeptr;
+
+again:
+        if (which_place <= HWGRAPH_RESERVED_PLACES) {
+                if (which_place == EDGE_PLACE_WANT_CURRENT) {
+			/*
+			 * Looking for "."
+			 * Return the current devfs handle.
+			 */
+                        if (name != NULL)
+                                strcpy(name, HWGRAPH_EDGELBL_DOT);
+
+                        if (target != NULL) {
+                                *target = source; 
+				/* XXX should incr "source" ref count here if we
+				 * ever implement ref counts */
+                        }
+
+                } else if (which_place == EDGE_PLACE_WANT_CONNECTPT) {
+			/*
+			 * Looking for the connect point or parent.
+			 * If the connect point is set .. it returns the connect point.
+			 * Otherwise, it returns the parent .. will we support 
+			 * connect point?
+			 */
+                        devfs_handle_t connect_point = hwgraph_connectpt_get(source);
+
+                        if (connect_point == NULL) {
+				/*
+				 * No connectpoint set .. either the User
+				 * explicitly NULL it or this node was not 
+				 * created via hcl.
+				 */
+                                which_place++;
+                                goto again;
+                        }
+
+                        if (name != NULL)
+                                strcpy(name, HWGRAPH_EDGELBL_DOTDOT);
+
+                        if (target != NULL)
+                                *target = connect_point;
+
+                } else if (which_place == EDGE_PLACE_WANT_REAL_EDGES) {
+			/* 
+			 * return first "real" entry in directory, and increment
+			 * placeptr.  Next time around we should have 
+			 * which_place > HWGRAPH_RESERVED_EDGES so we'll fall through
+			 * this nested if block.
+			 */
+			*target = devfs_get_first_child(source);
+			if (*target && name) {
+				tempname = devfs_get_name(*target, &namelen);
+				if (tempname && namelen)
+					strcpy(name, tempname);
+			}
+					
+			*placeptr = which_place + 1;
+			return (0);
+                }
+
+                *placeptr = which_place+1;
+                return(0);
+        }
+
+	/*
+	 * walk linked list, (which_place - HWGRAPH_RESERVED_PLACES) times
+	 */
+	{
+		devfs_handle_t	curr;
+		int		i = 0;
+
+		for (curr=devfs_get_first_child(source), i= i+HWGRAPH_RESERVED_PLACES; 
+			curr!=NULL && i<which_place; 
+			curr=devfs_get_next_sibling(curr), i++)
+			;
+		*target = curr;
+		*placeptr = which_place + 1;
+		if (curr && name) {
+			tempname = devfs_get_name(*target, &namelen);
+			printk("hwgraph_edge_get_next: Component name = %s, length = %d\n", tempname, namelen);
+			if (tempname && namelen)
+				strcpy(name, tempname);
+		}
+	}
+	if (target == NULL)
+		return(-1);
+	else
+        	return(0);
+}
+
+/*
+ * hwgraph_info_add_LBL - Adds a new label for the device.  Mark the info_desc
+ *	of the label as INFO_DESC_PRIVATE and store the info in the label.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_add_LBL(	devfs_handle_t de,
+			char *name,
+			arbitrary_info_t info)
+{
+	return(labelcl_info_add_LBL(de, name, INFO_DESC_PRIVATE, info));
+}
+
+/*
+ * hwgraph_info_remove_LBL - Remove the label entry for the device.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_remove_LBL(	devfs_handle_t de,
+				char *name,
+				arbitrary_info_t *old_info)
+{
+	return(labelcl_info_remove_LBL(de, name, NULL, old_info));
+}
+
+/*
+ * hwgraph_info_replace_LBL - replaces an existing label with 
+ *	a new label info value.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_replace_LBL(	devfs_handle_t de,
+				char *name,
+				arbitrary_info_t info,
+				arbitrary_info_t *old_info)
+{
+	return(labelcl_info_replace_LBL(de, name,
+			INFO_DESC_PRIVATE, info,
+			NULL, old_info));
+}
+/*
+ * hwgraph_info_get_LBL - Get and return the info value in the label of the 
+ * 	device.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_LBL(	devfs_handle_t de,
+			char *name,
+			arbitrary_info_t *infop)
+{
+	return(labelcl_info_get_LBL(de, name, NULL, infop));
+}
+
+/*
+ * hwgraph_info_get_exported_LBL - Retrieve the info_desc and info pointer 
+ *	of the given label for the device.  The weird thing is that the label 
+ *	that matches the name is return irrespective of the info_desc value!
+ *	Do not understand why the word "exported" is used!
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_exported_LBL(	devfs_handle_t de,
+				char *name,
+				int *export_info,
+				arbitrary_info_t *infop)
+{
+	int rc;
+	arb_info_desc_t info_desc;
+
+	rc = labelcl_info_get_LBL(de, name, &info_desc, infop);
+	if (rc == 0)
+		*export_info = (int)info_desc;
+
+	return(rc);
+}
+
+/*
+ * hwgraph_info_get_next_LBL - Returns the next label info given the 
+ *	current label entry in place.
+ *
+ *	Once again this has no locking or reference count for protection.
+ *
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_next_LBL(	devfs_handle_t de,
+				char *buf,
+				arbitrary_info_t *infop,
+				labelcl_info_place_t *place)
+{
+	return(labelcl_info_get_next_LBL(de, buf, NULL, infop, place));
+}
+
+/*
+ * hwgraph_info_export_LBL - Retrieve the specified label entry and modify 
+ *	the info_desc field with the given value in nbytes.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_export_LBL(devfs_handle_t de, char *name, int nbytes)
+{
+	arbitrary_info_t info;
+	int rc;
+
+	if (nbytes == 0)
+		nbytes = INFO_DESC_EXPORT;
+
+	if (nbytes < 0)
+		return(-1);
+
+	rc = labelcl_info_get_LBL(de, name, NULL, &info);
+	if (rc != 0)
+		return(rc);
+
+	rc = labelcl_info_replace_LBL(de, name,
+				nbytes, info, NULL, NULL);
+
+	return(rc);
+}
+
+/*
+ * hwgraph_info_unexport_LBL - Retrieve the given label entry and change the 
+ * label info_descr filed to INFO_DESC_PRIVATE.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_unexport_LBL(devfs_handle_t de, char *name)
+{
+	arbitrary_info_t info;
+	int rc;
+
+	rc = labelcl_info_get_LBL(de, name, NULL, &info);
+	if (rc != 0)
+		return(rc);
+
+	rc = labelcl_info_replace_LBL(de, name,
+				INFO_DESC_PRIVATE, info, NULL, NULL);
+
+	return(rc);
+}
+
+/*
+ * hwgraph_path_lookup - return the handle for the given path.
+ *
+ */
+int
+hwgraph_path_lookup(	devfs_handle_t start_vertex_handle,
+			char *lookup_path,
+			devfs_handle_t *vertex_handle_ptr,
+			char **remainder)
+{
+	*vertex_handle_ptr = devfs_find_handle(start_vertex_handle,	/* start dir */
+					lookup_path,		/* path */
+					0,			/* major */
+					0,			/* minor */
+					0,			/* char | block */
+					1);			/* traverse symlinks */
+	if (*vertex_handle_ptr == NULL)
+		return(-1);
+	else
+		return(0);
+}
+
+/*
+ * hwgraph_traverse - Find and return the devfs handle starting from de.
+ *
+ */
+graph_error_t
+hwgraph_traverse(devfs_handle_t de, char *path, devfs_handle_t *found)
+{
+	/* 
+	 * get the directory entry (path should end in a directory)
+	 */
+
+	*found = devfs_find_handle(de,	/* start dir */
+			    path,	/* path */
+			    0,		/* major */
+			    0,		/* minor */
+			    0,		/* char | block */
+			    1);		/* traverse symlinks */
+	if (*found == NULL)
+		return(GRAPH_NOT_FOUND);
+	else
+		return(GRAPH_SUCCESS);
+}
+
+/*
+ * hwgraph_path_to_vertex - Return the devfs entry handle for the given 
+ *	pathname .. assume traverse symlinks too!.
+ */
+devfs_handle_t
+hwgraph_path_to_vertex(char *path)
+{
+	return(devfs_find_handle(NULL,	/* start dir */
+			path,		/* path */
+		    	0,		/* major */
+		    	0,		/* minor */
+		    	0,		/* char | block */
+		    	1));		/* traverse symlinks */
+}
+
+/*
+ * hwgraph_path_to_dev - Returns the devfs_handle_t of the given path ..
+ *	We only deal with devfs handle and not devfs_handle_t.
+*/
+devfs_handle_t
+hwgraph_path_to_dev(char *path)
+{
+	devfs_handle_t  de;
+
+	de = hwgraph_path_to_vertex(path);
+	return(de);
+}
+
+/*
+ * hwgraph_block_device_get - return the handle of the block device file.
+ *	The assumption here is that de is a directory.
+*/
+devfs_handle_t
+hwgraph_block_device_get(devfs_handle_t de)
+{
+	return(devfs_find_handle(de,		/* start dir */
+			"block",		/* path */
+		    	0,			/* major */
+		    	0,			/* minor */
+		    	DEVFS_SPECIAL_BLK,	/* char | block */
+		    	1));			/* traverse symlinks */
+}
+
+/*
+ * hwgraph_char_device_get - return the handle of the char device file.
+ *      The assumption here is that de is a directory.
+*/
+devfs_handle_t
+hwgraph_char_device_get(devfs_handle_t de)
+{
+	return(devfs_find_handle(de,		/* start dir */
+			"char",			/* path */
+		    	0,			/* major */
+		    	0,			/* minor */
+		    	DEVFS_SPECIAL_CHR,	/* char | block */
+		    	1));			/* traverse symlinks */
+}
+
+/*
+ * hwgraph_cdevsw_get - returns the fops of the given devfs entry.
+ */
+struct file_operations *
+hwgraph_cdevsw_get(devfs_handle_t de)
+{
+	return(devfs_get_ops(de));
+}
+
+/*
+ * hwgraph_bdevsw_get - returns the fops of the given devfs entry.
+*/
+struct file_operations *
+hwgraph_bdevsw_get(devfs_handle_t de)
+{
+	return(devfs_get_ops(de));
+}
+
+/*
+** Inventory is now associated with a vertex in the graph.  For items that
+** belong in the inventory but have no vertex 
+** (e.g. old non-graph-aware drivers), we create a bogus vertex under the 
+** INFO_LBL_INVENT name.
+**
+** For historical reasons, we prevent exact duplicate entries from being added
+** to a single vertex.
+*/
+
+/*
+ * hwgraph_inventory_add - Adds an inventory entry into de.
+ */
+int
+hwgraph_inventory_add(	devfs_handle_t de,
+			int class,
+			int type,
+			major_t controller,
+			minor_t unit,
+			int state)
+{
+	inventory_t *pinv = NULL, *old_pinv = NULL, *last_pinv = NULL;
+	int rv;
+
+	/*
+	 * Add our inventory data to the list of inventory data
+	 * associated with this vertex.
+	 */
+again:
+	/* GRAPH_LOCK_UPDATE(&invent_lock); */
+	rv = labelcl_info_get_LBL(de,
+			INFO_LBL_INVENT,
+			NULL, (arbitrary_info_t *)&old_pinv);
+	if ((rv != LABELCL_SUCCESS) && (rv != LABELCL_NOT_FOUND))
+		goto failure;
+
+	/*
+	 * Seek to end of inventory items associated with this
+	 * vertex.  Along the way, make sure we're not duplicating
+	 * an inventory item (for compatibility with old add_to_inventory)
+	 */
+	for (;old_pinv; last_pinv = old_pinv, old_pinv = old_pinv->inv_next) {
+		if ((int)class != -1 && old_pinv->inv_class != class)
+			continue;
+		if ((int)type != -1 && old_pinv->inv_type != type)
+			continue;
+		if ((int)state != -1 && old_pinv->inv_state != state)
+			continue;
+		if ((int)controller != -1
+		    && old_pinv->inv_controller != controller)
+			continue;
+		if ((int)unit != -1 && old_pinv->inv_unit != unit)
+			continue;
+
+		/* exact duplicate of previously-added inventory item */
+		rv = LABELCL_DUP;
+		goto failure;
+	}
+
+	/* Not a duplicate, so we know that we need to add something. */
+	if (pinv == NULL) {
+		/* Release lock while we wait for memory. */
+		/* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
+		pinv = (inventory_t *)kmalloc(sizeof(inventory_t), GFP_KERNEL);
+		replace_in_inventory(pinv, class, type, controller, unit, state);
+		goto again;
+	}
+
+	pinv->inv_next = NULL;
+	if (last_pinv) {
+		last_pinv->inv_next = pinv;
+	} else {
+		rv = labelcl_info_add_LBL(de, INFO_LBL_INVENT, 
+			sizeof(inventory_t), (arbitrary_info_t)pinv);
+
+		if (rv)
+			goto failure;
+	}
+
+	/* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
+	return(0);
+
+failure:
+	/* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
+	if (pinv)
+		kfree(pinv);
+	return(rv);
+}
+
+
+/*
+ * hwgraph_inventory_remove - Removes an inventory entry.
+ *
+ *	Remove an inventory item associated with a vertex.   It is the caller's
+ *	responsibility to make sure that there are no races between removing
+ *	inventory from a vertex and simultaneously removing that vertex.
+*/
+int
+hwgraph_inventory_remove(	devfs_handle_t de,
+				int class,
+				int type,
+				major_t controller,
+				minor_t unit,
+				int state)
+{
+	inventory_t *pinv = NULL, *last_pinv = NULL, *next_pinv = NULL;
+	labelcl_error_t rv;
+
+	/*
+	 * We never remove stuff from ".invent" ..
+	 */
+	if (!de)
+		return (-1);
+
+	/*
+	 * Remove our inventory data to the list of inventory data
+	 * associated with this vertex.
+	 */
+	/* GRAPH_LOCK_UPDATE(&invent_lock); */
+	rv = labelcl_info_get_LBL(de,
+			INFO_LBL_INVENT,
+			NULL, (arbitrary_info_t *)&pinv);
+	if (rv != LABELCL_SUCCESS)
+		goto failure;
+
+	/*
+	 * Search through inventory items associated with this
+	 * vertex, looking for a match.
+	 */
+	for (;pinv; pinv = next_pinv) {
+		next_pinv = pinv->inv_next;
+
+		if(((int)class == -1 || pinv->inv_class == class) &&
+		   ((int)type == -1 || pinv->inv_type == type) &&
+		   ((int)state == -1 || pinv->inv_state == state) &&
+		   ((int)controller == -1 || pinv->inv_controller == controller) &&
+		   ((int)unit == -1 || pinv->inv_unit == unit)) {
+
+			/* Found a matching inventory item. Remove it. */
+			if (last_pinv) {
+				last_pinv->inv_next = pinv->inv_next;
+			} else {
+				rv = hwgraph_info_replace_LBL(de, INFO_LBL_INVENT, (arbitrary_info_t)pinv->inv_next, NULL);
+				if (rv != LABELCL_SUCCESS)
+					goto failure;
+			}
+
+			pinv->inv_next = NULL; /* sanity */
+			kfree(pinv);
+		} else
+			last_pinv = pinv;
+	}
+
+	if (last_pinv == NULL) {
+		rv = hwgraph_info_remove_LBL(de, INFO_LBL_INVENT, NULL);
+		if (rv != LABELCL_SUCCESS)
+			goto failure;
+	}
+
+	rv = LABELCL_SUCCESS;
+
+failure:
+	/* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
+	return(rv);
+}
+
+/*
+ * hwgraph_inventory_get_next - Get next inventory item associated with the 
+ *	specified vertex.
+ *
+ *	No locking is really needed.  We don't yet have the ability
+ *	to remove inventory items, and new items are always added to
+ *	the end of a vertex' inventory list.
+ *
+ * 	However, a devfs entry can be removed!
+*/
+int
+hwgraph_inventory_get_next(devfs_handle_t de, invplace_t *place, inventory_t **ppinv)
+{
+	inventory_t *pinv;
+	labelcl_error_t rv;
+
+	if (de == NULL)
+		return(LABELCL_BAD_PARAM);
+
+	if (place->invplace_vhdl == NULL) {
+		place->invplace_vhdl = de;
+		place->invplace_inv = NULL;
+	}
+
+	if (de != place->invplace_vhdl)
+		return(LABELCL_BAD_PARAM);
+
+	if (place->invplace_inv == NULL) {
+		/* Just starting on this vertex */
+		rv = labelcl_info_get_LBL(de, INFO_LBL_INVENT,
+						NULL, (arbitrary_info_t *)&pinv);
+		if (rv != LABELCL_SUCCESS)
+			return(LABELCL_NOT_FOUND);
+
+	} else {
+		/* Advance to next item on this vertex */
+		pinv = place->invplace_inv->inv_next;
+	}
+	place->invplace_inv = pinv;
+	*ppinv = pinv;
+
+	return(LABELCL_SUCCESS);
+}
+
+/*
+ * hwgraph_controller_num_get - Returns the controller number in the inventory 
+ *	entry.
+ */
+int
+hwgraph_controller_num_get(devfs_handle_t device)
+{
+	inventory_t *pinv;
+	invplace_t invplace = { NULL, NULL, NULL };
+	int val = -1;
+	if ((pinv = device_inventory_get_next(device, &invplace)) != NULL) {
+		val = (pinv->inv_class == INV_NETWORK)? pinv->inv_unit: pinv->inv_controller;
+	}
+#ifdef DEBUG
+	/*
+	 * It does not make any sense to call this on vertexes with multiple
+	 * inventory structs chained together
+	 */
+	if ( device_inventory_get_next(device, &invplace) != NULL )
+		printk("Should panic here ... !\n");
+#endif
+	return (val);	
+}
+
+/*
+ * hwgraph_controller_num_set - Sets the controller number in the inventory 
+ *	entry.
+ */
+void
+hwgraph_controller_num_set(devfs_handle_t device, int contr_num)
+{
+	inventory_t *pinv;
+	invplace_t invplace = { NULL, NULL, NULL };
+	if ((pinv = device_inventory_get_next(device, &invplace)) != NULL) {
+		if (pinv->inv_class == INV_NETWORK)
+			pinv->inv_unit = contr_num;
+		else {
+			if (pinv->inv_class == INV_FCNODE)
+				pinv = device_inventory_get_next(device, &invplace);
+			if (pinv != NULL)
+				pinv->inv_controller = contr_num;
+		}
+	}
+#ifdef DEBUG
+	/*
+	 * It does not make any sense to call this on vertexes with multiple
+	 * inventory structs chained together
+	 */
+	if(pinv != NULL)
+		ASSERT(device_inventory_get_next(device, &invplace) == NULL);
+#endif
+}
+
+/*
+ * Find the canonical name for a given vertex by walking back through
+ * connectpt's until we hit the hwgraph root vertex (or until we run
+ * out of buffer space or until something goes wrong).
+ *
+ *	COMPATIBILITY FUNCTIONALITY
+ * Walks back through 'parents', not necessarily the same as connectpts.
+ *
+ * Need to resolve the fact that devfs does not return the path from 
+ * "/" but rather it just stops right before /dev ..
+ *
+ * Returns 0 on success, a negative value on failure.
+ */
+int
+hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen)
+{
+	char *locbuf;
+	int   pos;
+
+	if (buflen < 1)
+		return(-1);	/* XXX should be GRAPH_BAD_PARAM ? */
+
+	locbuf = kmalloc(buflen, GFP_KERNEL);
+	if (locbuf == NULL)
+		return(-1);	/* allocation failed; nothing to free */
+
+	pos = devfs_generate_path(vhdl, locbuf, buflen);
+	if (pos < 0) {
+		kfree(locbuf);
+		return pos;
+	}
+
+	/* devfs builds the path right-justified in locbuf; copy it out. */
+	strcpy(buf, &locbuf[pos]);
+	kfree(locbuf);
+	return 0;
+}
+
+/*
+** vertex_to_name converts a vertex into a canonical name by walking
+** back through connect points until we hit the hwgraph root (or until
+** we run out of buffer space).
+**
+** Usually returns a pointer to the original buffer, filled in as
+** appropriate.  If the buffer is too small to hold the entire name,
+** or if anything goes wrong while determining the name, vertex_to_name
+** returns "UnknownDevice".
+*/
+
+#define DEVNAME_UNKNOWN "UnknownDevice"
+
+char *
+vertex_to_name(devfs_handle_t vhdl, char *buf, uint buflen)
+{
+	/* NOTE(review): hwgraph_vertex_name_get returns 0/-1, so this
+	 * comparison relies on GRAPH_SUCCESS being 0 -- confirm. */
+	if (hwgraph_vertex_name_get(vhdl, buf, buflen) == GRAPH_SUCCESS)
+		return(buf);
+	else
+		return(DEVNAME_UNKNOWN);
+}
+
+#ifdef IRIX
+/*
+** Return the compact node id of the node that ultimately "owns" the specified
+** vertex.  In order to do this, we walk back through masters and connect points
+** until we reach a vertex that represents a node.
+**
+** IRIX-only variant: unlike the Linux port's copy in hcl_util.c, this one
+** drops the vertex references it acquires along the walk.
+*/
+cnodeid_t
+master_node_get(devfs_handle_t vhdl)
+{
+	cnodeid_t cnodeid;
+	devfs_handle_t master;
+
+	for (;;) {
+		/* Stop as soon as the current vertex is itself a node. */
+		cnodeid = nodevertex_to_cnodeid(vhdl);
+		if (cnodeid != CNODEID_NONE)
+			return(cnodeid);
+
+		master = device_master_get(vhdl);
+
+		/* Check for exceptional cases */
+		if (master == vhdl) {
+			/* Since we got a reference to the "master" thru
+			 * device_master_get() we should decrement
+			 * its reference count by 1
+			 */
+			hwgraph_vertex_unref(master);
+			return(CNODEID_NONE);
+		}
+
+		if (master == GRAPH_VERTEX_NONE) {
+			/* No master edge: fall back to the connect point. */
+			master = hwgraph_connectpt_get(vhdl);
+			if ((master == GRAPH_VERTEX_NONE) ||
+			    (master == vhdl)) {
+				if (master == vhdl)
+					/* Since we got a reference to the
+					 * "master" thru
+					 * hwgraph_connectpt_get() we should
+					 * decrement its reference count by 1
+					 */
+					hwgraph_vertex_unref(master);
+				return(CNODEID_NONE);
+			}
+		}
+		
+		vhdl = master;
+		/* Decrement the reference to "master" which was got
+		 * either thru device_master_get() or hwgraph_connectpt_get()
+		 * above.
+		 */
+		hwgraph_vertex_unref(master);
+	}
+}
+
+/*
+ * Using the canonical path name to get hold of the desired vertex handle will
+ * not work on multi-hub sn0 nodes. Hence, we use the following (slightly
+ * convoluted) algorithm.
+ *
+ * - Start at the vertex corresponding to the driver (provided as input parameter)
+ * - Loop till you reach a vertex which has EDGE_LBL_MEMORY
+ *    - If EDGE_LBL_CONN exists, follow that up.
+ *      else if EDGE_LBL_MASTER exists, follow that up.
+ *      else follow EDGE_LBL_DOTDOT up.
+ *
+ * * We should be at desired hub/heart vertex now *
+ * - Follow EDGE_LBL_CONN to the widget vertex.
+ *
+ * - return vertex handle of this widget.
+ */
+devfs_handle_t
+mem_vhdl_get(devfs_handle_t drv_vhdl)
+{
+devfs_handle_t cur_vhdl, cur_upper_vhdl;
+devfs_handle_t tmp_mem_vhdl, mem_vhdl;
+graph_error_t loop_rv;
+
+  /* Initializations */
+  cur_vhdl = drv_vhdl;
+  loop_rv = ~GRAPH_SUCCESS;	/* force at least one loop iteration */
+
+  /* Loop till current vertex has EDGE_LBL_MEMORY */
+  while (loop_rv != GRAPH_SUCCESS) {
+
+    /* Empty body is intentional: hwgraph_edge_get() already stored the
+     * parent in cur_upper_vhdl when it returned GRAPH_SUCCESS. */
+    if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_CONN, &cur_upper_vhdl)) == GRAPH_SUCCESS) {
+
+    } else if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_MASTER, &cur_upper_vhdl)) == GRAPH_SUCCESS) {
+      } else { /* Follow HWGRAPH_EDGELBL_DOTDOT up */
+           (void) hwgraph_edge_get(cur_vhdl, HWGRAPH_EDGELBL_DOTDOT, &cur_upper_vhdl);
+        }
+
+    cur_vhdl = cur_upper_vhdl;
+
+#if DEBUG && HWG_DEBUG
+    printf("Current vhdl %d \n", cur_vhdl);
+#endif /* DEBUG */
+
+    loop_rv = hwgraph_edge_get(cur_vhdl, EDGE_LBL_MEMORY, &tmp_mem_vhdl);
+  }
+
+  /* We should be at desired hub/heart vertex now */
+  if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_CONN, &mem_vhdl)) != GRAPH_SUCCESS)
+    return (GRAPH_VERTEX_NONE);
+
+  return (mem_vhdl);
+}
+#endif /* IRIX */
+
+
+/*
+** Add a char device -- if the driver supports it -- at a specified vertex.
+**
+** Stub in this port: logs a warning and hands back a NULL handle; callers
+** are expected to migrate to hwgraph_register().
+*/
+graph_error_t
+hwgraph_char_device_add(        devfs_handle_t from,
+                                char *path,
+                                char *prefix,
+                                devfs_handle_t *devhdl)
+{
+	devfs_handle_t xx = NULL;
+
+	printk("FIXME: hwgraph_char_device_add() called. Use hwgraph_register.\n");
+	*devhdl = xx;	// Must set devhdl
+	return(GRAPH_SUCCESS);
+}
+
+/* Unimplemented stub: edge removal is not supported yet in this port. */
+graph_error_t
+hwgraph_edge_remove(devfs_handle_t from, char *name, devfs_handle_t *toptr)
+{
+	printk("FIXME: hwgraph_edge_remove\n");
+	return(GRAPH_ILLEGAL_REQUEST);
+}
+
+/* Unimplemented stub: vertex reference counting is not wired up yet. */
+graph_error_t
+hwgraph_vertex_unref(devfs_handle_t vhdl)
+{
+	printk("FIXME: hwgraph_vertex_unref\n");
+	return(GRAPH_ILLEGAL_REQUEST);
+}
+
+
+/* Hardware-graph entry points exported for loadable kernel modules. */
+EXPORT_SYMBOL(hwgraph_mk_dir);
+EXPORT_SYMBOL(hwgraph_path_add);
+EXPORT_SYMBOL(hwgraph_char_device_add);
+EXPORT_SYMBOL(hwgraph_register);
+EXPORT_SYMBOL(hwgraph_vertex_destroy);
+
+EXPORT_SYMBOL(hwgraph_fastinfo_get);
+EXPORT_SYMBOL(hwgraph_edge_get);
+
+EXPORT_SYMBOL(hwgraph_fastinfo_set);
+EXPORT_SYMBOL(hwgraph_connectpt_set);
+EXPORT_SYMBOL(hwgraph_connectpt_get);
+EXPORT_SYMBOL(hwgraph_edge_get_next);
+EXPORT_SYMBOL(hwgraph_info_add_LBL);
+EXPORT_SYMBOL(hwgraph_info_remove_LBL);
+EXPORT_SYMBOL(hwgraph_info_replace_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_exported_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_next_LBL);
+EXPORT_SYMBOL(hwgraph_info_export_LBL);
+EXPORT_SYMBOL(hwgraph_info_unexport_LBL);
+EXPORT_SYMBOL(hwgraph_path_lookup);
+EXPORT_SYMBOL(hwgraph_traverse);
+EXPORT_SYMBOL(hwgraph_path_to_vertex);
+EXPORT_SYMBOL(hwgraph_path_to_dev);
+EXPORT_SYMBOL(hwgraph_block_device_get);
+EXPORT_SYMBOL(hwgraph_char_device_get);
+EXPORT_SYMBOL(hwgraph_cdevsw_get);
+EXPORT_SYMBOL(hwgraph_bdevsw_get);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/hcl_util.c linux/arch/ia64/sn/io/hcl_util.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/hcl_util.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/hcl_util.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,160 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/devfs_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/sn/sgi.h>
+#include <asm/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/nodepda.h>
+
+static devfs_handle_t hwgraph_all_cnodes = GRAPH_VERTEX_NONE;
+extern devfs_handle_t hwgraph_root;
+
+
+/*
+** Return the "master" for a given vertex.  A master vertex is a
+** controller or adapter or other piece of hardware that the given
+** vertex passes through on the way to the rest of the system.
+** Returns GRAPH_VERTEX_NONE when the vertex has no master edge.
+*/
+devfs_handle_t
+device_master_get(devfs_handle_t vhdl)
+{
+	devfs_handle_t master;
+
+	if (hwgraph_edge_get(vhdl, EDGE_LBL_MASTER, &master) != GRAPH_SUCCESS)
+		return(GRAPH_VERTEX_NONE);
+	return(master);
+}
+
+/*
+** Set the master for a given vertex by adding a MASTER edge.
+** Returns 0 on success, non-0 indicates failure
+*/
+int
+device_master_set(devfs_handle_t vhdl, devfs_handle_t master)
+{
+	return(hwgraph_edge_add(vhdl, master, EDGE_LBL_MASTER) != GRAPH_SUCCESS);
+}
+
+
+/*
+** Return the compact node id of the node that ultimately "owns" the specified
+** vertex.  In order to do this, we walk back through masters and connect points
+** until we reach a vertex that represents a node.
+**
+** NOTE(review): unlike the IRIX variant, no hwgraph_vertex_unref() calls
+** are made here -- that routine is still a stub in this port.
+*/
+cnodeid_t
+master_node_get(devfs_handle_t vhdl)
+{
+	cnodeid_t cnodeid;
+	devfs_handle_t master;
+
+	for (;;) {
+		/* Done as soon as the current vertex is itself a node. */
+		cnodeid = nodevertex_to_cnodeid(vhdl);
+		if (cnodeid != CNODEID_NONE)
+			return(cnodeid);
+
+		master = device_master_get(vhdl);
+
+		/* Check for exceptional cases */
+		if (master == vhdl) {
+			/* Since we got a reference to the "master" thru
+			 * device_master_get() we should decrement
+			 * its reference count by 1
+			 */
+			return(CNODEID_NONE);
+		}
+
+		if (master == GRAPH_VERTEX_NONE) {
+			/* No master edge: fall back to the connect point. */
+			master = hwgraph_connectpt_get(vhdl);
+			if ((master == GRAPH_VERTEX_NONE) ||
+			    (master == vhdl)) {
+				return(CNODEID_NONE);
+			}
+		}
+
+		vhdl = master;
+	}
+}
+
+/*
+** If the specified device represents a node, return its
+** compact node ID; otherwise, return CNODEID_NONE.
+*/
+cnodeid_t
+nodevertex_to_cnodeid(devfs_handle_t vhdl)
+{
+	arbitrary_info_t cnodeid = CNODEID_NONE;
+
+	/* On lookup failure cnodeid keeps its CNODEID_NONE initializer,
+	 * so the return status can be safely ignored. */
+	(void)labelcl_info_get_LBL(vhdl, INFO_LBL_CNODEID, NULL, &cnodeid);
+
+	return((cnodeid_t)cnodeid);
+}
+
+/*
+** Record that 'vhdl' represents compact node 'cnodeid': register the
+** reverse mapping, attach the CNODEID label, and link the vertex under
+** the global per-node-number directory.
+*/
+void
+mark_nodevertex_as_node(devfs_handle_t vhdl, cnodeid_t cnodeid)
+{
+	if (cnodeid == CNODEID_NONE)
+		return;
+
+	cnodeid_to_vertex(cnodeid) = vhdl;
+	labelcl_info_add_LBL(vhdl, INFO_LBL_CNODEID, INFO_DESC_EXPORT, 
+		(arbitrary_info_t)cnodeid);
+
+	{
+		/* NOTE(review): 10 bytes assumes small non-negative node
+		 * numbers; a full-range int would not fit -- confirm. */
+		char cnodeid_buffer[10];
+
+		/* Lazily create the shared ".../nodenum" directory vertex. */
+		if (hwgraph_all_cnodes == GRAPH_VERTEX_NONE) {
+			(void)hwgraph_path_add( hwgraph_root,
+						EDGE_LBL_NODENUM,
+						&hwgraph_all_cnodes);
+		}
+
+		sprintf(cnodeid_buffer, "%d", cnodeid);
+		(void)hwgraph_edge_add( hwgraph_all_cnodes,
+					vhdl,
+					cnodeid_buffer);
+	}
+}
+
+
+/*
+** dev_to_name converts a devfs_handle_t into a canonical name.
+**
+** Usually returns a pointer to the original buffer, filled in as
+** appropriate.  If the buffer is too small to hold the entire name,
+** or if anything goes wrong while determining the name, dev_to_name
+** returns "UnknownDevice".  In this port every handle is a hwgraph
+** vertex, so the work is simply delegated to vertex_to_name().
+*/
+char *
+dev_to_name(devfs_handle_t dev, char *buf, uint buflen)
+{
+	return vertex_to_name(dev, buf, buflen);
+}
+
+
+
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/hubdev.c linux/arch/ia64/sn/io/hubdev.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/hubdev.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/hubdev.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,127 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/sn1/hubdev.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+struct hubdev_callout {
+        int (*attach_method)(devfs_handle_t);
+        struct hubdev_callout *fp;
+};
+
+typedef struct hubdev_callout hubdev_callout_t;
+
+mutex_t hubdev_callout_mutex;
+hubdev_callout_t *hubdev_callout_list = NULL;
+
+/* Initialize the hubdev callout registry: set up its mutex, start empty. */
+void
+hubdev_init(void)
+{
+	mutex_init(&hubdev_callout_mutex, MUTEX_DEFAULT, "hubdev");
+        hubdev_callout_list = NULL;
+}
+        
+/*
+ * Register an attach method to be invoked for every hub vertex by
+ * hubdev_docallouts().  Methods are prepended, so the most recently
+ * registered one runs first.
+ */
+void
+hubdev_register(int (*attach_method)(devfs_handle_t))
+{
+        hubdev_callout_t *callout;
+        
+        ASSERT(attach_method);
+
+        callout =  (hubdev_callout_t *)kmem_zalloc(sizeof(hubdev_callout_t), KM_SLEEP);
+        ASSERT(callout);
+        
+	mutex_lock(&hubdev_callout_mutex, PZERO);
+        /*
+         * Insert at the front of the list
+         */
+        callout->fp = hubdev_callout_list;
+        hubdev_callout_list = callout;
+        callout->attach_method = attach_method;
+	mutex_unlock(&hubdev_callout_mutex);
+}
+
+/*
+ * Remove a previously registered attach method from the callout list.
+ * Returns 0 on success, ENOENT if the method was never registered.
+ */
+int
+hubdev_unregister(int (*attach_method)(devfs_handle_t))
+{
+        hubdev_callout_t **p;
+        
+        ASSERT(attach_method);
+   
+	mutex_lock(&hubdev_callout_mutex, PZERO);
+        /*
+         * Remove registry element containing attach_method
+         */
+        for (p = &hubdev_callout_list; *p != NULL; p = &(*p)->fp) {
+                if ((*p)->attach_method == attach_method) {
+                        hubdev_callout_t* victim = *p;
+                        *p = (*p)->fp;
+                        kfree(victim);
+                        mutex_unlock(&hubdev_callout_mutex);
+                        return (0);
+                }
+        }
+        mutex_unlock(&hubdev_callout_mutex);
+        return (ENOENT);
+}
+
+
+/*
+ * Run every registered attach method against the given hub vertex.
+ * Stops at the first method that fails and returns its error code;
+ * returns 0 when all callouts succeed.  The registry mutex is held
+ * across the calls, so attach methods must not re-enter the registry.
+ */
+int
+hubdev_docallouts(devfs_handle_t hub)
+{
+        hubdev_callout_t *p;
+        int errcode;
+
+	mutex_lock(&hubdev_callout_mutex, PZERO);
+        
+        for (p = hubdev_callout_list; p != NULL; p = p->fp) {
+                ASSERT(p->attach_method);
+                errcode = (*p->attach_method)(hub);
+                if (errcode != 0) {
+			mutex_unlock(&hubdev_callout_mutex);
+                        return (errcode);
+                }
+        }
+        mutex_unlock(&hubdev_callout_mutex);
+        return (0);
+}
+
+/*
+ * Given a hub vertex, return the base address of the Hspec space
+ * for that hub.
+ */
+caddr_t
+hubdev_prombase_get(devfs_handle_t hub)
+{
+	hubinfo_t	hinfo = NULL;
+
+	/* NOTE(review): only ASSERT guards hinfo; a vertex without hub
+	 * info would dereference NULL in production builds -- confirm
+	 * callers only pass hub vertexes. */
+	hubinfo_get(hub, &hinfo);
+	ASSERT(hinfo);
+
+	return ((caddr_t)NODE_RBOOT_BASE(hinfo->h_nasid));
+}
+
+/* Return the compact node id stored in the hub vertex's hubinfo. */
+cnodeid_t
+hubdev_cnodeid_get(devfs_handle_t hub)
+{
+	hubinfo_t	hinfo = NULL;
+	hubinfo_get(hub, &hinfo);
+	ASSERT(hinfo);
+
+	return hinfo->h_cnodeid;
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/hubspc.c linux/arch/ia64/sn/io/hubspc.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/hubspc.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/hubspc.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,447 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/*
+ * hubspc.c - Hub Memory Space Management Driver
+ * This driver implements the managers for the following
+ * memory resources:
+ * 1) reference counters
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <linux/devfs_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/mem_refcnt.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/addrs.h>
+
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/ip27config.h>
+#include <asm/sn/sn1/hubdev.h>
+#include <asm/sn/ksys/elsc.h>
+#endif
+
+#include <asm/sn/hubspc.h>
+
+
+/* Uncomment the following line for tracing */
+/* #define HUBSPC_DEBUG 1 */
+
+int hubspc_devflag = D_MP;
+
+extern void *device_info_get(devfs_handle_t device);
+extern void device_info_set(devfs_handle_t device, void *info);
+
+
+
+/***********************************************************************/
+/* CPU Prom Space 						       */
+/***********************************************************************/
+
+typedef struct cpuprom_info {
+	devfs_handle_t	prom_dev;
+	devfs_handle_t	nodevrtx;
+	struct	cpuprom_info *next;
+}cpuprom_info_t;
+
+static cpuprom_info_t	*cpuprom_head;
+lock_t	cpuprom_spinlock;
+#define	PROM_LOCK()	mutex_spinlock(&cpuprom_spinlock)
+#define	PROM_UNLOCK(s)	mutex_spinunlock(&cpuprom_spinlock, (s))
+
+/*
+ * Add prominfo to the linked list maintained.
+ * Records the (hub, prom) vertex pairing so cpuprom_map() can later
+ * find the owning hub for a prom device.
+ */
+void
+prominfo_add(devfs_handle_t hub, devfs_handle_t prom)
+{
+	cpuprom_info_t	*info;
+	int	s;
+
+	info = kmalloc(sizeof(cpuprom_info_t), GFP_KERNEL);
+	ASSERT(info);
+	if (info == NULL) {
+		/* ASSERT is compiled out in production; bail explicitly
+		 * rather than dereferencing NULL below. */
+		printk("prominfo_add: kmalloc failed\n");
+		return;
+	}
+	info->prom_dev = prom;
+	info->nodevrtx = hub;
+
+	/* Push onto the head of the singly linked list. */
+	s = PROM_LOCK();
+	info->next = cpuprom_head;
+	cpuprom_head = info;
+	PROM_UNLOCK(s);
+}
+
+/*
+ * Remove the prominfo entry for 'prom' from the list.  The entry must
+ * exist (ASSERT(0) fires on a miss in DEBUG builds).
+ * NOTE(review): the removed entry is unlinked but never kfree'd --
+ * looks like a leak; confirm.
+ */
+void
+prominfo_del(devfs_handle_t prom)
+{
+	int	s;
+	cpuprom_info_t	*info;
+	cpuprom_info_t	**prev;
+
+	s = PROM_LOCK();
+	prev = &cpuprom_head;
+	while ( (info = *prev) ) {
+		if (info->prom_dev == prom) {
+			*prev = info->next;
+			PROM_UNLOCK(s);
+			return;
+		}
+		
+		prev = &info->next;
+	}
+	PROM_UNLOCK(s);
+	ASSERT(0);
+}
+
+/*
+ * Look up the hub vertex previously associated with a prom device via
+ * prominfo_add().  Returns 0 when the prom device is not on the list.
+ */
+devfs_handle_t
+prominfo_nodeget(devfs_handle_t prom)
+{
+	cpuprom_info_t	*scan;
+	devfs_handle_t	node = 0;
+	int	s;
+
+	s = PROM_LOCK();
+	for (scan = cpuprom_head; scan != NULL; scan = scan->next) {
+		if (scan->prom_dev == prom) {
+			node = scan->nodevrtx;
+			break;
+		}
+	}
+	PROM_UNLOCK(s);
+	return node;
+}
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define	SN_PROMVERSION		INV_IP35PROM
+#endif
+
+/* Add "detailed" labelled inventory information to the
+ * prom vertex 
+ */
+void
+cpuprom_detailed_inventory_info_add(devfs_handle_t prom_dev,devfs_handle_t node)
+{
+	invent_miscinfo_t 	*cpuprom_inventory_info;
+	extern invent_generic_t *klhwg_invent_alloc(cnodeid_t cnode, 
+						     int class, int size);
+	cnodeid_t		cnode = hubdev_cnodeid_get(node);
+
+	/* Allocate memory for the extra inventory information
+	 * for the  prom
+	 */
+	cpuprom_inventory_info = (invent_miscinfo_t *) 
+		klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));
+
+	/* NOTE(review): only ASSERT guards the allocation; production
+	 * builds would dereference NULL on failure -- confirm. */
+	ASSERT(cpuprom_inventory_info);
+
+	/* Set the enabled flag so that the hinv interprets this
+	 * information
+	 */
+	cpuprom_inventory_info->im_gen.ig_flag = INVENT_ENABLED;
+	cpuprom_inventory_info->im_type = SN_PROMVERSION;
+	/* Store prom revision into inventory information */
+	cpuprom_inventory_info->im_rev = IP27CONFIG.pvers_rev;
+	cpuprom_inventory_info->im_version = IP27CONFIG.pvers_vers;
+
+
+	/* Store this info as labelled information hanging off the
+	 * prom device vertex
+	 */
+	hwgraph_info_add_LBL(prom_dev, INFO_LBL_DETAIL_INVENT, 
+			     (arbitrary_info_t) cpuprom_inventory_info);
+	/* Export this information so that user programs can get to
+	 * this by using attr_get()
+	 */
+        hwgraph_info_export_LBL(prom_dev, INFO_LBL_DETAIL_INVENT,
+				sizeof(invent_miscinfo_t));
+}
+
+/*
+ * Attach method for the cpu prom subdevice: create the prom device
+ * vertex under the hub, record its inventory data, tag it as the
+ * HUBSPC_PROM subdevice, and remember the hub<->prom pairing.
+ * Always returns 0.
+ */
+int
+cpuprom_attach(devfs_handle_t node)
+{
+        devfs_handle_t prom_dev;
+
+	/* NOTE(review): hwgraph_char_device_add() is a stub in this port
+	 * and hands back a NULL handle -- confirm downstream tolerance. */
+        hwgraph_char_device_add(node, EDGE_LBL_PROM, "hubspc_", &prom_dev);
+#ifdef	HUBSPC_DEBUG
+	printf("hubspc: prom_attach hub: 0x%x prom: 0x%x\n", node, prom_dev);
+#endif	/* HUBSPC_DEBUG */
+	device_inventory_add(prom_dev, INV_PROM, SN_PROMVERSION,
+				(major_t)0, (minor_t)0, 0);
+
+	/* Add additional inventory info about the cpu prom like
+	 * revision & version numbers etc.
+	 */
+	cpuprom_detailed_inventory_info_add(prom_dev,node);
+        device_info_set(prom_dev, (void*)(ulong)HUBSPC_PROM);
+	prominfo_add(node, prom_dev);
+
+        return (0);
+}
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define FPROM_CONFIG_ADDR	MD_JUNK_BUS_TIMING
+#define FPROM_ENABLE_MASK	MJT_FPROM_ENABLE_MASK
+#define FPROM_ENABLE_SHFT	MJT_FPROM_ENABLE_SHFT
+#define FPROM_SETUP_MASK	MJT_FPROM_SETUP_MASK
+#define FPROM_SETUP_SHFT	MJT_FPROM_SETUP_SHFT
+#endif
+
+/*ARGSUSED*/
+/*
+ * Map the hub's flash-prom space into the caller's address space and,
+ * on success, program the hub's memory-directory timing register to
+ * enable prom access.  Returns 0 on success or an errno value.
+ */
+int
+cpuprom_map(devfs_handle_t dev, vhandl_t *vt, off_t addr, size_t len)
+{
+        int 		errcode;
+	caddr_t 	kvaddr;
+	devfs_handle_t		node;
+	cnodeid_t 	cnode;
+
+	/* Recover the owning hub recorded by prominfo_add(). */
+	node = prominfo_nodeget(dev);
+
+	if (!node)
+		return EIO;
+        
+
+	kvaddr = hubdev_prombase_get(node);
+	cnode  = hubdev_cnodeid_get(node);
+#ifdef	HUBSPC_DEBUG
+	printf("cpuprom_map: hubnode %d kvaddr 0x%x\n", node, kvaddr);
+#endif
+
+	/* Clamp the mapping to the size of the prom region. */
+	if (len > RBOOT_SIZE)
+		len = RBOOT_SIZE;
+        /*
+         * Map in the prom space
+         */
+	errcode = v_mapphys(vt, kvaddr, len);
+
+	if (errcode == 0 ){
+		/*
+		 * Set the MD configuration registers suitably.
+		 */
+		nasid_t		nasid;
+		uint64_t	value;
+		volatile hubreg_t	*regaddr;
+
+		nasid = COMPACT_TO_NASID_NODEID(cnode);
+		regaddr = REMOTE_HUB_ADDR(nasid, FPROM_CONFIG_ADDR);
+		/* Read-modify-write: replace only the fprom setup/enable
+		 * fields, preserving the rest of the timing register. */
+		value = HUB_L(regaddr);
+		value &= ~(FPROM_SETUP_MASK | FPROM_ENABLE_MASK);
+		{
+			value |= (((long)CONFIG_FPROM_SETUP << FPROM_SETUP_SHFT) | 
+				  ((long)CONFIG_FPROM_ENABLE << FPROM_ENABLE_SHFT));
+		}
+		HUB_S(regaddr, value);
+
+	}
+        return (errcode);
+}
+
+/*ARGSUSED*/
+/* Nothing to undo on unmap; always succeeds. */
+int
+cpuprom_unmap(devfs_handle_t dev, vhandl_t *vt)
+{
+	return 0;
+}
+
+/***********************************************************************/
+/* Base Hub Space Driver                                               */
+/***********************************************************************/
+
+// extern int l1_attach( devfs_handle_t );
+
+/*
+ * hubspc_init
+ * Registration of the hubspc devices with the hub manager
+ */
+void
+hubspc_init(void)
+{
+	/* Initialize the prom-list spinlock first: the attach methods
+	 * registered below use it (via prominfo_add) when they run.
+	 */
+	spinlock_init(&cpuprom_spinlock, "promlist");
+
+        /*
+         * Register with the hub manager
+         */
+
+        /* The reference counters */
+        hubdev_register(mem_refcnt_attach);
+
+	/* Prom space */
+	hubdev_register(cpuprom_attach);
+
+#if defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
+	/* L1 system controller link */
+	if ( !IS_RUNNING_ON_SIMULATOR() ) {
+		/* initialize the L1 link */
+		void l1_cons_init( l1sc_t *sc );
+		elsc_t *get_elsc(void);
+
+		l1_cons_init((l1sc_t *)get_elsc());
+	}
+#endif
+
+#ifdef	HUBSPC_DEBUG
+	printf("hubspc_init: Completed\n");
+#endif	/* HUBSPC_DEBUG */
+}
+
+/* ARGSUSED */
+/*
+ * Open entry point: dispatch on the subdevice type stored in the
+ * vertex info.  Prom space additionally requires device-management
+ * capability.  Returns 0 or an errno value.
+ */
+int
+hubspc_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp)
+{
+        int errcode = 0;
+        
+        switch ((hubspc_subdevice_t)(ulong)device_info_get(*devp)) {
+        case HUBSPC_REFCOUNTERS:
+                errcode = mem_refcnt_open(devp, oflag, otyp, crp);
+                break;
+
+        case HUBSPC_PROM:
+		/* Check if the user has proper access rights to 
+		 * read/write the prom space.
+		 */
+                if (!cap_able(CAP_DEVICE_MGT)) {
+                        errcode = EPERM;
+                }                
+                break;
+
+        default:
+                errcode = ENODEV;
+        }
+
+#ifdef	HUBSPC_DEBUG
+	printf("hubspc_open: Completed open for type %d\n",
+               (hubspc_subdevice_t)(ulong)device_info_get(*devp));
+#endif	/* HUBSPC_DEBUG */
+
+        return (errcode);
+}
+
+
+/* ARGSUSED */
+/*
+ * Close entry point: only the refcounter subdevice needs real close
+ * handling; prom space closes are no-ops.  Returns 0 or an errno value.
+ */
+int
+hubspc_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
+{
+        int errcode = 0;
+        
+        switch ((hubspc_subdevice_t)(ulong)device_info_get(dev)) {
+        case HUBSPC_REFCOUNTERS:
+                errcode = mem_refcnt_close(dev, oflag, otyp, crp);
+                break;
+
+        case HUBSPC_PROM:
+                break;
+        default:
+                errcode = ENODEV;
+        }
+
+#ifdef	HUBSPC_DEBUG
+	printf("hubspc_close: Completed close for type %d\n",
+               (hubspc_subdevice_t)(ulong)device_info_get(dev));
+#endif	/* HUBSPC_DEBUG */
+
+        return (errcode);
+}
+
+/* ARGSUSED */
+/*
+ * mmap entry point: reject zero-length requests, then dispatch to the
+ * per-subdevice map routine.  Returns 0 or an errno value.
+ */
+int
+hubspc_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
+{
+	/*REFERENCED*/
+        hubspc_subdevice_t subdevice;
+        int errcode = 0;
+
+	/* check validity of request */
+	if( len == 0 ) {
+		return ENXIO;
+        }
+
+        subdevice = (hubspc_subdevice_t)(ulong)device_info_get(dev);
+
+#ifdef	HUBSPC_DEBUG
+	printf("hubspc_map: subdevice: %d vaddr: 0x%x phyaddr: 0x%x len: 0x%x\n",
+	       subdevice, v_getaddr(vt), off, len);
+#endif /* HUBSPC_DEBUG */
+
+	/* Dispatch on the subdevice type already cached above instead of
+	 * re-reading the vertex info a second and third time. */
+        switch (subdevice) {
+        case HUBSPC_REFCOUNTERS:
+                errcode = mem_refcnt_mmap(dev, vt, off, len, prot);
+                break;
+
+        case HUBSPC_PROM:
+		errcode = cpuprom_map(dev, vt, off, len);
+                break;
+        default:
+                errcode = ENODEV;
+        }
+
+#ifdef	HUBSPC_DEBUG
+	printf("hubspc_map finished: spctype: %d vaddr: 0x%x len: 0x%x\n",
+	       subdevice, v_getaddr(vt), len);
+#endif /* HUBSPC_DEBUG */
+
+	return errcode;
+}
+
+/* ARGSUSED */
+/*
+ * munmap entry point: dispatch to the per-subdevice unmap routine.
+ * Returns 0 or an errno value.
+ */
+int
+hubspc_unmap(devfs_handle_t dev, vhandl_t *vt)
+{
+        int errcode = 0;
+        
+        switch ((hubspc_subdevice_t)(ulong)device_info_get(dev)) {
+        case HUBSPC_REFCOUNTERS:
+                errcode = mem_refcnt_unmap(dev, vt);
+                break;
+
+        case HUBSPC_PROM:
+                errcode = cpuprom_unmap(dev, vt);
+                break;
+
+        default:
+                errcode = ENODEV;
+        }
+	return errcode;
+
+}
+
+/* ARGSUSED */
+/*
+ * ioctl entry point: refcounters have their own ioctl handler; the
+ * prom subdevice accepts no ioctls (silently succeeds).  Returns 0 or
+ * an errno value.
+ */
+int
+hubspc_ioctl(devfs_handle_t dev,
+             int cmd,
+             void *arg,
+             int mode,
+             cred_t *cred_p,
+             int *rvalp)
+{
+        int errcode = 0;
+        
+        switch ((hubspc_subdevice_t)(ulong)device_info_get(dev)) {
+        case HUBSPC_REFCOUNTERS:
+                errcode = mem_refcnt_ioctl(dev, cmd, arg, mode, cred_p, rvalp);
+                break;
+
+        case HUBSPC_PROM:
+                break;
+
+        default:
+                errcode = ENODEV;
+        }
+	return errcode;
+
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/invent.c linux/arch/ia64/sn/io/invent.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/invent.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/invent.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,198 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/*
+ * Hardware Inventory
+ *
+ * See sys/sn/invent.h for an explanation of the hardware inventory contents.
+ *
+ */
+#include <linux/types.h>
+#include <linux/config.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+/* Inventory subsystem initialization: intentionally a no-op here. */
+void
+inventinit(void)
+{
+}
+
+/*
+ * For initializing/updating an inventory entry: overwrite every
+ * identifying field of *pinv in place.  The assignments are
+ * independent, so the order is immaterial.
+ */
+void
+replace_in_inventory(
+	inventory_t *pinv, int class, int type,
+	int controller, int unit, int state)
+{
+	pinv->inv_state = state;
+	pinv->inv_unit = unit;
+	pinv->inv_controller = controller;
+	pinv->inv_type = type;
+	pinv->inv_class = class;
+}
+
+/*
+ * Inventory addition: add a global (vertex-less) inventory entry.
+ *
+ * XXX NOTE: Currently must be called after dynamic memory allocator is
+ * initialized.
+ *
+ */
+void
+add_to_inventory(int class, int type, int controller, int unit, int state)
+{
+	(void)device_inventory_add((devfs_handle_t)GRAPH_VERTEX_NONE, class, type, 
+					controller, unit, state);
+}
+
+
+/*
+ * Inventory retrieval 
+ *
+ * These two routines are intended to prevent the caller from having to know
+ * the internal structure of the inventory table.
+ *
+ * Returns the next inventory entry after *place, advancing across
+ * vertexes as needed; NULL when the whole inventory is exhausted.
+ */
+inventory_t *
+get_next_inventory(invplace_t *place)
+{
+	inventory_t *pinv;
+	devfs_handle_t device = place->invplace_vhdl;
+	int rv;
+
+	while ((pinv = device_inventory_get_next(device, place)) == NULL) {
+		/*
+		 * We've exhausted inventory items on the last device.
+		 * Advance to next device.
+		 */
+		rv = hwgraph_vertex_get_next(&device, &place->invplace_vplace);
+		if (rv != LABELCL_SUCCESS)
+			return(NULL);
+		place->invplace_vhdl = device;
+		place->invplace_inv = NULL; /* Start from beginning invent on this device */
+	}
+
+	return(pinv);
+}
+
+/* ARGSUSED */
+/* Report the size of one inventory record; the ABI argument is ignored. */
+int
+get_sizeof_inventory(int abi)
+{
+	return sizeof(inventory_t);
+}
+
+/*
+ * Hardware inventory scanner.
+ *
+ * Invokes fun(entry, arg) for every inventory entry until fun returns
+ * non-zero; that value (or 0 if the whole list was walked) is returned.
+ */
+int
+scaninvent(int (*fun)(inventory_t *, void *), void *arg)
+{
+	invplace_t pos = { NULL, NULL, NULL };
+	inventory_t *entry;
+	int status = 0;
+
+	while ((entry = get_next_inventory(&pos)) != NULL) {
+		status = (*fun)(entry, arg);
+		if (status != 0)
+			break;
+	}
+	return status;
+}
+
+/*
+ * Find a particular inventory object
+ *
+ * pinv can be a pointer to an inventory entry and the search will begin from
+ * there, or it can be 0 in which case the search starts at the beginning.
+ * A -1 for any of the other arguments is a wildcard (i.e. it always matches).
+ *
+ * NOTE(review): the incoming pinv is overwritten before use, so the
+ * scan always starts from the beginning here -- confirm intent.
+ */
+inventory_t *
+find_inventory(inventory_t *pinv, int class, int type, int controller,
+	       int unit, int state)
+{
+	invplace_t iplace =  { NULL,NULL, NULL };
+
+	while ((pinv = (inventory_t *)get_next_inventory(&iplace)) != NULL) {
+		if (class != -1 && pinv->inv_class != class)
+			continue;
+		if (type != -1 && pinv->inv_type != type)
+			continue;
+
+		/* XXXX - perhaps the "state" entry should be ignored so an
+		 * an existing entry can be updated.  See vino_init() and
+		 * ml/IP22.c:add_ioboard() for an example.
+		 */
+		if (state != -1 && pinv->inv_state != state)
+			continue;
+		if (controller != -1
+		    && pinv->inv_controller != controller)
+			continue;
+		if (unit != -1 && pinv->inv_unit != unit)
+			continue;
+		break;
+	}
+
+	return(pinv);
+}
+
+
+/*
+** Retrieve the next inventory record associated with a device, or NULL
+** when the device's inventory chain is exhausted or the lookup fails.
+*/
+inventory_t *
+device_inventory_get_next(	devfs_handle_t device,
+				invplace_t *invplace)
+{
+	inventory_t *pinv = NULL;
+
+	if (hwgraph_inventory_get_next(device, invplace, &pinv) != LABELCL_SUCCESS)
+		return(NULL);
+	return(pinv);
+}
+
+
+/*
+** Associate canonical inventory information with a device (and
+** add it to the general inventory).
+*/
+void
+device_inventory_add(	devfs_handle_t device,
+			int class, 
+			int type, 
+			major_t controller, 
+			minor_t unit, 
+			int state)
+{
+	hwgraph_inventory_add(device, class, type, controller, unit, state);
+}
+
+/* Thin wrapper: read the controller number from the device's inventory. */
+int
+device_controller_num_get(devfs_handle_t device)
+{
+	return hwgraph_controller_num_get(device);
+}
+
+/* Thin wrapper: store the controller number in the device's inventory. */
+void
+device_controller_num_set(devfs_handle_t device, int contr_num)
+{
+	hwgraph_controller_num_set(device, contr_num);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/io.c linux/arch/ia64/sn/io/io.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/io.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/io.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1312 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/cmn_err.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/iograph.h>
+#include <asm/param.h>
+#include <asm/sn/pio.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/xtalk/xtalkaddrs.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_cpuid.h>
+
+extern xtalk_provider_t hub_provider;
+
+#ifndef CONFIG_IA64_SGI_IO
+/* Global variables */
+extern pdaindr_t       pdaindr[MAXCPUS];
+#endif
+
+/*
+ * Perform any initializations needed to support hub-based I/O.
+ * Called once during startup.
+ */
+void
+hubio_init(void)
+{
+#if 0
+	/* This isn't needed unless we port the entire sio driver ... */
+        extern void early_brl1_port_init( void );
+	early_brl1_port_init();
+#endif
+}
+
+/* 
+ * Implementation of hub iobus operations.
+ *
+ * Hub provides a crosstalk "iobus" on IP27 systems.  These routines
+ * provide a platform-specific implementation of xtalk used by all xtalk 
+ * cards on IP27 systems.
+ *
+ * Called from corresponding xtalk_* routines.
+ */
+
+
+/* PIO MANAGEMENT */
+/* For mapping system virtual address space to xtalk space on a specified widget */
+
+/*
+ * Setup pio structures needed for a particular hub.
+ */
+static void
+hub_pio_init(devfs_handle_t hubv)
+{
+	xwidgetnum_t widget;
+	hubinfo_t hubinfo;
+	nasid_t nasid;
+	int bigwin;
+	hub_piomap_t hub_piomap;
+
+	hubinfo_get(hubv, &hubinfo);
+	nasid = hubinfo->h_nasid;
+
+	/* Initialize small window piomaps for this hub */
+	for (widget=0; widget <= HUB_WIDGET_ID_MAX; widget++) {
+		hub_piomap = hubinfo_swin_piomap_get(hubinfo, (int)widget);
+		hub_piomap->hpio_xtalk_info.xp_target = widget;
+		hub_piomap->hpio_xtalk_info.xp_xtalk_addr = 0;
+		hub_piomap->hpio_xtalk_info.xp_mapsz = SWIN_SIZE;
+		hub_piomap->hpio_xtalk_info.xp_kvaddr = (caddr_t)NODE_SWIN_BASE(nasid, widget);
+		hub_piomap->hpio_hub = hubv;
+		hub_piomap->hpio_flags = HUB_PIOMAP_IS_VALID;
+	}
+
+	/* Initialize big window piomaps for this hub */
+	for (bigwin=0; bigwin < HUB_NUM_BIG_WINDOW; bigwin++) {
+		hub_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);
+		hub_piomap->hpio_xtalk_info.xp_mapsz = BWIN_SIZE;
+		hub_piomap->hpio_hub = hubv;
+		hub_piomap->hpio_holdcnt = 0;
+		hub_piomap->hpio_flags = HUB_PIOMAP_IS_BIGWINDOW;
+		IIO_ITTE_DISABLE(nasid, bigwin);
+	}
+#ifdef	BRINGUP
+	hub_set_piomode(nasid, HUB_PIO_CONVEYOR);
+#else
+	/* Set all the xwidgets in fire-and-forget mode
+	 * by default
+	 */
+	hub_set_piomode(nasid, HUB_PIO_FIRE_N_FORGET);
+#endif	/* BRINGUP */
+
+	sv_init(&hubinfo->h_bwwait, SV_FIFO, "bigwin");
+	spinlock_init(&hubinfo->h_bwlock, "bigwin");
+}
+
+/* 
+ * Create a caddr_t-to-xtalk_addr mapping.
+ *
+ * Use a small window if possible (that's the usual case), but
+ * manage big windows if needed.  Big window mappings can be
+ * either FIXED or UNFIXED -- we keep at least 1 big window available
+ * for UNFIXED mappings.
+ *
+ * Returns an opaque pointer-sized type which can be passed to
+ * other hub_pio_* routines on success, or NULL if the request
+ * cannot be satisfied.
+ */
+/* ARGSUSED */
+hub_piomap_t
+hub_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
+		device_desc_t dev_desc,	/* device descriptor */
+		iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
+		size_t byte_count,
+		size_t byte_count_max, 	/* maximum size of a mapping */
+		unsigned flags)		/* defined in sys/pio.h */
+{
+	xwidget_info_t widget_info = xwidget_info_get(dev);
+	xwidgetnum_t widget = xwidget_info_id_get(widget_info);
+	devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+	hubinfo_t hubinfo;
+	hub_piomap_t bw_piomap;
+	int bigwin, free_bw_index;
+	nasid_t nasid;
+	volatile hubreg_t junk;
+	int s;
+
+	/* sanity check */
+	if (byte_count_max > byte_count)
+		return(NULL);
+
+	hubinfo_get(hubv, &hubinfo);
+
+	/* If xtalk_addr range is mapped by a small window, we don't have 
+	 * to do much 
+	 */
+	if (xtalk_addr + byte_count <= SWIN_SIZE)
+		return(hubinfo_swin_piomap_get(hubinfo, (int)widget));
+
+	/* We need to use a big window mapping.  */
+
+	/*
+	 * TBD: Allow requests that would consume multiple big windows --
+	 * split the request up and use multiple mapping entries.
+	 * For now, reject requests that span big windows.
+	 */
+	if ((xtalk_addr % BWIN_SIZE) + byte_count > BWIN_SIZE)
+		return(NULL);
+
+
+	/* Round xtalk address down for big window alignment */
+	xtalk_addr = xtalk_addr & ~(BWIN_SIZE-1);
+
+	/*
+	 * Check to see if an existing big window mapping will suffice.
+	 */
+tryagain:
+	free_bw_index = -1;
+	s = mutex_spinlock(&hubinfo->h_bwlock);
+	for (bigwin=0; bigwin < HUB_NUM_BIG_WINDOW; bigwin++) {
+		bw_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);
+
+		/* If mapping is not valid, skip it */
+		if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_VALID)) {
+			free_bw_index = bigwin;
+			continue;
+		}
+
+		/* 
+		 * If mapping is UNFIXED, skip it.  We don't allow sharing
+		 * of UNFIXED mappings, because this would allow starvation.
+		 */
+		if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED))
+			continue;
+
+		if ( xtalk_addr == bw_piomap->hpio_xtalk_info.xp_xtalk_addr &&
+		     widget == bw_piomap->hpio_xtalk_info.xp_target) {
+			bw_piomap->hpio_holdcnt++;
+			mutex_spinunlock(&hubinfo->h_bwlock, s);
+			return(bw_piomap);
+		}
+	}
+
+	/*
+	 * None of the existing big window mappings will work for us --
+	 * we need to establish a new mapping.
+	 */
+
+	/* Insure that we don't consume all big windows with FIXED mappings */
+	if (flags & PIOMAP_FIXED) {
+		if (hubinfo->h_num_big_window_fixed < HUB_NUM_BIG_WINDOW-1) {
+			ASSERT(free_bw_index >= 0);
+			hubinfo->h_num_big_window_fixed++;
+		} else {
+			bw_piomap = NULL;
+			goto done;
+		}
+	} else /* PIOMAP_UNFIXED */ {
+		if (free_bw_index < 0) {
+			if (flags & PIOMAP_NOSLEEP) {
+				bw_piomap = NULL;
+				goto done;
+			}
+
+			sv_wait(&hubinfo->h_bwwait, PZERO, &hubinfo->h_bwlock, s);
+			goto tryagain;
+		}
+	}
+
+
+	/* OK!  Allocate big window free_bw_index for this mapping. */
+ 	/* 
+	 * The code below does a PIO write to setup an ITTE entry.
+	 * We need to prevent other CPUs from seeing our updated memory 
+	 * shadow of the ITTE (in the piomap) until the ITTE entry is 
+	 * actually set up; otherwise, another CPU might attempt a PIO 
+	 * prematurely.  
+	 *
+	 * Also, the only way we can know that an entry has been received 
+	 * by the hub and can be used by future PIO reads/writes is by 
+	 * reading back the ITTE entry after writing it.
+	 *
+	 * For these two reasons, we PIO read back the ITTE entry after
+	 * we write it.
+	 */
+
+	nasid = hubinfo->h_nasid;
+	IIO_ITTE_PUT(nasid, free_bw_index, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);	
+	junk = HUB_L(IIO_ITTE_GET(nasid, free_bw_index));
+
+	bw_piomap = hubinfo_bwin_piomap_get(hubinfo, free_bw_index);
+	bw_piomap->hpio_xtalk_info.xp_dev = dev;
+	bw_piomap->hpio_xtalk_info.xp_target = widget;
+	bw_piomap->hpio_xtalk_info.xp_xtalk_addr = xtalk_addr;
+	bw_piomap->hpio_xtalk_info.xp_kvaddr = (caddr_t)NODE_BWIN_BASE(nasid, free_bw_index);
+	bw_piomap->hpio_holdcnt++;
+	bw_piomap->hpio_bigwin_num = free_bw_index;
+
+	if (flags & PIOMAP_FIXED)
+		bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED;
+	else
+		bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID;
+
+done:
+	mutex_spinunlock(&hubinfo->h_bwlock, s);
+	return(bw_piomap);
+}
+
+/*
+ * hub_piomap_free destroys a caddr_t-to-xtalk pio mapping and frees
+ * any associated mapping resources.  
+ *
+ * If this piomap was handled with a small window, or if it was handled
+ * in a big window that's still in use by someone else, then there's 
+ * nothing to do.  On the other hand, if this mapping was handled 
+ * with a big window, AND if we were the final user of that mapping, 
+ * then destroy the mapping.
+ */
+void
+hub_piomap_free(hub_piomap_t hub_piomap)
+{
+	devfs_handle_t hubv;
+	hubinfo_t hubinfo;
+	nasid_t nasid;
+	int s;
+
+	/* 
+	 * Small windows are permanently mapped to corresponding widgets,
+	 * so there're no resources to free.
+	 */
+	if (!(hub_piomap->hpio_flags & HUB_PIOMAP_IS_BIGWINDOW))
+		return;
+
+	ASSERT(hub_piomap->hpio_flags & HUB_PIOMAP_IS_VALID);
+	ASSERT(hub_piomap->hpio_holdcnt > 0);
+
+	hubv = hub_piomap->hpio_hub;
+	hubinfo_get(hubv, &hubinfo);
+	nasid = hubinfo->h_nasid;
+
+	s = mutex_spinlock(&hubinfo->h_bwlock);
+
+	/*
+	 * If this is the last hold on this mapping, free it.
+	 */
+	if (--hub_piomap->hpio_holdcnt == 0) {
+		IIO_ITTE_DISABLE(nasid, hub_piomap->hpio_bigwin_num );
+
+		if (hub_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED) {
+			hub_piomap->hpio_flags &= ~(HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED);
+			hubinfo->h_num_big_window_fixed--;
+			ASSERT(hubinfo->h_num_big_window_fixed >= 0);
+		} else
+			hub_piomap->hpio_flags &= ~HUB_PIOMAP_IS_VALID;
+
+		(void)sv_signal(&hubinfo->h_bwwait);
+	}
+
+	mutex_spinunlock(&hubinfo->h_bwlock, s);
+}
+
+/*
+ * Establish a mapping to a given xtalk address range using the resources
+ * allocated earlier.
+ */
+caddr_t
+hub_piomap_addr(hub_piomap_t hub_piomap,	/* mapping resources */
+		iopaddr_t xtalk_addr,		/* map for this xtalk address */
+		size_t byte_count)		/* map this many bytes */
+{
+	/* Verify that range can be mapped using the specified piomap */
+	if (xtalk_addr < hub_piomap->hpio_xtalk_info.xp_xtalk_addr)
+		return(0);
+
+	if (xtalk_addr + byte_count > 
+		( hub_piomap->hpio_xtalk_info.xp_xtalk_addr + 
+			hub_piomap->hpio_xtalk_info.xp_mapsz))
+		return(0);
+
+	if (hub_piomap->hpio_flags & HUB_PIOMAP_IS_VALID)
+		return(hub_piomap->hpio_xtalk_info.xp_kvaddr + 
+			(xtalk_addr % hub_piomap->hpio_xtalk_info.xp_mapsz));
+	else
+		return(0);
+}
+
+
+/*
+ * Driver indicates that it's done with PIO's from an earlier piomap_addr.
+ */
+/* ARGSUSED */
+void
+hub_piomap_done(hub_piomap_t hub_piomap)	/* done with these mapping resources */
+{
+	/* Nothing to do */
+}
+
+
+/*
+ * For translations that require no mapping resources, supply a kernel virtual
+ * address that maps to the specified xtalk address range.
+ */
+/* ARGSUSED */
+caddr_t
+hub_piotrans_addr(	devfs_handle_t dev,	/* translate to this device */
+			device_desc_t dev_desc,	/* device descriptor */
+			iopaddr_t xtalk_addr,	/* Crosstalk address */
+			size_t byte_count,	/* map this many bytes */
+			unsigned flags)		/* (currently unused) */
+{
+	xwidget_info_t widget_info = xwidget_info_get(dev);
+	xwidgetnum_t widget = xwidget_info_id_get(widget_info);
+	devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+	hub_piomap_t hub_piomap;
+	hubinfo_t hubinfo;
+
+	hubinfo_get(hubv, &hubinfo);
+
+	if (xtalk_addr + byte_count <= SWIN_SIZE) {
+		hub_piomap = hubinfo_swin_piomap_get(hubinfo, (int)widget);
+		return(hub_piomap_addr(hub_piomap, xtalk_addr, byte_count));
+	} else
+		return(0);
+}
+
+
+/* DMA MANAGEMENT */
+/* Mapping from crosstalk space to system physical space */
+
+/* 
+ * There's not really very much to do here, since crosstalk maps
+ * directly to system physical space.  It's quite possible that this
+ * DMA layer will be bypassed in performance kernels.
+ */
+
+
+/* ARGSUSED */
+static void
+hub_dma_init(devfs_handle_t hubv)
+{
+}
+
+
+/*
+ * Allocate resources needed to set up DMA mappings up to a specified size
+ * on a specified adapter.
+ * 
+ * We don't actually use the adapter ID for anything.  It's just the adapter
+ * that the lower level driver plans to use for DMA.
+ */
+/* ARGSUSED */
+hub_dmamap_t
+hub_dmamap_alloc(	devfs_handle_t dev,	/* set up mappings for this device */
+			device_desc_t dev_desc,	/* device descriptor */
+			size_t byte_count_max, 	/* max size of a mapping */
+			unsigned flags)		/* defined in dma.h */
+{
+	hub_dmamap_t dmamap;
+	xwidget_info_t widget_info = xwidget_info_get(dev);
+	xwidgetnum_t widget = xwidget_info_id_get(widget_info);
+	devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+
+	dmamap = kern_malloc(sizeof(struct hub_dmamap_s));
+	dmamap->hdma_xtalk_info.xd_dev = dev;
+	dmamap->hdma_xtalk_info.xd_target = widget;
+	dmamap->hdma_hub = hubv;
+	dmamap->hdma_flags = HUB_DMAMAP_IS_VALID;
+ 	if (flags & XTALK_FIXED)
+		dmamap->hdma_flags |= HUB_DMAMAP_IS_FIXED;
+
+	return(dmamap);
+}
+
+/*
+ * Destroy a DMA mapping from crosstalk space to system address space.
+ * There is no actual mapping hardware to destroy, but we at least mark
+ * the dmamap INVALID and free the space that it took.
+ */
+void
+hub_dmamap_free(hub_dmamap_t hub_dmamap)
+{
+	hub_dmamap->hdma_flags &= ~HUB_DMAMAP_IS_VALID;
+	kern_free(hub_dmamap);
+}
+
+/*
+ * Establish a DMA mapping using the resources allocated in a previous dmamap_alloc.
+ * Return an appropriate crosstalk address range that maps to the specified physical 
+ * address range.
+ */
+/* ARGSUSED */
+extern iopaddr_t
+hub_dmamap_addr(	hub_dmamap_t dmamap,	/* use these mapping resources */
+			paddr_t paddr,		/* map for this address */
+			size_t byte_count)	/* map this many bytes */
+{
+	devfs_handle_t vhdl;
+
+	ASSERT(dmamap->hdma_flags & HUB_DMAMAP_IS_VALID);
+
+	if (dmamap->hdma_flags & HUB_DMAMAP_USED) {
+	    /* If the map is FIXED, re-use is OK. */
+	    if (!(dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		vhdl = dmamap->hdma_xtalk_info.xd_dev;
+#if defined(SUPPORT_PRINTING_V_FORMAT)
+		cmn_err(CE_WARN, "%v: hub_dmamap_addr re-uses dmamap.\n",vhdl);
+#else
+		cmn_err(CE_WARN, "0x%p: hub_dmamap_addr re-uses dmamap.\n", &vhdl);
+#endif
+	    }
+	} else {
+		dmamap->hdma_flags |= HUB_DMAMAP_USED;
+	}
+
+	/* There isn't actually any DMA mapping hardware on the hub. */
+	return(paddr);
+}
+
+/*
+ * Establish a DMA mapping using the resources allocated in a previous dmamap_alloc.
+ * Return an appropriate crosstalk address list that maps to the specified physical 
+ * address list.
+ */
+/* ARGSUSED */
+alenlist_t
+hub_dmamap_list(hub_dmamap_t hub_dmamap,	/* use these mapping resources */
+		alenlist_t palenlist,		/* map this area of memory */
+		unsigned flags)
+{
+	devfs_handle_t vhdl;
+
+	ASSERT(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_VALID);
+
+	if (hub_dmamap->hdma_flags & HUB_DMAMAP_USED) {
+	    /* If the map is FIXED, re-use is OK. */
+	    if (!(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		vhdl = hub_dmamap->hdma_xtalk_info.xd_dev;
+#if defined(SUPPORT_PRINTING_V_FORMAT)
+		cmn_err(CE_WARN,"%v: hub_dmamap_list re-uses dmamap\n",vhdl);
+#else
+		cmn_err(CE_WARN,"0x%p: hub_dmamap_list re-uses dmamap\n", &vhdl);
+#endif
+	    }
+	} else {
+		hub_dmamap->hdma_flags |= HUB_DMAMAP_USED;
+	}
+
+	/* There isn't actually any DMA mapping hardware on the hub.  */
+	return(palenlist);
+}
+
+/*
+ * Driver indicates that it has completed whatever DMA it may have started
+ * after an earlier dmamap_addr or dmamap_list call.
+ */
+void
+hub_dmamap_done(hub_dmamap_t hub_dmamap)	/* done with these mapping resources */
+{
+	devfs_handle_t vhdl;
+
+	if (hub_dmamap->hdma_flags & HUB_DMAMAP_USED) {
+		hub_dmamap->hdma_flags &= ~HUB_DMAMAP_USED;
+	} else {
+	    /* If the map is FIXED, re-done is OK. */
+	    if (!(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_FIXED)) {
+		vhdl = hub_dmamap->hdma_xtalk_info.xd_dev;
+#if defined(SUPPORT_PRINTING_V_FORMAT)
+		cmn_err(CE_WARN, "%v: hub_dmamap_done already done with dmamap\n",vhdl);
+#else
+		cmn_err(CE_WARN, "0x%p: hub_dmamap_done already done with dmamap\n", &vhdl);
+#endif
+	    }
+	}
+}
+
+/*
+ * Translate a single system physical address into a crosstalk address.
+ */
+/* ARGSUSED */
+iopaddr_t
+hub_dmatrans_addr(	devfs_handle_t dev,	/* translate for this device */
+			device_desc_t dev_desc,	/* device descriptor */
+			paddr_t paddr,		/* system physical address */
+			size_t byte_count,	/* length */
+			unsigned flags)		/* defined in dma.h */
+{
+	/* no translation needed */
+	return(paddr);
+}
+
+/*
+ * Translate a list of IP27 addresses and lengths into a list of crosstalk 
+ * addresses and lengths.  No actual hardware mapping takes place; the hub 
+ * has no DMA mapping registers -- crosstalk addresses map directly.
+ */
+/* ARGSUSED */
+alenlist_t
+hub_dmatrans_list(	devfs_handle_t dev,	/* translate for this device */
+			device_desc_t dev_desc,	/* device descriptor */
+			alenlist_t palenlist,	/* system address/length list */
+			unsigned flags)		/* defined in dma.h */
+{
+	/* no translation needed */
+	return(palenlist);
+}
+
+/*ARGSUSED*/
+void
+hub_dmamap_drain(	hub_dmamap_t map)
+{
+    /* XXX- flush caches, if cache coherency WAR is needed */
+}
+
+/*ARGSUSED*/
+void
+hub_dmaaddr_drain(	devfs_handle_t vhdl,
+			paddr_t addr,
+			size_t bytes)
+{
+    /* XXX- flush caches, if cache coherency WAR is needed */
+}
+
+/*ARGSUSED*/
+void
+hub_dmalist_drain(	devfs_handle_t vhdl,
+			alenlist_t list)
+{
+    /* XXX- flush caches, if cache coherency WAR is needed */
+}
+
+
+
+/* INTERRUPT MANAGEMENT */
+
+/* ARGSUSED */
+static void
+hub_intr_init(devfs_handle_t hubv)
+{
+}
+
+/*
+ * hub_device_desc_update
+ *	Update the passed in device descriptor with the actual the
+ * 	target cpu number and interrupt priority level.
+ *	NOTE : These might be the same as the ones passed in thru
+ *	the descriptor.
+ */
+static void
+hub_device_desc_update(device_desc_t 	dev_desc, 
+		       ilvl_t 		intr_swlevel,
+		       cpuid_t		cpu)
+{
+	char	cpuname[40];
+	
+	/* Store the interrupt priority level in the device descriptor */
+	device_desc_intr_swlevel_set(dev_desc, intr_swlevel);
+
+	/* Convert the cpuid to the vertex handle in the hwgraph and
+	 * save it in the device descriptor.
+	 */
+	sprintf(cpuname,"/hw/cpunum/%ld",cpu);
+	device_desc_intr_target_set(dev_desc, 
+				    hwgraph_path_to_dev(cpuname));
+}
+
+int allocate_my_bit = INTRCONNECT_ANYBIT;
+
+/*
+ * Allocate resources required for an interrupt as specified in dev_desc.
+ * Returns a hub interrupt handle on success, or 0 on failure.
+ */
+hub_intr_t
+hub_intr_alloc(	devfs_handle_t dev,		/* which crosstalk device */
+		device_desc_t dev_desc,		/* device descriptor */
+		devfs_handle_t owner_dev)		/* owner of this interrupt, if known */
+{
+	cpuid_t cpu;			/* cpu to receive interrupt */
+        int cpupicked = 0;
+	int bit;			/* interrupt vector */
+	/*REFERENCED*/
+	int intr_resflags;
+	hub_intr_t intr_hdl;
+	cnodeid_t nodeid;		/* node to receive interrupt */
+	/*REFERENCED*/
+	nasid_t nasid;			/* nasid to receive interrupt */
+	struct xtalk_intr_s *xtalk_info;
+	iopaddr_t xtalk_addr;		/* xtalk addr on hub to set intr */
+	xwidget_info_t xwidget_info;	/* standard crosstalk widget info handle */
+	char *intr_name = NULL;
+	ilvl_t intr_swlevel;
+	extern int default_intr_pri;
+#ifdef CONFIG_IA64_SGI_SN1 
+	extern void synergy_intr_alloc(int, int);
+#endif
+	
+	/*
+	 * If caller didn't explicily specify a device descriptor, see if there's
+	 * a default descriptor associated with the device.
+	 */
+	if (!dev_desc) 
+		dev_desc = device_desc_default_get(dev);
+
+	if (dev_desc) {
+		intr_name = device_desc_intr_name_get(dev_desc);
+		intr_swlevel = device_desc_intr_swlevel_get(dev_desc);
+		if (dev_desc->flags & D_INTR_ISERR) {
+			intr_resflags = II_ERRORINT;
+		} else if (!(dev_desc->flags & D_INTR_NOTHREAD)) {
+			intr_resflags = II_THREADED;
+		} else {
+			/* Neither an error nor a thread. */
+			intr_resflags = 0;
+		}
+	} else {
+		intr_swlevel = default_intr_pri;
+		intr_resflags = II_THREADED;
+	}
+
+	/* XXX - Need to determine if the interrupt should be threaded. */
+
+	/* If the cpu has not been picked already then choose a candidate 
+	 * interrupt target and reserve the interrupt bit 
+	 */
+#if defined(NEW_INTERRUPTS)
+	if (!cpupicked) {
+		cpu = intr_heuristic(dev,dev_desc,allocate_my_bit,
+				     intr_resflags,owner_dev,
+				     intr_name,&bit);
+	}
+#endif
+
+	/* At this point we SHOULD have a valid cpu */
+	if (cpu == CPU_NONE) {
+#if defined(SUPPORT_PRINTING_V_FORMAT)
+		cmn_err(CE_WARN, 
+			"%v hub_intr_alloc could not allocate interrupt\n",
+			owner_dev);
+#else
+		cmn_err(CE_WARN, 
+			"0x%p hub_intr_alloc could not allocate interrupt\n",
+			&owner_dev);
+#endif
+		return(0);
+
+	}
+
+	/* If the cpu has been picked already (due to the bridge data 
+	 * corruption bug) then try to reserve an interrupt bit .
+	 */
+#if defined(NEW_INTERRUPTS)
+	if (cpupicked) {
+		bit = intr_reserve_level(cpu, allocate_my_bit, 
+					 intr_resflags, 
+					 owner_dev, intr_name);
+		if (bit < 0) {
+#if defined(SUPPORT_PRINTING_V_FORMAT)
+			cmn_err(CE_WARN,
+				"Could not reserve an interrupt bit for cpu "
+				" %d and dev %v\n",
+				cpu,owner_dev);
+#else
+			cmn_err(CE_WARN,
+				"Could not reserve an interrupt bit for cpu "
+				" %d and dev 0x%x\n",
+				cpu, &owner_dev);
+#endif
+				
+			return(0);
+		}
+	}
+#endif	/* NEW_INTERRUPTS */
+
+	nodeid = cpuid_to_cnodeid(cpu);
+	nasid = cpuid_to_nasid(cpu);
+	xtalk_addr = HUBREG_AS_XTALKADDR(nasid, PIREG(PI_INT_PEND_MOD, cpuid_to_subnode(cpu)));
+
+	/*
+	 * Allocate an interrupt handle, and fill it in.  There are two
+	 * pieces to an interrupt handle: the piece needed by generic
+	 * xtalk code which is used by crosstalk device drivers, and
+	 * the piece needed by low-level IP27 hardware code.
+	 */
+	intr_hdl = kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, nodeid);
+	ASSERT_ALWAYS(intr_hdl);
+
+	/* 
+	 * Fill in xtalk information for generic xtalk interfaces that
+	 * operate on xtalk_intr_hdl's.
+	 */
+	xtalk_info = &intr_hdl->i_xtalk_info;
+	xtalk_info->xi_dev = dev;
+	xtalk_info->xi_vector = bit;
+	xtalk_info->xi_addr = xtalk_addr;
+	xtalk_info->xi_flags =  (intr_resflags == II_THREADED) ?
+				0 : XTALK_INTR_NOTHREAD;
+
+	/*
+	 * Regardless of which CPU we ultimately interrupt, a given crosstalk
+	 * widget always handles interrupts (and PIO and DMA) through its 
+	 * designated "master" crosstalk provider.
+	 */
+	xwidget_info = xwidget_info_get(dev);
+	if (xwidget_info)
+		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);
+
+	/* Fill in low level hub information for hub_* interrupt interface */
+	intr_hdl->i_swlevel = intr_swlevel;
+	intr_hdl->i_cpuid = cpu;
+	intr_hdl->i_bit = bit;
+	intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;
+
+	/* Store the actual interrupt priority level & interrupt target
+	 * cpu back in the device descriptor.
+	 */
+	hub_device_desc_update(dev_desc, intr_swlevel, cpu);
+#ifdef CONFIG_IA64_SGI_SN1
+	synergy_intr_alloc((int)bit, (int)cpu);
+#endif
+	return(intr_hdl);
+}
+
+
+/*
+ * Free resources consumed by intr_alloc.
+ */
+void
+hub_intr_free(hub_intr_t intr_hdl)
+{
+	cpuid_t cpu = intr_hdl->i_cpuid;
+	int bit = intr_hdl->i_bit;
+	xtalk_intr_t xtalk_info;
+
+	if (intr_hdl->i_flags & HUB_INTR_IS_CONNECTED) {
+		/* Setting the following fields in the xtalk interrupt info
+	 	 * clears the interrupt target register in the xtalk user
+	 	 */
+		xtalk_info = &intr_hdl->i_xtalk_info;
+		xtalk_info->xi_dev = NODEV;
+		xtalk_info->xi_vector = 0;
+		xtalk_info->xi_addr = 0;
+		hub_intr_disconnect(intr_hdl);
+	}
+
+	if (intr_hdl->i_flags & HUB_INTR_IS_ALLOCED)
+		kfree(intr_hdl);
+
+#if defined(NEW_INTERRUPTS)
+	intr_unreserve_level(cpu, bit);
+#endif
+}
+
+
+/*
+ * Associate resources allocated with a previous hub_intr_alloc call with the
+ * described handler, arg, name, etc.
+ */
+/*ARGSUSED*/
+int
+hub_intr_connect(	hub_intr_t intr_hdl,		/* xtalk intr resource handle */
+			intr_func_t intr_func,		/* xtalk intr handler */
+			void *intr_arg,			/* arg to intr handler */
+			xtalk_intr_setfunc_t setfunc,	/* func to set intr hw */
+			void *setfunc_arg,		/* arg to setfunc */
+			void *thread)			/* intr thread to use */
+{
+	int rv;
+	cpuid_t cpu = intr_hdl->i_cpuid;
+	int bit = intr_hdl->i_bit;
+#ifdef CONFIG_IA64_SGI_SN1
+	extern int synergy_intr_connect(int, int);
+#endif
+
+	ASSERT(intr_hdl->i_flags & HUB_INTR_IS_ALLOCED);
+
+#if defined(NEW_INTERRUPTS)
+	rv = intr_connect_level(cpu, bit, intr_hdl->i_swlevel, 
+					intr_func, intr_arg, NULL);
+	if (rv < 0)
+		return(rv);
+
+#endif
+	intr_hdl->i_xtalk_info.xi_setfunc = setfunc;
+	intr_hdl->i_xtalk_info.xi_sfarg = setfunc_arg;
+
+	if (setfunc) (*setfunc)((xtalk_intr_t)intr_hdl);
+
+	intr_hdl->i_flags |= HUB_INTR_IS_CONNECTED;
+#ifdef CONFIG_IA64_SGI_SN1
+	return(synergy_intr_connect((int)bit, (int)cpu));
+#endif
+}
+
+
+/*
+ * Disassociate handler with the specified interrupt.
+ */
+void
+hub_intr_disconnect(hub_intr_t intr_hdl)
+{
+	/*REFERENCED*/
+	int rv;
+	cpuid_t cpu = intr_hdl->i_cpuid;
+	int bit = intr_hdl->i_bit;
+	xtalk_intr_setfunc_t setfunc;
+
+	setfunc = intr_hdl->i_xtalk_info.xi_setfunc;
+
+	/* TBD: send disconnected interrupts somewhere harmless */
+	if (setfunc) (*setfunc)((xtalk_intr_t)intr_hdl);
+
+#if defined(NEW_INTERRUPTS)
+	rv = intr_disconnect_level(cpu, bit);
+	ASSERT(rv == 0);
+#endif
+
+	intr_hdl->i_flags &= ~HUB_INTR_IS_CONNECTED;
+}
+
+
+/*
+ * Return a hwgraph vertex that represents the CPU currently
+ * targeted by an interrupt.
+ */
+devfs_handle_t
+hub_intr_cpu_get(hub_intr_t intr_hdl)
+{
+	cpuid_t cpuid = intr_hdl->i_cpuid;
+	ASSERT(cpuid != CPU_NONE);
+
+	return(cpuid_to_vertex(cpuid));
+}
+
+
+
+/* CONFIGURATION MANAGEMENT */
+
+/*
+ * Perform initializations that allow this hub to start crosstalk support.
+ */
+void
+hub_provider_startup(devfs_handle_t hubv)
+{
+	hub_pio_init(hubv);
+	hub_dma_init(hubv);
+	hub_intr_init(hubv);
+}
+
+/*
+ * Shutdown crosstalk support from a hub.
+ */
+void
+hub_provider_shutdown(devfs_handle_t hub)
+{
+	/* TBD */
+	xtalk_provider_unregister(hub);
+}
+
+/*
+ * Check that an address is in the real small window widget 0 space
+ * or else in the big window we're using to emulate small window 0
+ * in the kernel.
+ */
+int
+hub_check_is_widget0(void *addr)
+{
+	nasid_t nasid = NASID_GET(addr);
+
+	if (((__psunsigned_t)addr >= RAW_NODE_SWIN_BASE(nasid, 0)) &&
+	    ((__psunsigned_t)addr < RAW_NODE_SWIN_BASE(nasid, 1)))
+		return 1;
+	return 0;
+}
+
+
+/*
+ * Check that two addresses use the same widget
+ */
+int
+hub_check_window_equiv(void *addra, void *addrb)
+{
+	if (hub_check_is_widget0(addra) && hub_check_is_widget0(addrb))
+		return 1;
+
+	/* XXX - Assume this is really a small window address */
+	if (WIDGETID_GET((__psunsigned_t)addra) ==
+	    WIDGETID_GET((__psunsigned_t)addrb))
+		return 1;
+
+	return 0;
+}
+
+
+/*
+ * Determine whether two PCI addresses actually refer to the same device.
+ * This only works if both addresses are in small windows.  It's used to
+ * determine whether prom addresses refer to particular PCI devices.
+ */
+/*	
+ * XXX - This won't work as written if we ever have more than two nodes
+ * on a crossbow.  In that case, we'll need an array of partners.
+ */
+int
+hub_check_pci_equiv(void *addra, void *addrb)
+{
+	nasid_t nasida, nasidb;
+
+	/*
+	 * This is for a permanent workaround that causes us to use a
+	 * big window in place of small window 0.
+	 */
+	if (!hub_check_window_equiv(addra, addrb))
+		return 0;
+
+	/* If the offsets aren't the same, forget it. */
+	if (SWIN_WIDGETADDR((__psunsigned_t)addra) !=
+	    (SWIN_WIDGETADDR((__psunsigned_t)addrb)))
+		return 0;
+
+	/* Now, check the nasids */
+	nasida = NASID_GET(addra);
+	nasidb = NASID_GET(addrb);
+
+	ASSERT(NASID_TO_COMPACT_NODEID(nasida) != INVALID_NASID);
+	ASSERT(NASID_TO_COMPACT_NODEID(nasidb) != INVALID_NASID);
+
+	/*
+	 * Either the NASIDs must be the same or they must be crossbow
+	 * partners (on the same crossbow).
+	 */
+	return (check_nasid_equiv(nasida, nasidb));
+}
+
+/*
+ * hub_setup_prb(nasid, prbnum, credits, conveyor)
+ *
+ * 	Put a PRB into fire-and-forget mode if conveyor isn't set.  Otherwise,
+ * 	put it into conveyor belt mode with the specified number of credits.
+ */
+void
+hub_setup_prb(nasid_t nasid, int prbnum, int credits, int conveyor)
+{
+	iprb_t prb;
+	int prb_offset;
+#ifdef IRIX
+	extern int force_fire_and_forget;
+	extern volatile int ignore_conveyor_override;
+
+	if (force_fire_and_forget && !ignore_conveyor_override)
+	    if (conveyor == HUB_PIO_CONVEYOR)
+		conveyor = HUB_PIO_FIRE_N_FORGET;
+#endif
+
+	/*
+	 * Get the current register value.
+	 */
+	prb_offset = IIO_IOPRB(prbnum);
+	prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
+
+	/*
+	 * Clear out some fields.
+	 */
+	prb.iprb_ovflow = 1;
+	prb.iprb_bnakctr = 0;
+	prb.iprb_anakctr = 0;
+
+	/*
+	 * Enable or disable fire-and-forget mode.
+	 */
+	prb.iprb_ff = ((conveyor == HUB_PIO_CONVEYOR) ? 0 : 1);
+
+	/*
+	 * Set the appropriate number of PIO credits for the widget.
+	 */
+	prb.iprb_xtalkctr = credits;
+
+	/*
+	 * Store the new value to the register.
+	 */
+	REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
+}
+
+/*
+ * hub_set_piomode()
+ *
+ * 	Put the hub into either "PIO conveyor belt" mode or "fire-and-forget"
+ * 	mode.  To do this, we have to make absolutely sure that no PIOs
+ *	are in progress so we turn off access to all widgets for the duration
+ *	of the function.
+ * 
+ * XXX - This code should really check what kind of widget we're talking
+ * to.  Bridges can only handle three requests, but XG will do more.
+ * How many can crossbow handle to widget 0?  We're assuming 1.
+ *
+ * XXX - There is a bug in the crossbow that link reset PIOs do not
+ * return write responses.  The easiest solution to this problem is to
+ * leave widget 0 (xbow) in fire-and-forget mode at all times.  This
+ * only affects pio's to xbow registers, which should be rare.
+ */
+void
+hub_set_piomode(nasid_t nasid, int conveyor)
+{
+	hubreg_t ii_iowa;
+	int direct_connect;
+	hubii_wcr_t ii_wcr;
+	int prbnum;
+	int s, cons_lock = 0;
+
+	ASSERT(NASID_TO_COMPACT_NODEID(nasid) != INVALID_CNODEID);
+	if (nasid == get_console_nasid()) {
+		PUTBUF_LOCK(s);	
+		cons_lock = 1;
+	}
+
+	ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
+	REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
+
+	ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
+	direct_connect = ii_wcr.iwcr_dir_con;
+
+	if (direct_connect) {
+		/* 
+		 * Assume a bridge here.
+		 */
+		hub_setup_prb(nasid, 0, 3, conveyor);
+	} else {
+		/* 
+		 * Assume a crossbow here.
+		 */
+		hub_setup_prb(nasid, 0, 1, conveyor);
+	}
+
+	for (prbnum = HUB_WIDGET_ID_MIN; prbnum <= HUB_WIDGET_ID_MAX; prbnum++) {
+		/*
+		 * XXX - Here's where we should take the widget type into
+		 * account when assigning credits.
+		 */
+		/* Always set the PRBs in fire-and-forget mode */
+		hub_setup_prb(nasid, prbnum, 3, conveyor);
+	}
+
+#ifdef IRIX
+	/*
+	 * In direct connect mode, disable access to all widgets but 0.
+	 * Later, the prom will do this for us.
+	 */
+	if (direct_connect)
+		ii_iowa = 1;
+#endif
+
+	REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
+
+	if (cons_lock)
+	    PUTBUF_UNLOCK(s);
+}
+/* Interface to allow special drivers to set hub specific
+ * device flags.
+ * Return 0 on failure , 1 on success
+ */
+int
+hub_widget_flags_set(nasid_t		nasid,
+		     xwidgetnum_t	widget_num,
+		     hub_widget_flags_t	flags)
+{
+
+	ASSERT((flags & HUB_WIDGET_FLAGS) == flags);
+
+	if (flags & HUB_PIO_CONVEYOR) {
+		hub_setup_prb(nasid,widget_num,
+			      3,HUB_PIO_CONVEYOR); /* set the PRB in conveyor 
+						    * belt mode with 3 credits
+						    */
+	} else if (flags & HUB_PIO_FIRE_N_FORGET) {
+		hub_setup_prb(nasid,widget_num,
+			      3,HUB_PIO_FIRE_N_FORGET); /* set the PRB in fire
+							 *  and forget mode 
+							 */
+	}
+
+	return 1;
+}
+/* Interface to allow special drivers to set hub specific
+ * device flags.
+ * Return 0 on failure , 1 on success
+ */
+int
+hub_device_flags_set(devfs_handle_t	widget_vhdl,
+		     hub_widget_flags_t	flags)
+{
+	xwidget_info_t		widget_info = xwidget_info_get(widget_vhdl);
+	xwidgetnum_t		widget_num  = xwidget_info_id_get(widget_info);
+	devfs_handle_t		hub_vhdl    = xwidget_info_master_get(widget_info);
+	hubinfo_t		hub_info = 0;
+	nasid_t			nasid;
+	int			s,rv;
+
+	/* Use the nasid from the hub info hanging off the hub vertex
+	 * and widget number from the widget vertex
+	 */
+	hubinfo_get(hub_vhdl, &hub_info);
+	/* Being over cautious by grabbing a lock */
+	s 	= mutex_spinlock(&hub_info->h_bwlock);
+	nasid 	= hub_info->h_nasid;
+	rv 	= hub_widget_flags_set(nasid,widget_num,flags);
+	mutex_spinunlock(&hub_info->h_bwlock, s);
+
+	return rv;
+}
+
+#if ((defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)) && defined(BRINGUP))
+/* BRINGUP:  This ought to be useful for IP27 too but, for now,
+ * make it SN1 only because `ii_ixtt_u_t' is not in IP27/hubio.h
+ * (or anywhere else :-).
+ */
+int
+hubii_ixtt_set(devfs_handle_t widget_vhdl, ii_ixtt_u_t *ixtt)
+{
+	xwidget_info_t		widget_info = xwidget_info_get(widget_vhdl);
+	devfs_handle_t		hub_vhdl    = xwidget_info_master_get(widget_info);
+	hubinfo_t		hub_info = 0;
+	nasid_t			nasid;
+	int			s;
+
+	/* Use the nasid from the hub info hanging off the hub vertex
+	 * and widget number from the widget vertex
+	 */
+	hubinfo_get(hub_vhdl, &hub_info);
+	/* Being over cautious by grabbing a lock */
+	s 	= mutex_spinlock(&hub_info->h_bwlock);
+	nasid 	= hub_info->h_nasid;
+
+	REMOTE_HUB_S(nasid, IIO_IXTT, ixtt->ii_ixtt_regval);
+
+	mutex_spinunlock(&hub_info->h_bwlock, s);
+	return 0;
+}
+
+int
+hubii_ixtt_get(devfs_handle_t widget_vhdl, ii_ixtt_u_t *ixtt)
+{
+	xwidget_info_t		widget_info = xwidget_info_get(widget_vhdl);
+	devfs_handle_t		hub_vhdl    = xwidget_info_master_get(widget_info);
+	hubinfo_t		hub_info = 0;
+	nasid_t			nasid;
+	int			s;
+
+	/* Use the nasid from the hub info hanging off the hub vertex
+	 * and widget number from the widget vertex
+	 */
+	hubinfo_get(hub_vhdl, &hub_info);
+	/* Being over cautious by grabbing a lock */
+	s 	= mutex_spinlock(&hub_info->h_bwlock);
+	nasid 	= hub_info->h_nasid;
+
+	ixtt->ii_ixtt_regval = REMOTE_HUB_L(nasid, IIO_IXTT);
+
+	mutex_spinunlock(&hub_info->h_bwlock, s);
+	return 0;
+}
+#endif /* (CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC) && BRINGUP */
+
+/*
+ * hub_device_inquiry
+ *	Find out the xtalk widget related information stored in this 
+ *	hub's II.
+ */
+void
+hub_device_inquiry(devfs_handle_t	xbus_vhdl, xwidgetnum_t widget)
+{
+	devfs_handle_t	xconn, hub_vhdl;
+	char		widget_name[8];
+	hubreg_t	ii_iidem,ii_iiwa, ii_iowa;
+	hubinfo_t	hubinfo;
+	nasid_t		nasid;
+	int		d;
+
+	sprintf(widget_name, "%d", widget);
+	if (hwgraph_traverse(xbus_vhdl, widget_name, &xconn)
+	    != GRAPH_SUCCESS)
+		return;
+
+	hub_vhdl = device_master_get(xconn);
+	if (hub_vhdl == GRAPH_VERTEX_NONE)
+		return;
+
+	hubinfo_get(hub_vhdl, &hubinfo);
+	if (!hubinfo)
+		return;
+	
+	nasid = hubinfo->h_nasid;
+
+	ii_iidem	= REMOTE_HUB_L(nasid, IIO_IIDEM);
+	ii_iiwa 	= REMOTE_HUB_L(nasid, IIO_IIWA);
+	ii_iowa 	= REMOTE_HUB_L(nasid, IIO_IOWA);
+
+#if defined(SUPPORT_PRINTING_V_FORMAT)
+	cmn_err(CE_CONT, "Inquiry Info for %v\n", xconn);
+#else
+	cmn_err(CE_CONT, "Inquiry Info for 0x%p\n", &xconn);
+#endif
+
+	cmn_err(CE_CONT,"\tDevices shutdown [ ");
+
+	for (d = 0 ; d <= 7 ; d++)
+		if (!(ii_iidem & (IIO_IIDEM_WIDGETDEV_MASK(widget,d))))
+			cmn_err(CE_CONT, " %d", d);
+
+	cmn_err(CE_CONT,"]\n");
+
+	cmn_err(CE_CONT,
+		"\tInbound access ? %s\n",
+		ii_iiwa & IIO_IIWA_WIDGET(widget) ? "yes" : "no");
+
+	cmn_err(CE_CONT,
+		"\tOutbound access ? %s\n",
+		ii_iowa & IIO_IOWA_WIDGET(widget) ? "yes" : "no");
+
+}
+
+/*
+ * A pointer to this structure hangs off of every hub hwgraph vertex.
+ * The generic xtalk layer may indirect through it to get to this specific
+ * crosstalk bus provider.
+ */
+xtalk_provider_t hub_provider = {
+	(xtalk_piomap_alloc_f *)	hub_piomap_alloc,
+	(xtalk_piomap_free_f *)		hub_piomap_free,
+	(xtalk_piomap_addr_f *)		hub_piomap_addr,
+	(xtalk_piomap_done_f *)		hub_piomap_done,
+	(xtalk_piotrans_addr_f *)	hub_piotrans_addr,
+
+	(xtalk_dmamap_alloc_f *)	hub_dmamap_alloc,
+	(xtalk_dmamap_free_f *)		hub_dmamap_free,
+	(xtalk_dmamap_addr_f *)		hub_dmamap_addr,
+	(xtalk_dmamap_list_f *)		hub_dmamap_list,
+	(xtalk_dmamap_done_f *)		hub_dmamap_done,
+	(xtalk_dmatrans_addr_f *)	hub_dmatrans_addr,
+	(xtalk_dmatrans_list_f *)	hub_dmatrans_list,
+	(xtalk_dmamap_drain_f *)	hub_dmamap_drain,
+	(xtalk_dmaaddr_drain_f *)	hub_dmaaddr_drain,
+	(xtalk_dmalist_drain_f *)	hub_dmalist_drain,
+
+	(xtalk_intr_alloc_f *)		hub_intr_alloc,
+	(xtalk_intr_free_f *)		hub_intr_free,
+	(xtalk_intr_connect_f *)	hub_intr_connect,
+	(xtalk_intr_disconnect_f *)	hub_intr_disconnect,
+	(xtalk_intr_cpu_get_f *)	hub_intr_cpu_get,
+
+	(xtalk_provider_startup_f *)	hub_provider_startup,
+	(xtalk_provider_shutdown_f *)	hub_provider_shutdown,
+};
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/ip37.c linux/arch/ia64/sn/io/ip37.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/ip37.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/ip37.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,127 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/*
+ * ip37.c
+ *	Support for IP35/IP37 machines
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sgi.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn1/hubdev.h>
+#include <asm/sn/pci/bridge.h>     /* for bridge_t */
+
+
+xwidgetnum_t
+hub_widget_id(nasid_t nasid)
+{
+	hubii_wcr_t	ii_wcr;	/* the control status register */
+		
+	ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid,IIO_WCR);
+
+	printk("hub_widget_id: Found Hub Widget ID 0x%x from Register 0x%p\n", ii_wcr.wcr_fields_s.wcr_widget_id, REMOTE_HUB_ADDR(nasid, IIO_WCR));
+
+	printk("hub_widget_id: Found Hub Widget 0x%lx wcr_reg_value 0x%lx\n", REMOTE_HUB_L(nasid,IIO_WCR), ii_wcr.wcr_reg_value);
+
+	return ii_wcr.wcr_fields_s.wcr_widget_id;
+}
+
+/*
+ * get_nasid() returns the physical node id number of the caller.
+ */
+nasid_t
+get_nasid(void)
+{
+	return (nasid_t)((LOCAL_HUB_L(LB_REV_ID) & LRI_NODEID_MASK) >> LRI_NODEID_SHFT);
+}
+
+int
+get_slice(void)
+{
+	return LOCAL_HUB_L(PI_CPU_NUM);
+}
+
+int
+is_fine_dirmode(void)
+{
+	return (((LOCAL_HUB_L(LB_REV_ID) & LRI_SYSTEM_SIZE_MASK)
+		>> LRI_SYSTEM_SIZE_SHFT) == SYSTEM_SIZE_SMALL);
+
+}
+
+hubreg_t
+get_hub_chiprev(nasid_t nasid)
+{
+
+	printk("get_hub_chiprev: Hub Chip Rev 0x%lx\n",
+		(REMOTE_HUB_L(nasid, LB_REV_ID) & LRI_REV_MASK) >> LRI_REV_SHFT);
+	return ((REMOTE_HUB_L(nasid, LB_REV_ID) & LRI_REV_MASK)
+		                                         >> LRI_REV_SHFT);
+}
+
+int
+verify_snchip_rev(void)
+{
+	int hub_chip_rev;
+	int i;
+	static int min_hub_rev = 0;
+	nasid_t nasid;
+	static int first_time = 1;
+	extern int maxnodes;
+
+        
+	if (first_time) {
+	    for (i = 0; i < maxnodes; i++) {	
+		nasid = COMPACT_TO_NASID_NODEID(i);
+		hub_chip_rev = get_hub_chiprev(nasid);
+
+		if ((hub_chip_rev < min_hub_rev) || (i == 0))
+		    min_hub_rev = hub_chip_rev;
+	    }
+
+	
+	    first_time = 0;
+	}
+
+	return min_hub_rev;
+	
+}
+
+#ifdef SN1_USE_POISON_BITS
+int
+hub_bte_poison_ok(void)
+{
+	/*
+	 * For now, assume poisoning is ok. If it turns out there are chip
+	 * bugs that prevent its use in early revs, there is some neat code
+	 * to steal from the IP27 equivalent of this code.
+	 */
+
+#ifdef BRINGUP	/* temp disable BTE poisoning - might be sw bugs in this area */
+	return 0;
+#else
+	return 1;
+#endif
+}
+#endif /* SN1_USE_POISON_BITS */
+                
+
+void
+ni_reset_port(void)
+{
+	LOCAL_HUB_S(NI_RESET_ENABLE, NRE_RESETOK);
+	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+}
+
+#endif	/* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/klconflib.c linux/arch/ia64/sn/io/klconflib.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/klconflib.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/klconflib.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1334 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/ctype.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+#include <asm/sn/agent.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/module.h>
+#include <asm/sn/router.h>
+#include <asm/sn/xtalk/xbow.h>
+
+#define printf printk
+int hasmetarouter;
+
+#define LDEBUG 0
+#define NIC_UNKNOWN ((nic_t) -1)
+
+#undef DEBUG_KLGRAPH
+#ifdef DEBUG_KLGRAPH
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif /* DEBUG_KLGRAPH */
+
+static void sort_nic_names(lboard_t *) ;
+
+lboard_t *
+find_lboard(lboard_t *start, unsigned char brd_type)
+{
+	/* Search all boards stored on this node. */
+	while (start) {
+		if (start->brd_type == brd_type)
+			return start;
+		start = KLCF_NEXT(start);
+	}
+
+	/* Didn't find it. */
+	return (lboard_t *)NULL;
+}
+
+lboard_t *
+find_lboard_class(lboard_t *start, unsigned char brd_type)
+{
+	/* Search all boards stored on this node. */
+	while (start) {
+		if (KLCLASS(start->brd_type) == KLCLASS(brd_type))
+			return start;
+		start = KLCF_NEXT(start);
+	}
+
+	/* Didn't find it. */
+	return (lboard_t *)NULL;
+}
+
+klinfo_t *
+find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
+{
+	int index, j;
+
+	if (kli == (klinfo_t *)NULL) {
+		index = 0;
+	} else {
+		for (j = 0; j < KLCF_NUM_COMPS(brd); j++) {
+			if (kli == KLCF_COMP(brd, j))
+				break;
+		}
+		index = j;
+		if (index == KLCF_NUM_COMPS(brd)) {
+			printf("find_component: Bad pointer: 0x%p\n", kli);
+			return (klinfo_t *)NULL;
+		}
+		index++;	/* next component */
+	}
+	
+	for (; index < KLCF_NUM_COMPS(brd); index++) {		
+		kli = KLCF_COMP(brd, index);
+		DBG("find_component: brd %p kli %p  request type = 0x%x kli type 0x%x\n", brd, kli, struct_type, KLCF_COMP_TYPE(kli));
+		if (KLCF_COMP_TYPE(kli) == struct_type)
+			return kli;
+	}
+
+	/* Didn't find it. */
+	return (klinfo_t *)NULL;
+}
+
+klinfo_t *
+find_first_component(lboard_t *brd, unsigned char struct_type)
+{
+	return find_component(brd, (klinfo_t *)NULL, struct_type);
+}
+
+lboard_t *
+find_lboard_modslot(lboard_t *start, moduleid_t mod, slotid_t slot)
+{
+	/* Search all boards stored on this node. */
+	while (start) {
+		if (MODULE_MATCH(start->brd_module, mod) &&
+		    (start->brd_slot == slot))
+			return start;
+		start = KLCF_NEXT(start);
+	}
+
+	/* Didn't find it. */
+	return (lboard_t *)NULL;
+}
+
+lboard_t *
+find_lboard_module(lboard_t *start, moduleid_t mod)
+{
+        /* Search all boards stored on this node. */
+        while (start) {
+                if (MODULE_MATCH(start->brd_module, mod))
+                        return start;
+                start = KLCF_NEXT(start);
+        }
+
+        /* Didn't find it. */
+        return (lboard_t *)NULL;
+}
+
+lboard_t *
+find_lboard_module_class(lboard_t *start, moduleid_t mod,
+                                                unsigned char brd_type)
+{
+	while (start) {
+
+		DBG("find_lboard_module_class: lboard 0x%p, start->brd_module 0x%x, mod 0x%x, start->brd_type 0x%x, brd_type 0x%x\n", start, start->brd_module, mod, start->brd_type, brd_type);
+
+		if (MODULE_MATCH(start->brd_module, mod) &&
+			(KLCLASS(start->brd_type) == KLCLASS(brd_type)))
+			return start;
+		start = KLCF_NEXT(start);
+	}
+
+	/* Didn't find it. */
+	return (lboard_t *)NULL;
+}
+
+#ifndef CONFIG_IA64_SGI_IO
+#define tolower(c)	(isupper(c) ? (c) - 'A' + 'a' : (c))
+#define toupper(c)	(islower(c) ? (c) - 'a' + 'A' : (c))
+#endif
+
+
+/*
+ * Convert a NIC name to a name for use in the hardware graph.
+ */
+void
+nic_name_convert(char *old_name, char *new_name)
+{
+        int i;
+        char c;
+        char *compare_ptr;
+
+	if ((old_name[0] == '\0') || (old_name[1] == '\0')) {
+                strcpy(new_name, EDGE_LBL_XWIDGET);
+        } else {
+                for (i = 0; i < strlen(old_name); i++) {
+                        c = old_name[i];
+
+                        if (isalpha(c))
+                                new_name[i] = tolower(c);
+                        else if (isdigit(c))
+                                new_name[i] = c;
+                        else
+                                new_name[i] = '_';
+                }
+                new_name[i] = '\0';
+        }
+
+        /* XXX -
+         * Since a bunch of boards made it out with weird names like
+         * IO6-fibbbed and IO6P2, we need to look for IO6 in a name and
+         * replace it with "baseio" to avoid confusion in the field.
+	 * We also have to make sure we don't report media_io instead of
+	 * baseio.
+         */
+
+        /* Skip underscores at the beginning of the name */
+        for (compare_ptr = new_name; (*compare_ptr) == '_'; compare_ptr++)
+                ;
+
+	/*
+	 * Check for some names we need to replace.  Early boards
+	 * had junk following the name so check only the first
+	 * characters.
+	 */
+        if (!strncmp(new_name, "io6", 3) || 
+            !strncmp(new_name, "mio", 3) || 
+	    !strncmp(new_name, "media_io", 8))
+		strcpy(new_name, "baseio");
+#if !defined(CONFIG_SGI_IP35) && !defined(CONFIG_IA64_SGI_SN1) && !defined(CONFIG_IA64_GENERIC)
+	else if (!strncmp(new_name, "ip29", 4))
+		strcpy(new_name,SN00_MOTHERBOARD);
+#endif
+	else if (!strncmp(new_name, "divo", 4))
+		strcpy(new_name, "divo") ;
+
+}
+
+/* Check if the given board corresponds to the global 
+ * master io6
+ */
+int
+is_master_baseio(nasid_t nasid,moduleid_t module,slotid_t slot)
+{
+	lboard_t	*board;
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+/* BRINGUP: If this works then look for callers of is_master_baseio()
+ * (e.g. iograph.c) and let them pass in a slot if they want
+ */
+	board = find_lboard_module((lboard_t *)KL_CONFIG_INFO(nasid), module);
+#else
+	board = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid),
+				    module, slot);
+#endif
+
+#ifndef _STANDALONE
+	{
+		cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
+
+		if (!board && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+			board = find_lboard_module((lboard_t *)
+				    KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
+				    module);
+#else
+			board = find_lboard_modslot((lboard_t *)
+				    KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
+				    module, slot);
+#endif
+	}
+#endif
+	if (!board)
+		return(0);
+	return(board->brd_flags & GLOBAL_MASTER_IO6);
+}
+/*
+ * Find the lboard structure and get the board name.
+ * If we can't find the structure or it's too low a revision,
+ * use default name.
+ */
+lboard_t *
+get_board_name(nasid_t nasid, moduleid_t mod, slotid_t slot, char *name)
+{
+	lboard_t *brd;
+
+	brd = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid),
+				  mod, slot);
+
+#ifndef _STANDALONE
+	{
+		cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
+
+		if (!brd && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
+			brd = find_lboard_modslot((lboard_t *)
+				KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
+				mod, slot);
+	}
+#endif
+
+	if (!brd || (brd->brd_sversion < 2)) {
+		strcpy(name, EDGE_LBL_XWIDGET);
+	} else {
+		nic_name_convert(brd->brd_name, name);
+	}
+
+	/*
+ 	 * PV # 540860
+	 * If the name is not 'baseio' or SN00 MOTHERBOARD
+	 * get the lowest of all the names in the nic string.
+	 * This is needed for boards like divo, which can have
+	 * a bunch of daughter cards, but would like to be called
+	 * divo. We could do this for baseio and SN00 MOTHERBOARD
+ 	 * but it has some special case names that we would not
+ 	 * like to disturb at this point.
+	 */
+
+	/* gfx boards don't need any of this name scrambling */
+	if (brd && (KLCLASS(brd->brd_type) == KLCLASS_GFX)) {
+		return(brd);
+	}
+
+	if (strcmp(name, "baseio") != 0) {
+		if (brd) {
+			sort_nic_names(brd) ;
+			/* Convert to small case, '-' to '_' etc */
+			nic_name_convert(brd->brd_name, name) ;
+		}
+	}
+
+	return(brd);
+}
+
+int
+get_cpu_slice(cpuid_t cpu)
+{
+	klcpu_t *acpu;
+	if ((acpu = get_cpuinfo(cpu)) == NULL)
+	    return -1;
+	return acpu->cpu_info.physid;
+}
+
+
+/*
+ * get_actual_nasid
+ *
+ *	Completely disabled brds have their klconfig on 
+ *	some other nasid as they have no memory. But their
+ *	actual nasid is hidden in the klconfig. Use this
+ *	routine to get it. Works for normal boards too.
+ */
+nasid_t
+get_actual_nasid(lboard_t *brd)
+{
+	klhub_t	*hub ;
+
+	if (!brd)
+		return INVALID_NASID ;
+
+	/* find out if we are a completely disabled brd. */
+
+        hub  = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
+	if (!hub)
+                return INVALID_NASID ;
+	if (!(hub->hub_info.flags & KLINFO_ENABLE))	/* disabled node brd */
+		return hub->hub_info.physid ;
+	else
+		return brd->brd_nasid ;
+}
+
+int
+xbow_port_io_enabled(nasid_t nasid, int link)
+{
+	lboard_t *brd;
+	klxbow_t *xbow_p;
+
+	/*
+	 * look for boards that might contain an xbow or xbridge
+	 */
+#if SN0
+	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_MIDPLANE8);
+#else
+	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_PBRICK_XBOW);
+#endif
+	if (brd == NULL) return 0;
+		
+	if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
+	    == NULL)
+	    return 0;
+
+	if (!XBOW_PORT_TYPE_IO(xbow_p, link) || !XBOW_PORT_IS_ENABLED(xbow_p, link))
+	    return 0;
+
+	printf("xbow_port_io_enabled:  brd 0x%p xbow_p 0x%p \n", brd, xbow_p);
+
+	return 1;
+}
+
+void
+board_to_path(lboard_t *brd, char *path)
+{
+	moduleid_t modnum;
+	char *board_name;
+#if !defined(CONFIG_SGI_IP35) && !defined(CONFIG_IA64_SGI_SN1) && !defined(CONFIG_IA64_GENERIC)
+	slotid_t slot;
+	char slot_name[SLOTNUM_MAXLENGTH];
+#endif
+
+	ASSERT(brd);
+
+	switch (KLCLASS(brd->brd_type)) {
+
+		case KLCLASS_NODE:
+			board_name = EDGE_LBL_NODE;
+			break;
+		case KLCLASS_ROUTER:
+			if (brd->brd_type == KLTYPE_META_ROUTER) {
+				board_name = EDGE_LBL_META_ROUTER;
+				hasmetarouter++;
+			} else
+				board_name = EDGE_LBL_ROUTER;
+			break;
+		case KLCLASS_MIDPLANE:
+			board_name = EDGE_LBL_MIDPLANE;
+			break;
+		case KLCLASS_IO:
+			board_name = EDGE_LBL_IO;
+			break;
+		case KLCLASS_IOBRICK:
+			if (brd->brd_type == KLTYPE_PBRICK)
+				board_name = EDGE_LBL_PBRICK;
+			else if (brd->brd_type == KLTYPE_IBRICK)
+				board_name = EDGE_LBL_IBRICK;
+			else if (brd->brd_type == KLTYPE_XBRICK)
+				board_name = EDGE_LBL_XBRICK;
+			else
+				board_name = EDGE_LBL_IOBRICK;
+			break;
+		default:
+			board_name = EDGE_LBL_UNKNOWN;
+	}
+			
+	modnum = brd->brd_module;
+
+#if defined(SN0)
+	slot = brd->brd_slot;
+	get_slotname(slot, slot_name);
+
+	ASSERT(modnum >= 0);
+
+	sprintf(path, "%H/" EDGE_LBL_SLOT "/%s/%s", 
+		modnum, slot_name, board_name);
+#else
+	ASSERT(modnum != MODULE_UNKNOWN && modnum != INVALID_MODULE);
+#ifdef BRINGUP /* fix IP35 hwgraph */
+	sprintf(path, EDGE_LBL_MODULE "/%x/%s", modnum, board_name);
+#else
+	sprintf(path, "%H/%s", modnum, board_name);
+#endif
+#endif
+}
+
+/*
+ * Get the module number for a NASID.
+ */
+moduleid_t
+get_module_id(nasid_t nasid)
+{
+	lboard_t *brd;
+
+	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+
+	if (!brd)
+		return INVALID_MODULE;
+	else
+		return brd->brd_module;
+}
+
+
+#ifndef CONFIG_IA64_SGI_IO
+#if 1
+/*
+ *  find_gfxpipe(#)
+ *
+ *  XXXmacko
+ *  This is only used by graphics drivers, and should be moved
+ *  over to gfx/kern/graphics/SN0 as soon as it's convenient.
+ */
+static klgfx_t *graphics_pipe_list = NULL;
+static devfs_handle_t hwgraph_all_gfxids = GRAPH_VERTEX_NONE;
+
+void
+setup_gfxpipe_link(devfs_handle_t vhdl,int pipenum)
+{
+	char idbuf[8];
+	extern graph_hdl_t hwgraph;
+
+	graph_info_add_LBL(hwgraph, vhdl, INFO_LBL_GFXID, INFO_DESC_EXPORT, 
+		(arbitrary_info_t)pipenum);
+	if (hwgraph_all_gfxids == GRAPH_VERTEX_NONE)
+		hwgraph_path_add(hwgraph_root, EDGE_LBL_GFX, &hwgraph_all_gfxids);
+	sprintf(idbuf, "%d", pipenum);
+	hwgraph_edge_add(hwgraph_all_gfxids, vhdl, idbuf);
+
+}
+#endif
+
+/* 
+ * find the pipenum'th logical graphics pipe (KLCLASS_GFX)
+ */
+lboard_t *
+find_gfxpipe(int pipenum)
+{
+        gda_t           *gdap;
+        cnodeid_t       cnode;
+        nasid_t         nasid;
+        lboard_t        *lb;
+	klgfx_t		*kg,**pkg;
+	int		i;
+
+        gdap = (gda_t *)GDA_ADDR(get_nasid());
+        if (gdap->g_magic != GDA_MAGIC)
+        	return NULL;
+
+	if (!graphics_pipe_list) {
+		/* for all nodes */
+        	for (cnode = 0; cnode < MAX_COMPACT_NODES; cnode ++) {
+                	nasid = gdap->g_nasidtable[cnode];
+                	if (nasid == INVALID_NASID)
+                        	continue;
+			lb = KL_CONFIG_INFO(nasid) ;
+			while ((lb = find_lboard_class(lb, KLCLASS_GFX)) != NULL) {
+				moduleid_t kgm, pkgm;
+				int	kgs, pkgs;
+
+#if defined(DEBUG) && (defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)) && defined(BRINGUP)
+				printf("find_gfxpipe(): PIPE: %s mod %M slot %d\n",lb?lb->brd_name:"!LBRD",
+					lb->brd_module,lb->brd_slot);
+#endif
+				/* insert lb into list */
+				if (!(kg = (klgfx_t*)find_first_component(lb,KLSTRUCT_GFX))) {
+					lb = KLCF_NEXT(lb);
+					continue;
+				}
+				/* set moduleslot now that we have brd_module set */
+				kg->moduleslot = (lb->brd_module << 8) | SLOTNUM_GETSLOT(lb->brd_slot);
+				/* make sure board has device flag set */
+				kg->gfx_info.flags |= KLINFO_DEVICE;
+				if (kg->cookie < KLGFX_COOKIE) {
+				    kg->gfx_next_pipe = NULL;
+				    kg->cookie = KLGFX_COOKIE;
+				}
+
+				kgm = kg->moduleslot>>8;
+				kgs = kg->moduleslot&0xff;
+				pkg = &graphics_pipe_list;
+				while (*pkg) {
+					pkgm = (*pkg)->moduleslot>>8;
+					pkgs = (*pkg)->moduleslot&0xff;
+
+					if (!(MODULE_CMP(kgm, pkgm) > 0 ||
+					      (MODULE_CMP(kgm, pkgm) == 0 &&
+					       kgs > pkgs)))
+					    break;
+
+					pkg = &(*pkg)->gfx_next_pipe;
+				}
+				kg->gfx_next_pipe = *pkg;
+				*pkg = kg;
+				lb = KLCF_NEXT(lb);
+			}
+		}
+#ifdef FIND_GFXPIPE_DEBUG
+		i = 0;
+		kg = graphics_pipe_list;
+		while (kg) {
+			lboard_t *lb;
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+			lb = find_lboard_class(KL_CONFIG_INFO(kg->gfx_info.nasid), KLCLASS_GFX);
+#else
+#error Need to figure out how to find graphics boards ...
+#endif
+#if defined(SUPPORT_PRINTING_M_FORMAT)
+			printf("find_gfxpipe(): %s pipe %d mod %M slot %d\n",lb?lb->brd_name:"!LBRD",i,
+				(kg->moduleslot>>8),(kg->moduleslot&0xff));
+#else
+			printf("find_gfxpipe(): %s pipe %d mod 0x%x slot %d\n",lb?lb->brd_name:"!LBRD",i,
+				(kg->moduleslot>>8),(kg->moduleslot&0xff));
+#endif
+			kg = kg->gfx_next_pipe;
+			i++;
+		}
+#endif
+        }
+
+	i = 0;
+	kg = graphics_pipe_list;
+	while (kg && (i < pipenum)) {
+		kg = kg->gfx_next_pipe;
+		i++;
+		}
+
+	if (!kg) return NULL;
+
+#if defined(SN0)
+	return find_lboard_modslot(KL_CONFIG_INFO(kg->gfx_info.nasid),
+				(kg->moduleslot>>8),
+				SLOTNUM_XTALK_CLASS|(kg->moduleslot&0xff));
+#elif defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+	return find_lboard_class(KL_CONFIG_INFO(kg->gfx_info.nasid), KLCLASS_GFX);
+#else
+#error Need to figure out how to find graphics boards ...
+#endif
+}
+#endif
+
+
+#define MHZ	1000000
+
+#ifndef CONFIG_IA64_SGI_IO
+uint
+cpu_cycles_adjust(uint orig_cycles)
+{
+	klcpu_t *acpu;
+	uint speed;
+
+	acpu  = nasid_slice_to_cpuinfo(get_nasid(), get_slice());
+
+	if (acpu == NULL) return orig_cycles;
+
+	/*
+	 * cpu cycles seem to be half of the real value, hack and mult by 2
+	 * for now.
+	 */
+	speed = (orig_cycles * 2) / MHZ;
+
+	/*
+	 * if the cpu thinks its running at some random speed nowhere close 
+	 * the programmed speed, do nothing.
+	 */
+	if ((speed < (acpu->cpu_speed - 2)) || (speed > (acpu->cpu_speed + 2)))
+	    return orig_cycles;
+	return (acpu->cpu_speed * MHZ/2);
+}
+#endif /* CONFIG_IA64_SGI_IO */
+
+/* Get the canonical hardware graph name for the given pci component
+ * on the given io board.
+ */
+void
+device_component_canonical_name_get(lboard_t 	*brd,
+				    klinfo_t 	*component,
+				    char 	*name)
+{
+	moduleid_t 	modnum;
+	slotid_t 	slot;
+	char 		board_name[20];
+#ifdef SN0
+	char 		slot_name[SLOTNUM_MAXLENGTH];
+#endif
+
+	ASSERT(brd);
+
+	/* Get the module number of this board */
+	modnum = brd->brd_module;
+
+	/* Convert the [ CLASS | TYPE ] kind of slotid
+	 * into a string 
+	 */
+	slot = brd->brd_slot;
+#ifdef SN0
+	get_slotname(slot, slot_name);
+
+	ASSERT(modnum >= 0);
+#else
+	ASSERT(modnum != MODULE_UNKNOWN && modnum != INVALID_MODULE);
+#endif
+
+	/* Get the io board name  */
+	if (!brd || (brd->brd_sversion < 2)) {
+		strcpy(name, EDGE_LBL_XWIDGET);
+	} else {
+		nic_name_convert(brd->brd_name, board_name);
+	}
+
+	/* Give out the canonical  name of the pci device*/
+#ifdef SN0
+	sprintf(name, 
+		"/hw/"EDGE_LBL_MODULE "/%M/"EDGE_LBL_SLOT"/%s/%s/"
+		EDGE_LBL_PCI"/%d", 
+		modnum, slot_name, board_name,KLCF_BRIDGE_W_ID(component));
+#elif defined (CONFIG_SGI_IP35)  || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+	sprintf(name, 
+		"/dev/hw/"EDGE_LBL_MODULE "/%x/"EDGE_LBL_SLOT"/%s/"
+		EDGE_LBL_PCI"/%d", 
+		modnum, board_name,KLCF_BRIDGE_W_ID(component));
+#endif
+	
+}
+
+/*
+ * Get the serial number of the main  component of a board
+ * Returns 0 if a valid serial number is found
+ * 1 otherwise.
+ * Assumptions: Nic manufacturing string  has the following format
+ *			*Serial:<serial_number>;*
+ */
+static int
+component_serial_number_get(lboard_t 		*board,
+			    klconf_off_t 	mfg_nic_offset,
+			    char		*serial_number,
+			    char		*key_pattern)
+{
+
+	char	*mfg_nic_string;
+	char	*serial_string,*str;
+	int	i;
+	char	*serial_pattern = "Serial:";
+
+	/* We have an error on a null mfg nic offset */
+	if (!mfg_nic_offset)
+		return(1);
+	/* Get the hub's manufacturing nic information
+	 * which is in the form of a pre-formatted string
+	 */
+	mfg_nic_string = 
+		(char *)NODE_OFFSET_TO_K0(NASID_GET(board),
+					  mfg_nic_offset);
+	/* There is no manufacturing nic info */
+	if (!mfg_nic_string)
+		return(1);
+
+	str = mfg_nic_string;
+	/* Look for the key pattern first (if it is specified)
+	 * and then print the serial number corresponding to that.
+	 */
+	if (strcmp(key_pattern,"") && 
+	    !(str = strstr(mfg_nic_string,key_pattern)))
+		return(1);
+
+	/* There is no serial number info in the manufacturing
+	 * nic info
+	 */
+	if (!(serial_string = strstr(str,serial_pattern)))
+		return(1);
+
+	serial_string = serial_string + strlen(serial_pattern);
+	/*  Copy the serial number information from the klconfig */
+	i = 0;
+	while (serial_string[i] != ';' && serial_string[i] != '\0') {
+		serial_number[i] = serial_string[i];
+		i++;
+	}
+	serial_number[i] = 0;
+	
+	return(0);
+}
+/*
+ * Get the serial number of a board
+ * Returns 0 if a valid serial number is found
+ * 1 otherwise.
+ */
+
+int
+board_serial_number_get(lboard_t *board,char *serial_number)
+{
+	ASSERT(board && serial_number);
+	if (!board || !serial_number)
+		return(1);
+
+	strcpy(serial_number,"");
+	switch(KLCLASS(board->brd_type)) {
+	case KLCLASS_CPU: {	/* Node board */
+		klhub_t	*hub;
+		
+		/* Get the hub component information */
+		hub = (klhub_t *)find_first_component(board,
+						      KLSTRUCT_HUB);
+		/* If we don't have a hub component on an IP27
+		 * then we have a weird klconfig.
+		 */
+		if (!hub)
+			return(1);
+		/* Get the serial number information from
+		 * the hub's manufacturing nic info
+		 */
+		if (component_serial_number_get(board,
+						hub->hub_mfg_nic,
+						serial_number,
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+						"IP35"))
+#else
+						"IP27"))
+			/* Try with IP31 key if IP27 key fails */
+			if (component_serial_number_get(board,
+							hub->hub_mfg_nic,
+							serial_number,
+							"IP31"))
+#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+				return(1);
+		break;
+	}
+	case KLCLASS_IO: {	/* IO board */
+		if (KLTYPE(board->brd_type) == KLTYPE_TPU) {
+		/* Special case for TPU boards */
+			kltpu_t *tpu;	
+		
+			/* Get the tpu component information */
+			tpu = (kltpu_t *)find_first_component(board,
+						      KLSTRUCT_TPU);
+			/* If we don't have a tpu component on a tpu board
+			 * then we have a weird klconfig.
+			 */
+			if (!tpu)
+				return(1);
+			/* Get the serial number information from
+			 * the tpu's manufacturing nic info
+			 */
+			if (component_serial_number_get(board,
+						tpu->tpu_mfg_nic,
+						serial_number,
+						""))
+				return(1);
+			break;
+		} else  if ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ||
+		            (KLTYPE(board->brd_type) == KLTYPE_GSN_B)) {
+		/* Special case for GSN boards */
+			klgsn_t *gsn;	
+		
+			/* Get the gsn component information */
+			gsn = (klgsn_t *)find_first_component(board,
+			      ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ?
+					KLSTRUCT_GSN_A : KLSTRUCT_GSN_B));
+			/* If we don't have a gsn component on a gsn board
+			 * then we have a weird klconfig.
+			 */
+			if (!gsn)
+				return(1);
+			/* Get the serial number information from
+			 * the gsn's manufacturing nic info
+			 */
+			if (component_serial_number_get(board,
+						gsn->gsn_mfg_nic,
+						serial_number,
+						""))
+				return(1);
+			break;
+		} else {
+		     	klbri_t	*bridge;
+		
+			/* Get the bridge component information */
+			bridge = (klbri_t *)find_first_component(board,
+							 KLSTRUCT_BRI);
+			/* If we don't have a bridge component on an IO board
+			 * then we have a weird klconfig.
+			 */
+			if (!bridge)
+				return(1);
+			/* Get the serial number information from
+		 	 * the bridge's manufacturing nic info
+			 */
+			if (component_serial_number_get(board,
+						bridge->bri_mfg_nic,
+						serial_number,
+						""))
+				return(1);
+			break;
+		}
+	}
+	case KLCLASS_ROUTER: {	/* Router board */
+		klrou_t *router;	
+		
+		/* Get the router component information */
+		router = (klrou_t *)find_first_component(board,
+							 KLSTRUCT_ROU);
+		/* If we don't have a router component on a router board
+		 * then we have a weird klconfig.
+		 */
+		if (!router)
+			return(1);
+		/* Get the serial number information from
+		 * the router's manufacturing nic info
+		 */
+		if (component_serial_number_get(board,
+						router->rou_mfg_nic,
+						serial_number,
+						""))
+			return(1);
+		break;
+	}
+	case KLCLASS_GFX: {	/* Gfx board */
+		klgfx_t *graphics;
+		
+		/* Get the graphics component information */
+		graphics = (klgfx_t *)find_first_component(board, KLSTRUCT_GFX);
+		/* If we don't have a gfx component on a gfx board
+		 * then we have a weird klconfig.
+		 */
+		if (!graphics)
+			return(1);
+		/* Get the serial number information from
+		 * the graphics's manufacturing nic info
+		 */
+		if (component_serial_number_get(board,
+						graphics->gfx_mfg_nic,
+						serial_number,
+						""))
+			return(1);
+		break;
+	}
+	default:
+		strcpy(serial_number,"");
+		break;
+	}
+	return(0);
+}
+
+#include "asm/sn/sn_private.h"
+#ifndef CONFIG_IA64_SGI_IO
+/*
+ * Given a physical address get the name of memory dimm bank
+ * in a hwgraph name format.
+ */
+void
+membank_pathname_get(paddr_t paddr,char *name)
+{
+	cnodeid_t	cnode;
+	char		slotname[SLOTNUM_MAXLENGTH];
+
+	cnode = paddr_cnode(paddr);
+	/* Make sure that we have a valid name buffer */
+	if (!name)
+		return;
+
+	name[0] = 0;
+	/* Make sure that the cnode is valid */
+	if ((cnode == CNODEID_NONE) || (cnode >= numnodes))
+		return;
+	/* Given a slotid(class:type) get the slotname */
+#if defined (SN0)
+	get_slotname(NODE_SLOTID(cnode),slotname);
+	sprintf(name,
+		"/hw/"EDGE_LBL_MODULE"/%M/"EDGE_LBL_SLOT"/%s/"EDGE_LBL_NODE
+		"/"EDGE_LBL_MEMORY"/dimm_bank/%d",
+		NODE_MODULEID(cnode),slotname,paddr_dimm(paddr));
+#elif defined (CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+	sprintf(name,
+		"/dev/hw/"EDGE_LBL_MODULE"/%M/"EDGE_LBL_NODE
+		"/"EDGE_LBL_MEMORY"/dimm_bank/%d",
+		NODE_MODULEID(cnode),paddr_dimm(paddr));
+#endif
+}
+
+
+
+/*
+ * Return 1 if node "nasid" has a 512MB (hi-density) memory bank mixed
+ * with banks of a different size, 0 otherwise.
+ */
+int
+membank_check_mixed_hidensity(nasid_t nasid)
+{
+	lboard_t *brd;
+	klmembnk_t *mem;
+	int min_size = 1024, max_size = 0;
+	int bank, mem_size;
+
+	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+	ASSERT(brd);
+
+	mem = (klmembnk_t *)find_first_component(brd, KLSTRUCT_MEMBNK);
+	ASSERT(mem);
+
+
+	/* Track the smallest and largest bank sizes present on the node */
+	for (mem_size = 0, bank = 0; bank < MD_MEM_BANKS; bank++) {
+		mem_size = KLCONFIG_MEMBNK_SIZE(mem, bank);
+		if (mem_size < min_size)
+		    min_size = mem_size;
+		if (mem_size > max_size)
+		    max_size = mem_size;
+	}
+	
+	if ((max_size == 512) && (max_size != min_size))
+	    return 1;
+
+	return 0;
+}
+
+
+/*
+ * Return 1 if any node in the system mixes hi-density memory banks
+ * with other sizes (see membank_check_mixed_hidensity()), else 0.
+ */
+int
+mem_mixed_hidensity_banks(void)
+{
+	cnodeid_t cnode;
+	nasid_t nasid;
+
+	for (cnode = 0; cnode < maxnodes; cnode++) {
+		nasid = COMPACT_TO_NASID_NODEID(cnode);
+		if (nasid == INVALID_NASID)
+		    continue;
+		if (membank_check_mixed_hidensity(nasid))
+		    return 1;
+	}
+	return 0;
+
+}
+#endif /* CONFIG_IA64_SGI_IO */
+
+/*
+ * Return the xtalk widget id of the hub behind node vertex "node_vtx".
+ * NOTE(review): the return value of hwgraph_info_get_LBL() is not
+ * checked; if the vertex carries no INFO_LBL_NODE_INFO label,
+ * hubinfo_p is dereferenced uninitialized -- confirm all callers
+ * pass a genuine node vertex.
+ */
+xwidgetnum_t
+nodevertex_widgetnum_get(devfs_handle_t node_vtx)
+{
+	hubinfo_t hubinfo_p;
+
+	hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO, 
+			     (arbitrary_info_t *) &hubinfo_p);
+	return(hubinfo_p->h_widgetid);
+}
+
+/*
+ * Return the vertex of the node sharing this node's crossbow (its
+ * "xbow peer"), or (devfs_handle_t)-1 when the nodepda records no
+ * peer.  The hwgraph_info_get_LBL() result is unchecked, as in
+ * nodevertex_widgetnum_get().
+ */
+devfs_handle_t
+nodevertex_xbow_peer_get(devfs_handle_t node_vtx)
+{
+	hubinfo_t hubinfo_p;
+	nasid_t xbow_peer_nasid;
+	cnodeid_t xbow_peer;
+
+	hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
+				     (arbitrary_info_t *) &hubinfo_p);
+	xbow_peer_nasid = hubinfo_p->h_nodepda->xbow_peer;
+	if(xbow_peer_nasid == INVALID_NASID) 
+			return ( (devfs_handle_t)-1);
+	xbow_peer = NASID_TO_COMPACT_NODEID(xbow_peer_nasid);
+	return(NODEPDA(xbow_peer)->node_vertex);
+}
+
+/* NIC Sorting Support */
+
+#define MAX_NICS_PER_STRING 	32
+#define MAX_NIC_NAME_LEN	32
+
+/*
+ * Walk the components of board "lb" and return a kernel (K0) pointer
+ * to the manufacturing NIC string of the first component that carries
+ * one (bridge, hub, router, gfx, tpu, gsn or xthd), or NULL when no
+ * component has a NIC offset.
+ */
+static char *
+get_nic_string(lboard_t *lb)
+{
+        int         	i;
+        klinfo_t    	*k = NULL ;
+    	klconf_off_t    mfg_off = 0 ;
+    	char            *mfg_nic = NULL ;
+
+        for (i = 0; i < KLCF_NUM_COMPS(lb); i++) {
+                k = KLCF_COMP(lb, i) ;
+                switch(k->struct_type) {
+                        case KLSTRUCT_BRI:
+            			mfg_off = ((klbri_t *)k)->bri_mfg_nic ;
+				break ;
+
+                        case KLSTRUCT_HUB:
+            			mfg_off = ((klhub_t *)k)->hub_mfg_nic ;
+				break ;
+
+                        case KLSTRUCT_ROU:
+            			mfg_off = ((klrou_t *)k)->rou_mfg_nic ;
+				break ;
+
+                        case KLSTRUCT_GFX:
+            			mfg_off = ((klgfx_t *)k)->gfx_mfg_nic ;
+				break ;
+
+                        case KLSTRUCT_TPU:
+            			mfg_off = ((kltpu_t *)k)->tpu_mfg_nic ;
+				break ;
+
+                        case KLSTRUCT_GSN_A:
+                        case KLSTRUCT_GSN_B:
+            			mfg_off = ((klgsn_t *)k)->gsn_mfg_nic ;
+				break ;
+
+                        case KLSTRUCT_XTHD:
+                                mfg_off = ((klxthd_t *)k)->xthd_mfg_nic ;
+                                break;
+
+			default:
+				mfg_off = 0 ;
+                                break ;
+                }
+		/* stop at the first component with a NIC offset */
+		if (mfg_off)
+			break ;
+        }
+
+	if ((mfg_off) && (k))
+		mfg_nic = (char *)NODE_OFFSET_TO_K0(k->nasid, mfg_off) ;
+
+        return mfg_nic ;
+}
+
+/*
+ * Return the lexicographically smallest of the n strings in ptrs[],
+ * or NULL when ptrs is NULL or n is 0.
+ */
+char *
+get_first_string(char **ptrs, int n)
+{
+        int     i ;
+        char    *tmpptr ;
+
+        if ((ptrs == NULL) || (n == 0))
+                return NULL ;
+
+        tmpptr = ptrs[0] ;
+
+        if (n == 1)
+                return tmpptr ;
+
+        for (i = 0 ; i < n ; i++) {
+                if (strcmp(tmpptr, ptrs[i]) > 0)
+                        tmpptr = ptrs[i] ;
+        }
+
+        return tmpptr ;
+}
+
+/*
+ * Scan "idata" for every occurrence of "label" and store pointers to
+ * the text following each one in ptrs[].  At most n-1 entries are
+ * stored so ptrs[] can always be NULL-terminated; empty fields (label
+ * immediately followed by ';') are skipped.  Returns the number of
+ * pointers stored.
+ */
+int
+get_ptrs(char *idata, char **ptrs, int n, char *label)
+{
+        int     i = 0 ;
+        char    *tmp = idata ;
+
+        if ((ptrs == NULL) || (idata == NULL) || (label == NULL) || (n == 0))
+                return 0 ;
+
+        while  ( (tmp = strstr(tmp, label)) ){
+                tmp += strlen(label) ;
+                /* check for empty name field, and last NULL ptr */
+                if ((i < (n-1)) && (*tmp != ';')) {
+                        ptrs[i++] = tmp ;
+                }
+        }
+
+        ptrs[i] = NULL ;
+
+        return i ;
+}
+
+/*
+ * sort_nic_names
+ *
+ * 	Does not really do sorting. Find the alphabetically lowest
+ *	name among all the nic names found in a nic string.
+ *
+ * Return:
+ *	Nothing
+ *
+ * Side Effects:
+ *
+ *	lb->brd_name gets the new name found
+ */
+
+static void
+sort_nic_names(lboard_t *lb)
+{
+	char 	*nic_str ;
+        char    *ptrs[MAX_NICS_PER_STRING] ;
+        char    name[MAX_NIC_NAME_LEN] ;
+        char    *tmp, *tmp1 ;
+	int	len ;
+
+	*name = 0 ;
+
+	/* Get the nic pointer from the lb */
+
+	if ((nic_str = get_nic_string(lb)) == NULL)
+		return ;
+
+	/* Collect every "Name:" field and take the alphabetically first */
+        tmp = get_first_string(ptrs,
+                        get_ptrs(nic_str, ptrs, MAX_NICS_PER_STRING, "Name:")) ;
+
+        if (tmp == NULL)
+		return ;
+
+        if  ( (tmp1 = strchr(tmp, ';')) ){
+		/* Bound the copy by the destination buffer: the original
+		 * code copied tmp1-tmp bytes unchecked, overflowing
+		 * name[] when a NIC name field is MAX_NIC_NAME_LEN or
+		 * longer.
+		 */
+		len = tmp1 - tmp ;
+		if (len > (int)sizeof(name) - 1)
+			len = sizeof(name) - 1 ;
+                strncpy(name, tmp, len) ;
+                name[len] = 0 ;
+        } else {
+                strncpy(name, tmp, (sizeof(name) -1)) ;
+                name[sizeof(name)-1] = 0 ;
+        }
+
+	strcpy(lb->brd_name, name) ;
+}
+
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+
+/* One lowercase letter per brick type, indexed by the MODULE_BTYPE value
+ * ('c' at index 0 matches the MODULE_CBRICK default used by
+ * parse_module_id()); trailing digits pad the table to MAX_BRICK_TYPES.
+ */
+char brick_types[MAX_BRICK_TYPES + 1] = "crikxdp789012345";
+
+/*
+ * Format a module id for printing.
+ *
+ * fmt == MODULE_FORMAT_BRIEF writes e.g. "002c15" (3 rack digits,
+ * brick type char, 2-digit bay); fmt == MODULE_FORMAT_LONG writes
+ * e.g. "rack/002/slot/15" using the hwgraph edge labels.  The output
+ * buffer is not bounded -- the caller must size it for the format.
+ */
+void
+format_module_id(char *buffer, moduleid_t m, int fmt)
+{
+	int rack, position;
+	char brickchar;
+
+	rack = MODULE_GET_RACK(m);
+	ASSERT(MODULE_GET_BTYPE(m) < MAX_BRICK_TYPES);
+	brickchar = MODULE_GET_BTCHAR(m);
+	position = MODULE_GET_BPOS(m);
+
+	if (fmt == MODULE_FORMAT_BRIEF) {
+	    /* Brief module number format, eg. 002c15 */
+
+	    /* Decompress the rack number */
+	    *buffer++ = '0' + RACK_GET_CLASS(rack);
+	    *buffer++ = '0' + RACK_GET_GROUP(rack);
+	    *buffer++ = '0' + RACK_GET_NUM(rack);
+
+	    /* Add the brick type */
+	    *buffer++ = brickchar;
+	}
+	else if (fmt == MODULE_FORMAT_LONG) {
+	    /* Fuller hwgraph format, eg. rack/002/bay/15 */
+
+	    strcpy(buffer, EDGE_LBL_RACK "/");  buffer += strlen(buffer);
+
+	    *buffer++ = '0' + RACK_GET_CLASS(rack);
+	    *buffer++ = '0' + RACK_GET_GROUP(rack);
+	    *buffer++ = '0' + RACK_GET_NUM(rack);
+
+	    strcpy(buffer, "/" EDGE_LBL_RPOS "/");  buffer += strlen(buffer);
+	}
+
+	/* Add the bay position, using at least two digits */
+	if (position < 10)
+	    *buffer++ = '0';
+	sprintf(buffer, "%d", position);
+
+}
+
+/*
+ * Parse a module id, in either brief or long form.
+ * Returns < 0 on error.
+ * The long form does not include a brick type, so it defaults to 0 (CBrick)
+ *
+ * Each failure path returns a distinct negative code (-1 .. -11) so
+ * callers can tell which check rejected the string.
+ */
+int
+parse_module_id(char *buffer)
+{
+	unsigned int	v, rack, bay, type, form;
+	moduleid_t	m;
+	char 		c;
+
+	if (strstr(buffer, EDGE_LBL_RACK "/") == buffer) {
+		form = MODULE_FORMAT_LONG;
+		buffer += strlen(EDGE_LBL_RACK "/");
+
+		/* A long module ID must be exactly 5 non-template chars. */
+		if (strlen(buffer) != strlen("/" EDGE_LBL_RPOS "/") + 5)
+			return -1;
+	}
+	else {
+		form = MODULE_FORMAT_BRIEF;
+
+		/* A brief module id must be exactly 6 characters */
+		if (strlen(buffer) != 6)
+			return -2;
+	}
+
+	/* The rack number must be exactly 3 digits */
+	if (!(isdigit(buffer[0]) && isdigit(buffer[1]) && isdigit(buffer[2])))
+		return -3;
+
+	rack = 0;
+	v = *buffer++ - '0';
+	if (v > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
+		return -4;
+	RACK_ADD_CLASS(rack, v);
+
+	v = *buffer++ - '0';
+	if (v > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
+		return -5;
+	RACK_ADD_GROUP(rack, v);
+
+	v = *buffer++ - '0';
+	/* rack numbers are 1-based */
+	if (v-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
+		return -6;
+	RACK_ADD_NUM(rack, v);
+
+	if (form == MODULE_FORMAT_BRIEF) {
+		/* Next should be a module type character.  Accept ucase or lcase. */
+		c = *buffer++;
+		if (!isalpha(c))
+			return -7;
+
+		/* strchr() returns a pointer into brick_types[], or NULL */
+		/* NOTE(review): when c is not a brick-type letter, this
+		 * subtracts NULL from brick_types (formally undefined);
+		 * the cast to unsigned is relied on to make the range
+		 * check below reject it.  Confirm/clean up.
+		 */
+		type = (unsigned int)(strchr(brick_types, tolower(c)) - brick_types);
+		if (type > MODULE_BTYPE_MASK >> MODULE_BTYPE_SHFT)
+			return -8;
+	}
+	else {
+		/* Hardcode the module type, and skip over the boilerplate */
+		type = MODULE_CBRICK;
+
+		if (strstr(buffer, "/" EDGE_LBL_RPOS "/") != buffer)
+			return -9;
+
+		buffer += strlen("/" EDGE_LBL_RPOS "/");
+	}
+		
+	/* The bay number is last.  Make sure it's exactly two digits */
+
+	if (!(isdigit(buffer[0]) && isdigit(buffer[1]) && !buffer[2]))
+		return -10;
+
+	bay = 10 * (buffer[0] - '0') + (buffer[1] - '0');
+
+	if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
+		return -11;
+
+	m = RBT_TO_MODULE(rack, bay, type);
+
+	/* avoid sign extending the moduleid_t */
+	return (int)(unsigned short)m;
+}
+
+#else /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+
+/*
+ * Format a module id for printing.
+ *
+ * Non-SN1 variant: the module id is a plain decimal number, printed
+ * bare (brief) or as "module/<n>" (long).  Buffer is unbounded.
+ */
+void
+format_module_id(char *buffer, moduleid_t m, int fmt)
+{
+    if (fmt == MODULE_FORMAT_BRIEF) {
+		sprintf(buffer, "%d", m);
+    }
+    else if (fmt == MODULE_FORMAT_LONG) {
+		sprintf(buffer, EDGE_LBL_MODULE "/%d", m);
+    }
+}
+
+/*
+ * Parse a module id, in either brief or long form.
+ * Returns < 0 on error.
+ *
+ * Non-SN1 variant: accepts a bare decimal number, optionally prefixed
+ * with "module/".  Any non-digit fails with -1.
+ */
+int
+parse_module_id(char *buffer)
+{
+    moduleid_t m;
+    char c;
+
+    if (strstr(buffer, EDGE_LBL_MODULE "/") == buffer)
+	buffer += strlen(EDGE_LBL_MODULE "/");
+
+    m = 0;
+    /* accumulate decimal digits; assignment in the condition is intended */
+    while(c = *buffer++) {
+	if (!isdigit(c))
+	    return -1;
+	m = 10 * m + (c - '0');
+    }
+
+    /* avoid sign extending the moduleid_t */
+    return (int)(unsigned short)m;
+}
+
+#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/klgraph.c linux/arch/ia64/sn/io/klgraph.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/klgraph.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/klgraph.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,971 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/*
+ * klgraph.c-
+ *      This file specifies the interface between the kernel and the PROM's
+ *      configuration data structures.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+#include <asm/sn/cmn_err.h>
+#include <asm/sn/agent.h>
+#ifdef CONFIG_IA64_SGI_IO
+#include <asm/sn/kldir.h>
+#endif
+#include <asm/sn/gda.h> 
+#include <asm/sn/klconfig.h>
+#include <asm/sn/router.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/hcl_util.h>
+
+#define KLGRAPH_DEBUG 1
+#ifdef KLGRAPH_DEBUG
+#define GRPRINTF(x)	printk x
+#define CE_GRPANIC	CE_PANIC
+#else
+#define GRPRINTF(x)
+#define CE_GRPANIC	CE_PANIC
+#endif
+
+#include <asm/sn/sn_private.h>
+
+extern char arg_maxnodes[];
+extern int maxnodes;
+
+#ifndef BRINGUP
+/*
+ * Gets reason for diagval using table lookup.
+ * Returns a pointer to the matching message in the static diagval_map
+ * table, or the literal "Unknown" when the code is not found.
+ */
+static char*
+get_diag_string(uint diagcode)
+{
+  int num_entries;
+  int i;
+  num_entries = sizeof(diagval_map) / sizeof(diagval_t);
+  for (i = 0; i < num_entries; i++){
+    if ((unchar)diagval_map[i].dv_code == (unchar)diagcode)
+      return diagval_map[i].dv_msg;
+  }
+  return "Unknown";
+}
+
+#endif /* ndef BRINGUP */
+
+
+/*
+ * Support for verbose inventory via hardware graph. 
+ * klhwg_invent_alloc allocates the necessary size of inventory information
+ * and fills in the generic information.
+ * Returns NULL when the allocation fails; callers must check before
+ * dereferencing.
+ */
+invent_generic_t *
+klhwg_invent_alloc(cnodeid_t cnode, int class, int size)
+{
+	invent_generic_t *invent;
+
+	invent = kern_malloc(size);
+	if (!invent) return NULL;
+	
+	invent->ig_module = NODE_MODULEID(cnode);
+	invent->ig_slot = SLOTNUM_GETSLOT(NODE_SLOTID(cnode));
+	invent->ig_invclass = class;
+
+	return invent;
+}
+
+/* 
+ * Add information about the baseio prom version number
+ * as a part of detailed inventory info in the hwgraph.
+ */
+void
+klhwg_baseio_inventory_add(devfs_handle_t baseio_vhdl,cnodeid_t cnode)
+{
+	invent_miscinfo_t	*baseio_inventory;
+	unsigned char		version = 0,revision = 0;
+
+	/* Allocate memory for the "detailed inventory" info
+	 * for the baseio
+	 */
+	baseio_inventory = (invent_miscinfo_t *) 
+		klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));
+	/* klhwg_invent_alloc() returns NULL on allocation failure; bail
+	 * out instead of dereferencing NULL (the other invent helpers in
+	 * this file, e.g. klhwg_hub_invent_info(), already check).
+	 */
+	if (!baseio_inventory)
+		return;
+	baseio_inventory->im_type = INV_IO6PROM;
+	/* Read the io6prom revision from the nvram */
+#ifndef CONFIG_IA64_SGI_IO
+	nvram_prom_version_get(&version,&revision);
+#endif
+	/* Store the revision info  in the inventory */
+	baseio_inventory->im_version = version;
+	baseio_inventory->im_rev = revision;
+	/* Put the inventory info in the hardware graph */
+	hwgraph_info_add_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT, 
+			     (arbitrary_info_t) baseio_inventory);
+	/* Make the information available to the user programs
+	 * thru hwgfs.
+	 */
+        hwgraph_info_export_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT,
+				sizeof(invent_miscinfo_t));
+}
+
+/* Printable revision string for each hub chip revision number,
+ * indexed by hub_info.revision (see klhwg_hub_invent_info()).
+ */
+char	*hub_rev[] = {
+	"0.0",
+	"1.0",
+	"2.0",
+	"2.1",
+	"2.2",
+	"2.3"
+};
+
+/*
+ * Add detailed hub inventory info (type, revision, speed) to the
+ * hardware graph and export it to userspace via hwgfs.
+ */
+void
+klhwg_hub_invent_info(devfs_handle_t hubv,
+		      cnodeid_t cnode, 
+		      klhub_t *hub)
+{
+	invent_miscinfo_t *hub_invent;
+
+	hub_invent = (invent_miscinfo_t *) 
+	    klhwg_invent_alloc(cnode, INV_MISC, sizeof(invent_miscinfo_t));
+	if (!hub_invent)
+	    return;
+
+	if (KLCONFIG_INFO_ENABLED((klinfo_t *)hub))
+	    hub_invent->im_gen.ig_flag = INVENT_ENABLED;
+
+	hub_invent->im_type = INV_HUB;
+	hub_invent->im_rev = hub->hub_info.revision;
+	hub_invent->im_speed = hub->hub_speed;
+	hwgraph_info_add_LBL(hubv, INFO_LBL_DETAIL_INVENT, 
+			     (arbitrary_info_t) hub_invent);
+        hwgraph_info_export_LBL(hubv, INFO_LBL_DETAIL_INVENT,
+				sizeof(invent_miscinfo_t));
+}
+
+/* ARGSUSED */
+/*
+ * Create the per-node "hub" vertex under node_vertex and attach the
+ * hub's inventory information.
+ * NOTE(review): on CONFIG_IA64_SGI_IO builds the rc tested below was
+ * set by device_master_set(), not by the (compiled-out)
+ * hwgraph_info_add_LBL() call the warning text refers to -- the
+ * message can be misleading there.
+ */
+void
+klhwg_add_hub(devfs_handle_t node_vertex, klhub_t *hub, cnodeid_t cnode)
+{
+	devfs_handle_t myhubv;
+	int rc;
+
+	GRPRINTF(("klhwg_add_hub: adding %s\n", EDGE_LBL_HUB));
+
+	(void) hwgraph_path_add(node_vertex, EDGE_LBL_HUB, &myhubv);
+	rc = device_master_set(myhubv, node_vertex);
+
+#ifndef CONFIG_IA64_SGI_IO
+	/*
+	 * Activate when we support hub stats.
+	 */
+	rc = hwgraph_info_add_LBL(myhubv, INFO_LBL_HUB_INFO,
+                        (arbitrary_info_t)(&NODEPDA(cnode)->hubstats));
+#endif
+
+	if (rc != GRAPH_SUCCESS) {
+		cmn_err(CE_WARN,
+			"klhwg_add_hub: Can't add hub info label 0x%p, code %d",
+			myhubv, rc);
+	}
+
+	klhwg_hub_invent_info(myhubv, cnode, hub);
+
+#ifndef BRINGUP
+	init_hub_stats(cnode, NODEPDA(cnode));
+#endif /* ndef BRINGUP */
+
+#ifndef CONFIG_IA64_SGI_IO
+	sndrv_attach(myhubv);
+#else
+	/*
+	 * Need to call our driver to do the attach?
+	 */
+	printk("klhwg_add_hub: Need to add code to do the attach.\n");
+#endif
+}
+
+#ifndef BRINGUP
+
+/*
+ * Add a node RPS (redundant power supply) vertex and its detailed
+ * inventory under "node_vertex".  "flag" non-zero marks the supply
+ * as redundant (INVENT_ENABLED).
+ */
+void
+klhwg_add_rps(devfs_handle_t node_vertex, cnodeid_t cnode, int flag)
+{
+	devfs_handle_t myrpsv;
+	invent_rpsinfo_t *rps_invent;
+	int rc;
+
+        if(cnode == CNODEID_NONE)
+                return;                                                        
+	
+	GRPRINTF(("klhwg_add_rps: adding %s to vertex 0x%x\n", EDGE_LBL_RPS,
+		node_vertex));
+
+	rc = hwgraph_path_add(node_vertex, EDGE_LBL_RPS, &myrpsv);
+	if (rc != GRAPH_SUCCESS)
+		return;
+
+	device_master_set(myrpsv, node_vertex);
+
+        rps_invent = (invent_rpsinfo_t *)
+            klhwg_invent_alloc(cnode, INV_RPS, sizeof(invent_rpsinfo_t));
+
+        if (!rps_invent)
+            return;
+
+	rps_invent->ir_xbox = 0;	/* not an xbox RPS */
+
+        if (flag)
+            rps_invent->ir_gen.ig_flag = INVENT_ENABLED;
+        else
+            rps_invent->ir_gen.ig_flag = 0x0;                                  
+
+        hwgraph_info_add_LBL(myrpsv, INFO_LBL_DETAIL_INVENT,
+                             (arbitrary_info_t) rps_invent);
+        hwgraph_info_export_LBL(myrpsv, INFO_LBL_DETAIL_INVENT,
+                                sizeof(invent_rpsinfo_t));                     
+	
+}
+
+/*
+ * klhwg_update_rps gets invoked when the system controller sends an 
+ * interrupt indicating the power supply has lost/regained the redundancy.
+ * It's responsible for updating the Hardware graph information.
+ *	rps_state = 0 -> if the rps lost the redundancy
+ *		  = 1 -> If it is redundant. 
+ */
+void 
+klhwg_update_rps(cnodeid_t cnode, int rps_state)
+{
+        devfs_handle_t node_vertex;
+        devfs_handle_t rpsv;
+        invent_rpsinfo_t *rps_invent;                                          
+        int rc;
+        if(cnode == CNODEID_NONE)
+                return;                                                        
+
+        node_vertex = cnodeid_to_vertex(cnode);                                
+	rc = hwgraph_edge_get(node_vertex, EDGE_LBL_RPS, &rpsv);
+        if (rc != GRAPH_SUCCESS)  {
+		/* no RPS vertex on this node -- nothing to update */
+		return;
+	}
+
+	rc = hwgraph_info_get_LBL(rpsv, INFO_LBL_DETAIL_INVENT, 
+				  (arbitrary_info_t *)&rps_invent);
+        if (rc != GRAPH_SUCCESS)  {
+                return;
+        }                                                                      
+
+	if (rps_state == 0 ) 
+		rps_invent->ir_gen.ig_flag = 0;
+	else 
+		rps_invent->ir_gen.ig_flag = INVENT_ENABLED;
+}
+
+/*
+ * Add an xbox RPS vertex (ir_xbox = 1) and its detailed inventory
+ * under "node_vertex"; otherwise identical to klhwg_add_rps().
+ */
+void
+klhwg_add_xbox_rps(devfs_handle_t node_vertex, cnodeid_t cnode, int flag)
+{
+	devfs_handle_t myrpsv;
+	invent_rpsinfo_t *rps_invent;
+	int rc;
+
+        if(cnode == CNODEID_NONE)
+                return;                                                        
+	
+	GRPRINTF(("klhwg_add_rps: adding %s to vertex 0x%x\n", 
+		  EDGE_LBL_XBOX_RPS, node_vertex));
+
+	rc = hwgraph_path_add(node_vertex, EDGE_LBL_XBOX_RPS, &myrpsv);
+	if (rc != GRAPH_SUCCESS)
+		return;
+
+	device_master_set(myrpsv, node_vertex);
+
+        rps_invent = (invent_rpsinfo_t *)
+            klhwg_invent_alloc(cnode, INV_RPS, sizeof(invent_rpsinfo_t));
+
+        if (!rps_invent)
+            return;
+
+	rps_invent->ir_xbox = 1;	/* xbox RPS */
+
+        if (flag)
+            rps_invent->ir_gen.ig_flag = INVENT_ENABLED;
+        else
+            rps_invent->ir_gen.ig_flag = 0x0;                                  
+
+        hwgraph_info_add_LBL(myrpsv, INFO_LBL_DETAIL_INVENT,
+                             (arbitrary_info_t) rps_invent);
+        hwgraph_info_export_LBL(myrpsv, INFO_LBL_DETAIL_INVENT,
+                                sizeof(invent_rpsinfo_t));                     
+	
+}
+
+/*
+ * klhwg_update_xbox_rps gets invoked when the xbox system controller
+ * polls the status register and discovers that the power supply has 
+ * lost/regained the redundancy.
+ * It's responsible for updating the Hardware graph information.
+ *	rps_state = 0 -> if the rps lost the redundancy
+ *		  = 1 -> If it is redundant. 
+ */
+void 
+klhwg_update_xbox_rps(cnodeid_t cnode, int rps_state)
+{
+        devfs_handle_t node_vertex;
+        devfs_handle_t rpsv;
+        invent_rpsinfo_t *rps_invent;                                          
+        int rc;
+        if(cnode == CNODEID_NONE)
+                return;                                                        
+
+        node_vertex = cnodeid_to_vertex(cnode);                                
+	rc = hwgraph_edge_get(node_vertex, EDGE_LBL_XBOX_RPS, &rpsv);
+        if (rc != GRAPH_SUCCESS)  {
+		/* no xbox RPS vertex on this node -- nothing to update */
+		return;
+	}
+
+	rc = hwgraph_info_get_LBL(rpsv, INFO_LBL_DETAIL_INVENT, 
+				  (arbitrary_info_t *)&rps_invent);
+        if (rc != GRAPH_SUCCESS)  {
+                return;
+        }                                                                      
+
+	if (rps_state == 0 ) 
+		rps_invent->ir_gen.ig_flag = 0;
+	else 
+		rps_invent->ir_gen.ig_flag = INVENT_ENABLED;
+}
+
+#endif /* ndef BRINGUP */
+
+/*
+ * Hook the crossbow (xbow) of node "cnode"/"nasid" into the hardware
+ * graph and record xbow-peer nasids in the nodepdas of the hubs found
+ * on the xbow's ports.
+ *
+ * NOTE(review): "brd" is only assigned inside the #if block below; if
+ * none of those config options is in effect, the
+ * KL_CONFIG_DUPLICATE_BOARD(brd) test reads an uninitialized pointer.
+ * Also this #if tests macro VALUES (CONFIG_SGI_IP35 ||
+ * CONFIG_IA64_SGI_SN1) while the rest of the file uses defined() --
+ * confirm which is intended.
+ */
+void
+klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
+{
+	lboard_t *brd;
+	klxbow_t *xbow_p;
+	nasid_t hub_nasid;
+	cnodeid_t hub_cnode;
+	int widgetnum;
+	devfs_handle_t xbow_v, hubv;
+	/*REFERENCED*/
+	graph_error_t err;
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || defined(CONFIG_IA64_GENERIC)
+	if ((brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid),
+				KLTYPE_PBRICK_XBOW)) == NULL)
+			return;
+#endif
+
+	if (KL_CONFIG_DUPLICATE_BOARD(brd))
+	    return;
+
+	GRPRINTF(("klhwg_add_xbow: adding cnode %d nasid %d xbow edges\n",
+			cnode, nasid));
+
+	if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
+	    == NULL)
+	    return;
+
+#ifndef CONFIG_IA64_SGI_IO
+	/*
+	 * We cannot support this function in devfs .. see below where 
+	 * we use hwgraph_path_add() to create this vertex with a known 
+	 * name.
+	 */
+	err = hwgraph_vertex_create(&xbow_v);
+	ASSERT(err == GRAPH_SUCCESS);
+
+	xswitch_vertex_init(xbow_v);
+#endif /* !CONFIG_IA64_SGI_IO */
+
+	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
+		if (!XBOW_PORT_TYPE_HUB(xbow_p, widgetnum)) 
+		    continue;
+
+		hub_nasid = XBOW_PORT_NASID(xbow_p, widgetnum);
+		printk("klhwg_add_xbow: Found xbow port type hub hub_nasid %d widgetnum %d\n", hub_nasid, widgetnum);
+		if (hub_nasid == INVALID_NASID) {
+			cmn_err(CE_WARN, "hub widget %d, skipping xbow graph\n", widgetnum);
+			continue;
+		}
+
+		hub_cnode = NASID_TO_COMPACT_NODEID(hub_nasid);
+		printk("klhwg_add_xbow: cnode %d cnode %d\n", nasid_to_compact_node[0], nasid_to_compact_node[1]);
+
+		if (is_specified(arg_maxnodes) && hub_cnode == INVALID_CNODEID) {
+			continue;
+		}
+			
+		hubv = cnodeid_to_vertex(hub_cnode);
+
+#ifdef CONFIG_IA64_SGI_IO
+		printk("klhwg_add_xbow: Hub Vertex found = %p hub_cnode %d\n", hubv, hub_cnode);
+		err = hwgraph_path_add(hubv, EDGE_LBL_XTALK, &xbow_v);
+                if (err != GRAPH_SUCCESS) {
+                        if (err == GRAPH_DUP)
+                                cmn_err(CE_WARN, "klhwg_add_xbow: Check for "
+                                        "working routers and router links!");
+
+                        cmn_err(CE_GRPANIC, "klhwg_add_xbow: Failed to add "
+                                "edge: vertex 0x%p (0x%p) to vertex 0x%p (0x%p),"
+                                "error %d\n",
+                                hubv, hubv, xbow_v, xbow_v, err);
+                }
+		xswitch_vertex_init(xbow_v); 
+#endif
+
+		NODEPDA(hub_cnode)->xbow_vhdl = xbow_v;
+
+		/*
+		 * XXX - This won't work is we ever hook up two hubs
+		 * by crosstown through a crossbow.
+		 */
+		if (hub_nasid != nasid) {
+			NODEPDA(hub_cnode)->xbow_peer = nasid;
+			NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer =
+				hub_nasid;
+		}
+
+		GRPRINTF(("klhwg_add_xbow: adding port nasid %d %s to vertex 0x%p\n",
+			hub_nasid, EDGE_LBL_XTALK, hubv));
+
+#ifndef CONFIG_IA64_SGI_IO
+		err = hwgraph_edge_add(hubv, xbow_v, EDGE_LBL_XTALK);
+		if (err != GRAPH_SUCCESS) {
+			if (err == GRAPH_DUP)
+				cmn_err(CE_WARN, "klhwg_add_xbow: Check for "
+					"working routers and router links!");
+
+			cmn_err(CE_GRPANIC, "klhwg_add_xbow: Failed to add "
+				"edge: vertex 0x%p (0x%p) to vertex 0x%p (0x%p), "
+				"error %d\n",
+				hubv, hubv, xbow_v, xbow_v, err);
+		}
+#endif
+	}
+}
+
+
+/* ARGSUSED */
+/*
+ * Create a hwgraph vertex for every IP27 node board stored in cnode's
+ * klconfig and, when the board's hub is enabled, mark the vertex as a
+ * node and fill in the nodepda (hwgraph name, slot, module).
+ * NOTE(review): "cnode + board_disabled * numnodes" is only evaluated
+ * when board_disabled == 0, so the second term is always zero here.
+ */
+void
+klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
+{
+	nasid_t nasid;
+	lboard_t *brd;
+	klhub_t *hub;
+	devfs_handle_t node_vertex = NULL;
+	char path_buffer[100];
+	int rv;
+	char *s;
+	int board_disabled = 0;
+
+	nasid = COMPACT_TO_NASID_NODEID(cnode);
+	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+	GRPRINTF(("klhwg_add_node: Adding cnode %d, nasid %d, brd 0x%p\n",
+                cnode, nasid, brd));
+	ASSERT(brd);
+
+	do {
+
+		/* Generate a hardware graph path for this board. */
+		board_to_path(brd, path_buffer);
+
+		GRPRINTF(("klhwg_add_node: adding %s to vertex 0x%p\n",
+			path_buffer, hwgraph_root));
+		rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
+
+		printk("klhwg_add_node: rv = %d graph success %d node_vertex 0x%p\n", rv, GRAPH_SUCCESS, node_vertex);
+		if (rv != GRAPH_SUCCESS)
+			cmn_err(CE_PANIC, "Node vertex creation failed.  "
+					  "Path == %s",
+				path_buffer);
+
+		hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
+		ASSERT(hub);
+		if(hub->hub_info.flags & KLINFO_ENABLE)
+			board_disabled = 0;
+		else
+			board_disabled = 1;
+		
+		if(!board_disabled) {
+			mark_nodevertex_as_node(node_vertex,
+					    cnode + board_disabled * numnodes);
+			printk("klhwg_add_node: node_vertex %p, cnode %d numnodes %d\n", node_vertex, cnode, numnodes);
+
+			s = dev_to_name(node_vertex, path_buffer, sizeof(path_buffer));
+			printk("klhwg_add_node: s %s\n", s);
+
+			NODEPDA(cnode)->hwg_node_name =
+						kmalloc(strlen(s) + 1,
+						GFP_KERNEL);
+			ASSERT_ALWAYS(NODEPDA(cnode)->hwg_node_name != NULL);
+			strcpy(NODEPDA(cnode)->hwg_node_name, s);
+
+			hubinfo_set(node_vertex, NODEPDA(cnode)->pdinfo);
+
+			/* Set up node board's slot */
+			NODEPDA(cnode)->slotdesc = brd->brd_slot;
+
+			/* Set up the module we're in */
+			NODEPDA(cnode)->module_id = brd->brd_module;
+			NODEPDA(cnode)->module = module_lookup(brd->brd_module);
+		}
+
+		/* only enabled boards get a hub vertex */
+		if(!board_disabled)
+		klhwg_add_hub(node_vertex, hub, cnode);
+		
+		brd = KLCF_NEXT(brd);
+		if (brd)
+			brd = find_lboard(brd, KLTYPE_IP27);
+		else
+			break;
+	} while(brd);
+}
+
+
+/* ARGSUSED */
+/*
+ * Create a hwgraph vertex for every non-duplicate router board found
+ * in any node's klconfig.  Edges between routers are added later by
+ * klhwg_connect_routers().
+ */
+void
+klhwg_add_all_routers(devfs_handle_t hwgraph_root)
+{
+	nasid_t nasid;
+	cnodeid_t cnode;
+	lboard_t *brd;
+	devfs_handle_t node_vertex;
+	char path_buffer[100];
+	int rv;
+
+	for (cnode = 0; cnode < maxnodes; cnode++) {
+		nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+		GRPRINTF(("klhwg_add_all_routers: adding router on cnode %d\n",
+			cnode));
+
+		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
+				KLTYPE_ROUTER);
+
+		if (!brd)
+			/* No routers stored in this node's memory */
+			continue;
+
+		do {
+			ASSERT(brd);
+			GRPRINTF(("Router board struct is %p\n", brd));
+
+			/* Don't add duplicate boards. */
+			if (brd->brd_flags & DUPLICATE_BOARD)
+				continue;
+
+			GRPRINTF(("Router 0x%p module number is %d\n", brd, brd->brd_module));
+			/* Generate a hardware graph path for this board. */
+			board_to_path(brd, path_buffer);
+
+			GRPRINTF(("Router path is %s\n", path_buffer));
+
+			/* Add the router */
+			GRPRINTF(("klhwg_add_all_routers: adding %s to vertex 0x%p\n",
+				path_buffer, hwgraph_root));
+			rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
+
+			if (rv != GRAPH_SUCCESS)
+				cmn_err(CE_PANIC, "Router vertex creation "
+						  "failed.  Path == %s",
+					path_buffer);
+
+			GRPRINTF(("klhwg_add_all_routers: get next board from 0x%p\n",
+					brd));
+		/* Find the rest of the routers stored on this node. */
+		} while ( (brd = find_lboard_class(KLCF_NEXT(brd),
+			 KLTYPE_ROUTER)) );
+
+		GRPRINTF(("klhwg_add_all_routers: Done.\n"));
+	}
+
+}
+
+/* ARGSUSED */
+/*
+ * Add hwgraph edges, one per active port (edge name is the decimal
+ * port number), from router board "brd" to the vertex of whatever
+ * board sits on the other end of each port.  Duplicate boards and
+ * ports pruned by a restricted maxnodes are skipped.
+ */
+void
+klhwg_connect_one_router(devfs_handle_t hwgraph_root, lboard_t *brd,
+			 cnodeid_t cnode, nasid_t nasid)
+{
+	klrou_t *router;
+	char path_buffer[50];
+	char dest_path[50];
+	devfs_handle_t router_hndl;
+	devfs_handle_t dest_hndl;
+	int rc;
+	int port;
+	lboard_t *dest_brd;
+
+	GRPRINTF(("klhwg_connect_one_router: Connecting router on cnode %d\n",
+			cnode));
+
+	/* Don't add duplicate boards. */
+	if (brd->brd_flags & DUPLICATE_BOARD) {
+		GRPRINTF(("klhwg_connect_one_router: Duplicate router 0x%p on cnode %d\n",
+			brd, cnode));
+		return;
+	}
+
+	/* Generate a hardware graph path for this board. */
+	board_to_path(brd, path_buffer);
+
+	rc = hwgraph_traverse(hwgraph_root, path_buffer, &router_hndl);
+
+	if (rc != GRAPH_SUCCESS && is_specified(arg_maxnodes))
+			return;
+
+	if (rc != GRAPH_SUCCESS)
+		cmn_err(CE_WARN, "Can't find router: %s", path_buffer);
+
+	/* We don't know what to do with multiple router components */
+	if (brd->brd_numcompts != 1) {
+		cmn_err(CE_PANIC,
+			"klhwg_connect_one_router: %d cmpts on router\n",
+			brd->brd_numcompts);
+		return;
+	}
+
+
+	/* Convert component 0 to klrou_t ptr */
+	router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd),
+					      brd->brd_compts[0]);
+
+	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+		/* See if the port's active */
+		if (router->rou_port[port].port_nasid == INVALID_NASID) {
+			GRPRINTF(("klhwg_connect_one_router: port %d inactive.\n",
+				 port));
+			continue;
+		}
+		if (is_specified(arg_maxnodes) && NASID_TO_COMPACT_NODEID(router->rou_port[port].port_nasid) 
+		    == INVALID_CNODEID) {
+			continue;
+		}
+
+		dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
+				router->rou_port[port].port_nasid,
+				router->rou_port[port].port_offset);
+
+		/* Generate a hardware graph path for this board. */
+		board_to_path(dest_brd, dest_path);
+
+		rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);
+
+		if (rc != GRAPH_SUCCESS) {
+			if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
+				continue;
+			cmn_err(CE_PANIC, "Can't find router: %s", dest_path);
+		}
+		GRPRINTF(("klhwg_connect_one_router: Link from %s/%d to %s\n",
+			  path_buffer, port, dest_path));
+
+		/* dest_path is reused as the edge name (the port number) */
+		sprintf(dest_path, "%d", port);
+
+		rc = hwgraph_edge_add(router_hndl, dest_hndl, dest_path);
+
+		if (rc == GRAPH_DUP) {
+			GRPRINTF(("Skipping port %d. nasid %d %s/%s\n",
+				  port, router->rou_port[port].port_nasid,
+				  path_buffer, dest_path));
+			continue;
+		}
+
+		if (rc != GRAPH_SUCCESS && !is_specified(arg_maxnodes))
+			cmn_err(CE_GRPANIC, "Can't create edge: %s/%s to vertex 0x%p error 0x%x\n",
+				path_buffer, dest_path, dest_hndl, rc);
+		
+	}
+}
+
+
+/*
+ * For each node, connect every router board stored in that node's
+ * klconfig via klhwg_connect_one_router().
+ */
+void
+klhwg_connect_routers(devfs_handle_t hwgraph_root)
+{
+	nasid_t nasid;
+	cnodeid_t cnode;
+	lboard_t *brd;
+
+	for (cnode = 0; cnode < maxnodes; cnode++) {
+		nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+		GRPRINTF(("klhwg_connect_routers: Connecting routers on cnode %d\n",
+			cnode));
+
+		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
+				KLTYPE_ROUTER);
+
+		if (!brd)
+			continue;
+
+		do {
+
+			nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+			klhwg_connect_one_router(hwgraph_root, brd,
+						 cnode, nasid);
+
+		/* Find the rest of the routers stored on this node. */
+		} while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
+	}
+}
+
+
+
+/*
+ * For each node, add an EDGE_LBL_INTERCONNECT edge from the node's
+ * hub vertex to the vertex of the board on the far end of the hub's
+ * xtalk port.  Inactive ports and nodes pruned by a restricted
+ * maxnodes are skipped.
+ */
+void
+klhwg_connect_hubs(devfs_handle_t hwgraph_root)
+{
+	nasid_t nasid;
+	cnodeid_t cnode;
+	lboard_t *brd;
+	klhub_t *hub;
+	lboard_t *dest_brd;
+	devfs_handle_t hub_hndl;
+	devfs_handle_t dest_hndl;
+	char path_buffer[50];
+	char dest_path[50];
+	graph_error_t rc;
+
+	for (cnode = 0; cnode < maxnodes; cnode++) {
+		nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+		GRPRINTF(("klhwg_connect_hubs: Connecting hubs on cnode %d\n",
+			cnode));
+
+		brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid),
+				KLTYPE_IP27);
+		ASSERT(brd);
+
+		hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
+		ASSERT(hub);
+
+		/* See if the port's active */
+		if (hub->hub_port.port_nasid == INVALID_NASID) {
+			GRPRINTF(("klhwg_connect_hubs: port inactive.\n"));
+			continue;
+		}
+
+		if (is_specified(arg_maxnodes) && NASID_TO_COMPACT_NODEID(hub->hub_port.port_nasid) == INVALID_CNODEID)
+			continue;
+
+		/* Generate a hardware graph path for this board. */
+		board_to_path(brd, path_buffer);
+
+		GRPRINTF(("klhwg_connect_hubs: Hub path is %s.\n", path_buffer));
+		rc = hwgraph_traverse(hwgraph_root, path_buffer, &hub_hndl);
+
+		if (rc != GRAPH_SUCCESS)
+			cmn_err(CE_WARN, "Can't find hub: %s", path_buffer);
+
+		dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
+				hub->hub_port.port_nasid,
+				hub->hub_port.port_offset);
+
+		/* Generate a hardware graph path for this board. */
+		board_to_path(dest_brd, dest_path);
+
+		rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);
+
+		if (rc != GRAPH_SUCCESS) {
+			if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
+				continue;
+			cmn_err(CE_PANIC, "Can't find board: %s", dest_path);
+		} else {
+		
+
+			GRPRINTF(("klhwg_connect_hubs: Link from %s to %s.\n",
+			  path_buffer, dest_path));
+
+			rc = hwgraph_edge_add(hub_hndl, dest_hndl, EDGE_LBL_INTERCONNECT);
+
+			if (rc != GRAPH_SUCCESS)
+				cmn_err(CE_GRPANIC, "Can't create edge: %s/%s to vertex 0x%p, error 0x%x\n",
+				path_buffer, dest_path, dest_hndl, rc);
+
+		}
+	}
+}
+
+/* Store the pci/vme disabled board information as extended administrative
+ * hints which can later be used by the drivers using the device/driver
+ * admin interface. 
+ */
+void
+klhwg_device_disable_hints_add(void)
+{
+	cnodeid_t	cnode; 		/* node we are looking at */
+	nasid_t		nasid;		/* nasid of the node */
+	lboard_t	*board;		/* board we are looking at */
+	int		comp_index;	/* component index */
+	klinfo_t	*component;	/* component in the board we are
+					 * looking at 
+					 */
+	char		device_name[MAXDEVNAME];
+	
+#ifndef CONFIG_IA64_SGI_IO
+	device_admin_table_init();
+#endif
+	for(cnode = 0; cnode < numnodes; cnode++) {
+		nasid = COMPACT_TO_NASID_NODEID(cnode);
+		board = (lboard_t *)KL_CONFIG_INFO(nasid);
+		/* Check out all the board info stored  on a node */
+		while(board) {
+			/* No need to look at duplicate boards or non-io 
+			 * boards
+			 */
+			if (KL_CONFIG_DUPLICATE_BOARD(board) ||
+			    KLCLASS(board->brd_type) != KLCLASS_IO) {
+				board = KLCF_NEXT(board);
+				continue;
+			}
+			/* Check out all the components of a board */
+			for (comp_index = 0; 
+			     comp_index < KLCF_NUM_COMPS(board);
+			     comp_index++) {
+				component = KLCF_COMP(board,comp_index);
+				/* If the component is enabled move on to
+				 * the next component
+				 */
+				if (KLCONFIG_INFO_ENABLED(component))
+					continue;
+				/* NOTE : Since the prom only supports
+				 * the disabling of pci devices the following
+				 * piece of code makes sense. 
+				 * Make sure that this assumption is valid
+				 */
+				/* This component is disabled. Store this
+				 * hint in the extended device admin table
+				 */
+				/* Get the canonical name of the pci device */
+				device_component_canonical_name_get(board,
+							    component,
+							    device_name);
+#ifndef CONFIG_IA64_SGI_IO
+				device_admin_table_update(device_name,
+							  ADMIN_LBL_DISABLED,
+							  "yes");
+#endif
+#ifdef DEBUG
+				printf("%s DISABLED\n",device_name);
+#endif				
+			}
+			/* go to the next board info stored on this 
+			 * node 
+			 */
+			board = KLCF_NEXT(board);
+		}
+	}
+}
+
+void
+klhwg_add_all_modules(devfs_handle_t hwgraph_root)
+{
+	cmoduleid_t	cm;
+	char		name[128];
+	devfs_handle_t	vhdl;
+	int		rc;
+
+	/* Add devices under each module */
+
+	for (cm = 0; cm < nummodules; cm++) {
+		/* Use module as module vertex fastinfo */
+
+		sprintf(name, EDGE_LBL_MODULE "/%x", modules[cm]->id);
+
+		rc = hwgraph_path_add(hwgraph_root, name, &vhdl);
+		ASSERT(rc == GRAPH_SUCCESS);
+		rc = rc;
+
+		hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) modules[cm]);
+
+		/* Add system controller */
+
+		sprintf(name,
+			EDGE_LBL_MODULE "/%x/" EDGE_LBL_L1,
+			modules[cm]->id);
+
+		rc = hwgraph_path_add(hwgraph_root, name, &vhdl);
+		ASSERT_ALWAYS(rc == GRAPH_SUCCESS); 
+		rc = rc;
+
+		hwgraph_info_add_LBL(vhdl,
+				     INFO_LBL_ELSC,
+				     (arbitrary_info_t) (__psint_t) 1);
+
+#ifndef CONFIG_IA64_SGI_IO
+		sndrv_attach(vhdl);
+#else
+		/*
+		 * We need to call the drivers attach routine ..
+		 */
+		FIXME("klhwg_add_all_modules: Need code to call driver attach.\n");
+#endif
+	}
+}
+
+void
+klhwg_add_all_nodes(devfs_handle_t hwgraph_root)
+{
+	//gda_t		*gdap = GDA;
+	gda_t		*gdap;
+	cnodeid_t	cnode;
+
+#ifdef SIMULATED_KLGRAPH
+	//gdap = 0xa800000000011000;
+	gdap = (gda_t *)0xe000000000011000;
+	printk("klhwg_add_all_nodes: SIMULATED_KLGRAPH FIXME: gdap= 0x%p\n", gdap);
+#else
+	gdap = GDA;
+#endif /* SIMULATED_KLGRAPH */
+	for (cnode = 0; cnode < numnodes; cnode++) {
+		ASSERT(gdap->g_nasidtable[cnode] != INVALID_NASID);
+		klhwg_add_node(hwgraph_root, cnode, gdap);
+	}
+
+	for (cnode = 0; cnode < numnodes; cnode++) {
+		ASSERT(gdap->g_nasidtable[cnode] != INVALID_NASID);
+
+#ifndef CONFIG_IA64_SGI_IO
+		klhwg_add_xbow(cnode, gdap->g_nasidtable[cnode]);
+#else
+		printk("klhwg_add_all_nodes: Fix me by getting real nasid\n");
+		klhwg_add_xbow(cnode, 0);
+#endif
+	}
+
+	/*
+	 * As for router hardware inventory information, we set this
+	 * up in router.c. 
+	 */
+	
+	klhwg_add_all_routers(hwgraph_root);
+	klhwg_connect_routers(hwgraph_root);
+	klhwg_connect_hubs(hwgraph_root);
+
+	/* Assign guardian nodes to each of the
+	 * routers in the system.
+	 */
+
+#ifndef CONFIG_IA64_SGI_IO
+	router_guardians_set(hwgraph_root);
+#endif
+
+	/* Go through the entire system's klconfig
+	 * to figure out which pci components have been disabled
+	 */
+	klhwg_device_disable_hints_add();
+
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/klgraph_hack.c linux/arch/ia64/sn/io/klgraph_hack.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/klgraph_hack.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/klgraph_hack.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,847 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+
+/*
+ * This is a temporary file that statically initializes the expected 
+ * initial klgraph information that is normally provided by prom.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/klconfig.h>
+
+void * real_port;
+void * real_io_base;
+void * real_addr;
+
+char *BW0 = NULL;
+
+kl_config_hdr_t *linux_klcfg;
+
+#ifdef BRINGUP
+/* forward declarations */
+extern void dump_ii(void), dump_lb(void), dump_crossbow(void);
+extern void clear_ii_error(void);
+#endif /* BRINGUP */
+
+void
+simulated_BW0_init(void)
+{
+
+	unsigned long *cnode0_hub;
+	unsigned long hub_widget = 0x1000000;
+	unsigned long hub_offset = 0x800000;
+	unsigned long hub_reg_base = 0;
+	extern void * vmalloc(unsigned long);
+
+	memset(&nasid_to_compact_node[0], 0, sizeof(cnodeid_t) * MAX_NASIDS);
+
+	BW0 = vmalloc(0x10000000);
+	if (BW0 == NULL) {
+		printk("Darn it .. cannot create space for Big Window 0\n"); return;
+	}
+	printk("BW0: Start Address %p\n", BW0);
+	
+	memset(BW0+(0x10000000 - 8), 0xf, 0x8);
+
+	printk("BW0: Last WORD address %p has value 0x%lx\n", (char *)(BW0 +(0x10000000 - 8)), *(long *)(BW0 +(0x10000000 - 8)));
+
+	printk("XWIDGET 8 Address = 0x%p\n", (unsigned long *)(NODE_SWIN_BASE(0, 8)) ); 
+
+	/*
+	 * Do some HUB Register Hack ..
+	 */
+	hub_reg_base = (unsigned long)BW0 + hub_widget + hub_offset;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_WID); *cnode0_hub = 0x1c110049;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_WSTAT); *cnode0_hub = 0x0;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_WCR); *cnode0_hub = 0x401b;
+	printk("IIO_WCR address = 0x%p\n", cnode0_hub);
+
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_ILAPR); *cnode0_hub = 0xffffffffffffffff;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_ILAPO); *cnode0_hub = 0x0;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_IOWA); *cnode0_hub = 0xff01;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_IIWA); *cnode0_hub = 0xff01;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_IIDEM); *cnode0_hub = 0xffffffffffffffff;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_ILCSR); *cnode0_hub = 0x3fc03ff640a;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_ILLR); *cnode0_hub = 0x0;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_IIDSR); *cnode0_hub = 0x1000040;
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_IGFX0); *cnode0_hub = 0x0;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_IGFX1); *cnode0_hub = 0x0;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_ISCR0); *cnode0_hub = 0x23d;
+        cnode0_hub = (unsigned long *)(hub_reg_base + IIO_ISCR1); *cnode0_hub = 0x0;
+#endif	/* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+}
+
+#define SYNERGY_WIDGET          ((char *)0xc0000e0000000000)
+#define SYNERGY_SWIZZLE         ((char *)0xc0000e0000000400)
+#define HUBREG                  ((char *)0xc0000a0001e00000)
+#define WIDGET0                 ((char *)0xc0000a0000000000)
+#define WIDGET4                 ((char *)0xc0000a0000000004)
+
+#define SYNERGY_WIDGET          ((char *)0xc0000e0000000000)
+#define SYNERGY_SWIZZLE         ((char *)0xc0000e0000000400)
+#define HUBREG                  ((char *)0xc0000a0001e00000)
+#define WIDGET0                 ((char *)0xc0000a0000000000)
+
+int test = 0;
+
+/*
+ * Hack to loop for test.
+ */
+void
+test_io_regs(void)
+{
+
+	uint32_t reg_32bits;
+	uint64_t reg_64bits;
+
+	while (test) {
+
+		reg_32bits = (uint32_t)(*(volatile uint32_t *) SYNERGY_WIDGET);
+		reg_64bits = (uint64_t) (*(volatile uint64_t *) SYNERGY_WIDGET);
+
+	}
+
+        printk("Synergy Widget Address = 0x%p, Value = 0x%lx\n", SYNERGY_WIDGET, *(volatile uint64_t *)SYNERGY_WIDGET);
+
+        printk("Synergy swizzle Address = 0x%p, Value = 0x%lx\n", SYNERGY_SWIZZLE, *(volatile uint64_t *)SYNERGY_SWIZZLE);
+        printk("HUBREG  Address = 0x%p, Value = 0x%lx\n",  HUBREG, *(volatile uint64_t *)HUBREG);
+        printk("WIDGET0 Address = 0x%p, Value = 0x%lx\n", WIDGET0, *(volatile uint64_t *)WIDGET0);
+        printk("WIDGET4 Address = 0x%p, Value = 0x%x\n", WIDGET4, *(volatile uint32_t *)WIDGET4);
+
+}
+
+void
+klgraph_hack_init(void)
+{
+
+	kl_config_hdr_t *kl_hdr_ptr;
+	lboard_t	*lb_ptr;
+	lboard_t	*temp_ptr;
+	klhub_t		*klhub_ptr;
+	klioc3_t	*klioc3_ptr;
+	klbri_t		*klbri_ptr;
+	klxbow_t	*klxbow_ptr;
+	klinfo_t	*klinfo_ptr;
+	klcomp_t	*klcomp_ptr;
+	uint64_t	*tmp;
+	volatile u32	*tmp32;
+
+#if 0
+	/* Preset some values */
+	/* Write IOERR clear to clear the CRAZY bit in the status */
+	tmp = (uint64_t *)0xc0000a0001c001f8; *tmp = (uint64_t)0xffffffff;
+	/* set widget control register...setting bedrock widget id to b */
+	/* tmp = (uint64_t *)0xc0000a0001c00020; *tmp = (uint64_t)0x801b; */
+	/* set io outbound widget access...allow all */
+	tmp = (uint64_t *)0xc0000a0001c00110; *tmp = (uint64_t)0xff01;
+	/* set io inbound widget access...allow all */
+	tmp = (uint64_t *)0xc0000a0001c00118; *tmp = (uint64_t)0xff01;
+	/* set io crb timeout to max */
+	tmp = (uint64_t *)0xc0000a0001c003c0; *tmp = (uint64_t)0xffffff;
+	tmp = (uint64_t *)0xc0000a0001c003c0; *tmp = (uint64_t)0xffffff;
+	
+	/* set local block io permission...allow all */
+	tmp = (uint64_t *)0xc0000a0001e04010; *tmp = (uint64_t)0xfffffffffffffff;
+
+	/* clear any errors */
+	clear_ii_error();
+
+	/* set default read response buffers in bridge */
+	tmp32 = (volatile u32 *)0xc0000a000f000280L;
+	*tmp32 = 0xba98;
+	tmp32 = (volatile u32 *)0xc0000a000f000288L;
+	*tmp32 = 0xba98;
+#endif
+
+printk("Widget ID Address 0x%p Value 0x%lx\n", (uint64_t *)0xc0000a0001e00000, *( (volatile uint64_t *)0xc0000a0001e00000) );
+
+printk("Widget ID Address 0x%p Value 0x%lx\n", (uint64_t *)0xc0000a0001c00000, *( (volatile uint64_t *)0xc0000a0001c00000) );
+
+printk("Widget ID Address 0x%p Value 0x%lx\n", (uint64_t *)0xc000020001e00000, *( (volatile uint64_t *)0xc000020001e00000) );
+
+
+printk("Widget ID Address 0x%p Value 0x%lx\n", (uint64_t *)0xc000020001c00000, *( (volatile uint64_t *)0xc000020001c00000) );
+
+printk("Widget ID Address 0x%p Value 0x%lx\n", (uint64_t *)0xc0000a0001e00000, *( (volatile uint64_t *)0xc0000a0001e00000) );
+
+printk("Xbow ID Address 0x%p Value 0x%x\n", (uint64_t *)0xc0000a0000000000, *( (volatile uint32_t *)0xc0000a0000000000) );
+
+printk("Xbow ID Address 0x%p Value 0x%x\n", (uint64_t *)0xc000020000000004, *( (volatile uint32_t *)0xc000020000000004) );
+
+
+	if ( test )
+		test_io_regs();
+	/*
+	 * Klconfig header.
+	 */
+	kl_hdr_ptr = kmalloc(sizeof(kl_config_hdr_t), GFP_KERNEL);
+        kl_hdr_ptr->ch_magic = 0xbeedbabe;
+        kl_hdr_ptr->ch_version = 0x0;
+        kl_hdr_ptr->ch_malloc_hdr_off = 0x48;
+        kl_hdr_ptr->ch_cons_off = 0x18;
+        kl_hdr_ptr->ch_board_info = 0x0;
+        kl_hdr_ptr->ch_cons_info.uart_base = 0x920000000f820178;
+        kl_hdr_ptr->ch_cons_info.config_base = 0x920000000f024000;
+        kl_hdr_ptr->ch_cons_info.memory_base = 0x920000000f800000;
+        kl_hdr_ptr->ch_cons_info.baud = 0x2580;
+        kl_hdr_ptr->ch_cons_info.flag = 0x1;
+        kl_hdr_ptr->ch_cons_info.type = 0x300fafa;
+        kl_hdr_ptr->ch_cons_info.nasid = 0x0;
+        kl_hdr_ptr->ch_cons_info.wid = 0xf;
+        kl_hdr_ptr->ch_cons_info.npci = 0x4;
+        kl_hdr_ptr->ch_cons_info.baseio_nic = 0x0;
+
+	/*
+	 * We need to know whether we are booting from PROM or 
+	 * boot from disk.
+	 */
+	linux_klcfg = (kl_config_hdr_t *)0xe000000000030000;
+	if (linux_klcfg->ch_magic == 0xbeedbabe) {
+		printk("Linux Kernel Booted from Disk\n");
+	} else {
+		printk("Linux Kernel Booted from PROM\n");
+		linux_klcfg = kl_hdr_ptr;
+	}
+
+	/*
+	 * lboard KLTYPE_IP35
+	 */
+	lb_ptr = kmalloc(sizeof(lboard_t), GFP_KERNEL);
+	kl_hdr_ptr->ch_board_info = (klconf_off_t) lb_ptr;
+	temp_ptr = lb_ptr;
+	printk("First Lboard = %p\n", temp_ptr);
+
+        lb_ptr->brd_next = 0;
+        lb_ptr->struct_type = 0x1;
+        lb_ptr->brd_type  = 0x11;
+        lb_ptr->brd_sversion = 0x3;
+        lb_ptr->brd_brevision = 0x1;
+        lb_ptr->brd_promver = 0x1;
+        lb_ptr->brd_promver = 0x1;
+        lb_ptr->brd_slot = 0x0;
+        lb_ptr->brd_debugsw = 0x0;
+        lb_ptr->brd_module = 0x145;
+        lb_ptr->brd_partition = 0x0;
+        lb_ptr->brd_diagval = 0x0;
+        lb_ptr->brd_diagparm = 0x0;
+        lb_ptr->brd_inventory = 0x0;
+        lb_ptr->brd_numcompts = 0x5;
+        lb_ptr->brd_nic = 0x2a0aed35;
+        lb_ptr->brd_nasid = 0x0;
+        lb_ptr->brd_errinfo = 0x0;
+        lb_ptr->brd_parent = 0x0;
+        lb_ptr->brd_graph_link  = (devfs_handle_t)0x26;
+        lb_ptr->brd_owner = 0x0;
+        lb_ptr->brd_nic_flags = 0x0;
+	memcpy(&lb_ptr->brd_name[0], "IP35", 4);
+
+	/*
+	 * Hub Component
+	 */
+	klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+	klhub_ptr = (klhub_t *)klcomp_ptr;
+	klinfo_ptr = (klinfo_t *)klcomp_ptr;
+	lb_ptr->brd_compts[0] = (klconf_off_t)klcomp_ptr;
+	printk("hub info = %p lboard = %p\n", klhub_ptr, lb_ptr);
+
+	klinfo_ptr = (klinfo_t *)klhub_ptr;
+        klinfo_ptr->struct_type = 0x2;
+        klinfo_ptr->struct_version = 0x1;
+        klinfo_ptr->flags = 0x1;
+        klinfo_ptr->revision = 0x1;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0x2a0aed35;
+        klinfo_ptr->physid = 0x0;
+        klinfo_ptr->virtid = 0x0;
+        klinfo_ptr->widid = 0x0;
+        klinfo_ptr->nasid = 0x0;
+
+        klhub_ptr->hub_flags = 0x0;
+        klhub_ptr->hub_port.port_nasid = (nasid_t)0x0ffffffff;
+        klhub_ptr->hub_port.port_flag = 0x0;
+        klhub_ptr->hub_port.port_offset = 0x0;
+        klhub_ptr->hub_box_nic = 0x0;
+        klhub_ptr->hub_mfg_nic = 0x3f420;
+        klhub_ptr->hub_speed = 0xbebc200;
+
+	/*
+	 * Memory Component
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+	lb_ptr->brd_compts[1] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x3;
+        klinfo_ptr->struct_version = 0x2;
+        klinfo_ptr->flags = 0x1;
+        klinfo_ptr->revision = 0xff;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0xff;
+        klinfo_ptr->virtid = 0xffffffff;
+        klinfo_ptr->widid = 0x0;
+        klinfo_ptr->nasid = 0x0;
+
+	/*
+	 * KLSTRUCT_HUB_UART Component
+	 */
+	klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+	klinfo_ptr = (klinfo_t *)klcomp_ptr;
+	lb_ptr->brd_compts[2] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x11;
+        klinfo_ptr->struct_version = 0x1;
+        klinfo_ptr->flags = 0x31;
+        klinfo_ptr->revision = 0xff;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0x0;
+        klinfo_ptr->virtid = 0x0;
+        klinfo_ptr->widid = 0x0;
+        klinfo_ptr->nasid = 0x0;
+
+	/*
+	 * KLSTRUCT_CPU Component
+	 */
+	klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+	lb_ptr->brd_compts[3] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x1;
+        klinfo_ptr->struct_version = 0x2;
+        klinfo_ptr->flags = 0x1;
+        klinfo_ptr->revision = 0xff;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0x0;
+        klinfo_ptr->virtid = 0x0;
+        klinfo_ptr->widid = 0x0;
+        klinfo_ptr->nasid = 0x0;
+
+	/*
+	 * KLSTRUCT_CPU Component
+	 */
+	klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+	lb_ptr->brd_compts[4] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x1;
+        klinfo_ptr->struct_version = 0x2;
+        klinfo_ptr->flags = 0x1;
+        klinfo_ptr->revision = 0xff;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0x1;
+        klinfo_ptr->virtid = 0x1;
+        klinfo_ptr->widid = 0x0;
+        klinfo_ptr->nasid = 0x0;
+
+	lb_ptr->brd_compts[5] = 0; /* Set the next one to 0 .. end */
+	lb_ptr->brd_numcompts = 5; /* 0 to 4 */
+
+	/*
+	 * lboard(0x42) KLTYPE_PBRICK_XBOW
+	 */
+	lb_ptr = kmalloc(sizeof(lboard_t), GFP_KERNEL);
+	temp_ptr->brd_next = (klconf_off_t)lb_ptr; /* Let the previous point at the new .. */
+	temp_ptr = lb_ptr;
+	printk("Second Lboard = %p\n", temp_ptr);
+
+        lb_ptr->brd_next = 0;
+        lb_ptr->struct_type = 0x1;
+        lb_ptr->brd_type  = 0x42;
+        lb_ptr->brd_sversion = 0x2;
+        lb_ptr->brd_brevision = 0x0;
+        lb_ptr->brd_promver = 0x1;
+        lb_ptr->brd_promver = 0x1;
+        lb_ptr->brd_slot = 0x0;
+        lb_ptr->brd_debugsw = 0x0;
+        lb_ptr->brd_module = 0x145;
+        lb_ptr->brd_partition = 0x1;
+        lb_ptr->brd_diagval = 0x0;
+        lb_ptr->brd_diagparm = 0x0;
+        lb_ptr->brd_inventory = 0x0;
+        lb_ptr->brd_numcompts = 0x1;
+        lb_ptr->brd_nic = 0xffffffffffffffff;
+        lb_ptr->brd_nasid = 0x0;
+        lb_ptr->brd_errinfo = 0x0;
+        lb_ptr->brd_parent = (struct lboard_s *)0x9600000000030070;
+        lb_ptr->brd_graph_link  = (devfs_handle_t)0xffffffff;
+        lb_ptr->brd_owner = 0x0;
+        lb_ptr->brd_nic_flags = 0x0;
+        memcpy(&lb_ptr->brd_name[0], "IOBRICK", 7);
+
+	/*
+	 * KLSTRUCT_XBOW Component
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+	memset(klcomp_ptr, 0, sizeof(klcomp_t));
+        klxbow_ptr = (klxbow_t *)klcomp_ptr;
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+        lb_ptr->brd_compts[0] = (klconf_off_t)klcomp_ptr;
+	printk("xbow_p 0x%p\n", klcomp_ptr);
+
+        klinfo_ptr->struct_type = 0x4;
+        klinfo_ptr->struct_version = 0x1;
+        klinfo_ptr->flags = 0x1;
+        klinfo_ptr->revision = 0x2;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0xff;
+        klinfo_ptr->virtid = 0x0;
+        klinfo_ptr->widid = 0x0;
+        klinfo_ptr->nasid = 0x0;
+
+        klxbow_ptr->xbow_master_hub_link = 0xb;
+        klxbow_ptr->xbow_port_info[0].port_nasid = 0x0;
+        klxbow_ptr->xbow_port_info[0].port_flag = 0x0;
+        klxbow_ptr->xbow_port_info[0].port_offset = 0x0;
+
+        klxbow_ptr->xbow_port_info[1].port_nasid = 0x401;
+        klxbow_ptr->xbow_port_info[1].port_flag = 0x0;
+        klxbow_ptr->xbow_port_info[1].port_offset = 0x0;
+
+        klxbow_ptr->xbow_port_info[2].port_nasid = 0x0;
+        klxbow_ptr->xbow_port_info[2].port_flag = 0x0;
+        klxbow_ptr->xbow_port_info[2].port_offset = 0x0;
+
+        klxbow_ptr->xbow_port_info[3].port_nasid = 0x0; /* ffffffff */
+        klxbow_ptr->xbow_port_info[3].port_flag = 0x6;
+        klxbow_ptr->xbow_port_info[3].port_offset = 0x30070;
+
+        klxbow_ptr->xbow_port_info[4].port_nasid = 0x0; /* ffffff00; */
+        klxbow_ptr->xbow_port_info[4].port_flag = 0x0;
+        klxbow_ptr->xbow_port_info[4].port_offset = 0x0;
+
+        klxbow_ptr->xbow_port_info[5].port_nasid = 0x0;
+        klxbow_ptr->xbow_port_info[5].port_flag = 0x0;
+        klxbow_ptr->xbow_port_info[5].port_offset = 0x0;
+        klxbow_ptr->xbow_port_info[6].port_nasid = 0x0;
+        klxbow_ptr->xbow_port_info[6].port_flag = 0x5;
+        klxbow_ptr->xbow_port_info[6].port_offset = 0x30210;
+        klxbow_ptr->xbow_port_info[7].port_nasid = 0x3;
+        klxbow_ptr->xbow_port_info[7].port_flag = 0x5;
+        klxbow_ptr->xbow_port_info[7].port_offset = 0x302e0;
+	
+	lb_ptr->brd_compts[1] = 0;
+        lb_ptr->brd_numcompts = 1;
+
+
+	/*
+	 * lboard KLTYPE_PBRICK
+	 */
+	lb_ptr = kmalloc(sizeof(lboard_t), GFP_KERNEL);
+	temp_ptr->brd_next = (klconf_off_t)lb_ptr; /* Let the previous point at the new .. */
+	temp_ptr = lb_ptr;
+	printk("Third Lboard %p\n", lb_ptr);
+
+        lb_ptr->brd_next = 0;
+        lb_ptr->struct_type = 0x1;
+        lb_ptr->brd_type  = 0x72;
+        lb_ptr->brd_sversion = 0x2;
+        lb_ptr->brd_brevision = 0x0;
+        lb_ptr->brd_promver = 0x1;
+        lb_ptr->brd_promver = 0x41;
+        lb_ptr->brd_slot = 0xe;
+        lb_ptr->brd_debugsw = 0x0;
+        lb_ptr->brd_module = 0x145;
+        lb_ptr->brd_partition = 0x1;
+        lb_ptr->brd_diagval = 0x0;
+        lb_ptr->brd_diagparm = 0x0;
+        lb_ptr->brd_inventory = 0x0;
+        lb_ptr->brd_numcompts = 0x1;
+        lb_ptr->brd_nic = 0x30e3fd;
+        lb_ptr->brd_nasid = 0x0;
+        lb_ptr->brd_errinfo = 0x0;
+        lb_ptr->brd_parent = (struct lboard_s *)0x9600000000030140;
+        lb_ptr->brd_graph_link  = (devfs_handle_t)0xffffffff;
+        lb_ptr->brd_owner = 0x0;
+        lb_ptr->brd_nic_flags = 0x0;
+	memcpy(&lb_ptr->brd_name[0], "IP35", 4);
+
+	/*
+	 * KLSTRUCT_BRI Component
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klbri_ptr = (klbri_t *)klcomp_ptr;
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+        lb_ptr->brd_compts[0] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x5;
+        klinfo_ptr->struct_version = 0x2;
+        klinfo_ptr->flags = 0x1;
+        klinfo_ptr->revision = 0x2;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0xd002;
+        klinfo_ptr->nic = 0x30e3fd;
+        klinfo_ptr->physid = 0xe;
+        klinfo_ptr->virtid = 0xe;
+        klinfo_ptr->widid = 0xe;
+        klinfo_ptr->nasid = 0x0;
+
+        klbri_ptr->bri_eprominfo = 0xff;
+        klbri_ptr->bri_bustype = 0x7;
+        klbri_ptr->bri_mfg_nic = 0x3f4a8;
+
+        lb_ptr->brd_compts[1] = 0;
+        lb_ptr->brd_numcompts = 1;
+
+	/*
+	 * lboard KLTYPE_PBRICK
+	 */
+	lb_ptr = kmalloc(sizeof(lboard_t), GFP_KERNEL);
+	temp_ptr->brd_next = (klconf_off_t)lb_ptr; /* Let the previous point at the new .. */
+	temp_ptr = lb_ptr;
+	printk("Fourth Lboard %p\n", lb_ptr);
+
+        lb_ptr->brd_next = 0x0;
+        lb_ptr->struct_type = 0x1;
+        lb_ptr->brd_type  = 0x72;
+        lb_ptr->brd_sversion = 0x2;
+        lb_ptr->brd_brevision = 0x0;
+        lb_ptr->brd_promver = 0x1;
+        lb_ptr->brd_promver = 0x31;
+        lb_ptr->brd_slot = 0xf;
+        lb_ptr->brd_debugsw = 0x0;
+        lb_ptr->brd_module = 0x145;
+        lb_ptr->brd_partition = 0x1;
+        lb_ptr->brd_diagval = 0x0;
+        lb_ptr->brd_diagparm = 0x0;
+        lb_ptr->brd_inventory = 0x0;
+        lb_ptr->brd_numcompts = 0x6;
+        lb_ptr->brd_nic = 0x30e3fd;
+        lb_ptr->brd_nasid = 0x0;
+        lb_ptr->brd_errinfo = 0x0;
+        lb_ptr->brd_parent = (struct lboard_s *)0x9600000000030140;
+        lb_ptr->brd_graph_link  = (devfs_handle_t)0xffffffff;
+        lb_ptr->brd_owner = 0x0;
+        lb_ptr->brd_nic_flags = 0x0;
+	memcpy(&lb_ptr->brd_name[0], "IP35", 4);
+
+
+	/*
+	 * KLSTRUCT_BRI Component
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+	klbri_ptr = (klbri_t *)klcomp_ptr;
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+        lb_ptr->brd_compts[0] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x5;
+        klinfo_ptr->struct_version = 0x2;
+        klinfo_ptr->flags = 0x1;
+        klinfo_ptr->revision = 0x2;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0xd002;
+        klinfo_ptr->nic = 0x30e3fd;
+        klinfo_ptr->physid = 0xf;
+        klinfo_ptr->virtid = 0xf;
+        klinfo_ptr->widid = 0xf;
+        klinfo_ptr->nasid = 0x0;
+
+        klbri_ptr->bri_eprominfo = 0xff;
+        klbri_ptr->bri_bustype = 0x7;
+        klbri_ptr->bri_mfg_nic = 0x3f528;
+
+	/*
+	 * KLSTRUCT_SCSI component
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+        lb_ptr->brd_compts[1] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0xb;
+        klinfo_ptr->struct_version = 0x1;
+        klinfo_ptr->flags = 0x31;
+        klinfo_ptr->revision = 0x5;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0x1;
+        klinfo_ptr->virtid = 0x0;
+        klinfo_ptr->widid = 0xf;
+        klinfo_ptr->nasid = 0x0;
+
+	/*
+	 * KLSTRUCT_IOC3 Component
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klioc3_ptr = (klioc3_t *)klcomp_ptr;
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+        lb_ptr->brd_compts[2] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x6;
+        klinfo_ptr->struct_version = 0x1;
+        klinfo_ptr->flags = 0x31;
+        klinfo_ptr->revision = 0x1;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0x4;
+        klinfo_ptr->virtid = 0x0;
+        klinfo_ptr->widid = 0xf;
+        klinfo_ptr->nasid = 0x0;
+
+        klioc3_ptr->ioc3_ssram = 0x0;
+        klioc3_ptr->ioc3_nvram = 0x0;
+
+	/*
+	 * KLSTRUCT_UNKNOWN Component
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+        lb_ptr->brd_compts[3] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x0;
+        klinfo_ptr->struct_version = 0x1;
+        klinfo_ptr->flags = 0x31;
+        klinfo_ptr->revision = 0xff;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0x5;
+        klinfo_ptr->virtid = 0x0;
+        klinfo_ptr->widid = 0xf;
+        klinfo_ptr->nasid = 0x0;
+
+	/*
+	 * KLSTRUCT_SCSI Component
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+        lb_ptr->brd_compts[4] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0xb;
+        klinfo_ptr->struct_version = 0x1;
+        klinfo_ptr->flags = 0x31;
+        klinfo_ptr->revision = 0x1;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0x6;
+        klinfo_ptr->virtid = 0x5;
+        klinfo_ptr->widid = 0xf;
+        klinfo_ptr->nasid = 0x0;
+
+	/*
+	 * KLSTRUCT_UNKNOWN
+	 */
+        klcomp_ptr = kmalloc(sizeof(klcomp_t), GFP_KERNEL);
+        klinfo_ptr = (klinfo_t *)klcomp_ptr;
+        lb_ptr->brd_compts[5] = (klconf_off_t)klcomp_ptr;
+
+        klinfo_ptr->struct_type = 0x0;
+        klinfo_ptr->struct_version = 0x1;
+        klinfo_ptr->flags = 0x31;
+        klinfo_ptr->revision = 0xff;
+        klinfo_ptr->diagval = 0x0;
+        klinfo_ptr->diagparm = 0x0;
+        klinfo_ptr->inventory = 0x0;
+        klinfo_ptr->partid = 0x0;
+        klinfo_ptr->nic = 0xffffffffffffffff;
+        klinfo_ptr->physid = 0x7;
+        klinfo_ptr->virtid = 0x0;
+        klinfo_ptr->widid = 0xf;
+        klinfo_ptr->nasid = 0x0;
+
+	lb_ptr->brd_compts[6] = 0;
+	lb_ptr->brd_numcompts = 6;
+
+}
+
+
+
+
+	
+#ifdef BRINGUP
+/* 
+ * these were useful for printing out registers etc
+ * during bringup  
+ */
+
+void
+xdump(long long *addr, int count)
+{
+	int ii;
+	volatile long long *xx = addr;
+
+	for ( ii = 0; ii < count; ii++, xx++ ) {
+		printk("0x%p : 0x%llx\n", xx, *xx);
+	}
+}
+
+void
+xdump32(unsigned int *addr, int count)
+{
+	int ii;
+	volatile unsigned int *xx = addr;
+
+	for ( ii = 0; ii < count; ii++, xx++ ) {
+		printk("0x%p : 0x%x\n", xx, *xx);
+	}
+}
+
+
+
+void
+clear_ii_error(void)
+{
+	volatile long long *tmp;
+
+	printk("... WSTAT ");
+	xdump((long long *)0xc0000a0001c00008, 1);
+	printk("... WCTRL ");
+	xdump((long long *)0xc0000a0001c00020, 1);
+	printk("... WLCSR ");
+	xdump((long long *)0xc0000a0001c00128, 1);
+	printk("... IIDSR ");
+	xdump((long long *)0xc0000a0001c00138, 1);
+        printk("... IOPRBs ");
+	xdump((long long *)0xc0000a0001c00198, 9);
+	printk("... IXSS ");
+	xdump((long long *)0xc0000a0001c00210, 1);
+	printk("... IBLS0 ");
+	xdump((long long *)0xc0000a0001c10000, 1);
+	printk("... IBLS1 ");
+	xdump((long long *)0xc0000a0001c20000, 1);
+
+        /* Write IOERR clear to clear the CRAZY bit in the status */
+        tmp = (long long *)0xc0000a0001c001f8; *tmp = (long long)0xffffffff;
+
+	/* dump out local block error registers */
+	printk("... ");
+	xdump((long long *)0xc0000a0001e04040, 1);	/* LB_ERROR_BITS */
+	printk("... ");
+	xdump((long long *)0xc0000a0001e04050, 1);	/* LB_ERROR_HDR1 */
+	printk("... ");
+	xdump((long long *)0xc0000a0001e04058, 1);	/* LB_ERROR_HDR2 */
+	/* and clear the LB_ERROR_BITS */
+	tmp = (long long *)0xc0000a0001e04040; *tmp = 0x0;
+	printk("clr: ");
+	xdump((long long *)0xc0000a0001e04040, 1);	/* LB_ERROR_BITS */
+	tmp = (long long *)0xc0000a0001e04050; *tmp = 0x0;
+	tmp = (long long *)0xc0000a0001e04058; *tmp = 0x0;
+}
+
+
+void
+dump_ii(void)
+{
+	printk("===== Dump the II regs =====\n");
+	xdump((long long *)0xc0000a0001c00000, 2);
+	xdump((long long *)0xc0000a0001c00020, 1);
+	xdump((long long *)0xc0000a0001c00100, 37);
+	xdump((long long *)0xc0000a0001c00300, 98);
+	xdump((long long *)0xc0000a0001c10000, 6);
+	xdump((long long *)0xc0000a0001c20000, 6);
+	xdump((long long *)0xc0000a0001c30000, 2);
+
+	xdump((long long *)0xc0000a0000000000, 1);
+	xdump((long long *)0xc0000a0001000000, 1);
+	xdump((long long *)0xc0000a0002000000, 1);
+	xdump((long long *)0xc0000a0003000000, 1);
+	xdump((long long *)0xc0000a0004000000, 1);
+	xdump((long long *)0xc0000a0005000000, 1);
+	xdump((long long *)0xc0000a0006000000, 1);
+	xdump((long long *)0xc0000a0007000000, 1);
+	xdump((long long *)0xc0000a0008000000, 1);
+	xdump((long long *)0xc0000a0009000000, 1);
+	xdump((long long *)0xc0000a000a000000, 1);
+	xdump((long long *)0xc0000a000b000000, 1);
+	xdump((long long *)0xc0000a000c000000, 1);
+	xdump((long long *)0xc0000a000d000000, 1);
+	xdump((long long *)0xc0000a000e000000, 1);
+	xdump((long long *)0xc0000a000f000000, 1);
+}
+
+void
+dump_lb(void)
+{
+	printk("===== Dump the LB regs =====\n");
+	xdump((long long *)0xc0000a0001e00000, 1);
+	xdump((long long *)0xc0000a0001e04000, 13);
+	xdump((long long *)0xc0000a0001e04100, 2);
+	xdump((long long *)0xc0000a0001e04200, 2);
+	xdump((long long *)0xc0000a0001e08000, 5);
+	xdump((long long *)0xc0000a0001e08040, 2);
+	xdump((long long *)0xc0000a0001e08050, 3);
+	xdump((long long *)0xc0000a0001e0c000, 3);
+	xdump((long long *)0xc0000a0001e0c020, 4);
+}
+
+void
+dump_crossbow(void)
+{
+	printk("===== Dump the Crossbow regs =====\n");
+	clear_ii_error();
+	xdump32((unsigned int *)0xc0000a0000000004, 1);
+	clear_ii_error();
+	xdump32((unsigned int *)0xc0000a0000000000, 1);
+	printk("and again..\n");
+	xdump32((unsigned int *)0xc0000a0000000000, 1);
+	xdump32((unsigned int *)0xc0000a0000000000, 1);
+
+
+	clear_ii_error();
+
+	xdump32((unsigned int *)0xc000020000000004, 1);
+	clear_ii_error();
+	xdump32((unsigned int *)0xc000020000000000, 1);
+	clear_ii_error();
+
+	xdump32((unsigned int *)0xc0000a0000800004, 1);
+	clear_ii_error();
+	xdump32((unsigned int *)0xc0000a0000800000, 1);
+	clear_ii_error();
+
+	xdump32((unsigned int *)0xc000020000800004, 1);
+	clear_ii_error();
+	xdump32((unsigned int *)0xc000020000800000, 1);
+	clear_ii_error();
+
+
+}
+#endif /* BRINGUP */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/l1.c linux/arch/ia64/sn/io/l1.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/l1.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/l1.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,2974 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/* In general, this file is organized in a hierarchy from lower-level
+ * to higher-level layers, as follows:
+ *
+ *	UART routines
+ *	Bedrock/L1 "PPP-like" protocol implementation
+ *	System controller "message" interface (allows multiplexing
+ *		of various kinds of requests and responses with
+ *		console I/O)
+ *	Console interfaces (there are two):
+ *	  (1) "elscuart", used in the IP35prom and (maybe) some
+ *		debugging situations elsewhere, and
+ *	  (2) "l1_cons", the glue that allows the L1 to act
+ *		as the system console for the stdio libraries
+ *
+ * Routines making use of the system controller "message"-style interface
+ * can be found in l1_command.c.  Their names are leftover from early SN0, 
+ * when the "module system controller" (msc) was known as the "entry level
+ * system controller" (elsc).  The names and signatures of those functions 
+ * remain unchanged in order to keep the SN0 -> SN1 system controller
+ * changes fairly localized.
+ */
+
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/eeprom.h>
+#include <asm/sn/ksys/i2c.h>
+#include <asm/sn/cmn_err.h>
+#include <asm/sn/router.h>
+#include <asm/sn/module.h>
+#include <asm/sn/ksys/l1.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/clksupport.h>
+
+#include <asm/sn/sn1/uart16550.h>
+
+
+#if defined(EEPROM_DEBUG)
+#define db_printf(x) printk x
+#else
+#define db_printf(x)
+#endif
+
+// From irix/kern/sys/SN/SN1/bdrkhspecregs.h
+#define    HSPEC_UART_0              0x00000080    /* UART Registers         */
+
+/*********************************************************************
+ * Hardware-level (UART) driver routines.
+ */
+
+/* macros for reading/writing registers */
+
+#define LD(x)		(*(volatile uint64_t *)(x))
+#define SD(x, v)        (LD(x) = (uint64_t) (v))
+
+/* location of uart receive/xmit data register */
+#define L1_UART_BASE(n)	((ulong)REMOTE_HSPEC_ADDR((n), HSPEC_UART_0))
+#define LOCAL_HUB	LOCAL_HUB_ADDR
+
+#define ADDR_L1_REG(n, r)	\
+    (L1_UART_BASE(n) | ( (r) << 3 ))
+
+#define READ_L1_UART_REG(n, r) \
+    ( LD(ADDR_L1_REG((n), (r))) )
+
+#define WRITE_L1_UART_REG(n, r, v) \
+    ( SD(ADDR_L1_REG((n), (r)), (v)) )
+
+
+/* Avoid conflicts with symmon...*/
+#define CONS_HW_LOCK(x)
+#define CONS_HW_UNLOCK(x)
+
+#define L1_CONS_HW_LOCK(sc)	CONS_HW_LOCK(sc->uart == BRL1_LOCALUART)
+#define L1_CONS_HW_UNLOCK(sc)	CONS_HW_UNLOCK(sc->uart == BRL1_LOCALUART)
+
+#if DEBUG
+static int debuglock_ospl; /* For CONS_HW_LOCK macro */
+#endif
+
+/* UART-related #defines */
+
+#define UART_BAUD_RATE		57600
+#define UART_FIFO_DEPTH		16
+#define UART_DELAY_SPAN		10
+#define UART_PUTC_TIMEOUT	50000
+#define UART_INIT_TIMEOUT	100000
+
+/* error codes */
+#define UART_SUCCESS		  0
+#define UART_TIMEOUT		(-1)
+#define UART_LINK		(-2)
+#define UART_NO_CHAR		(-3)
+#define UART_VECTOR		(-4)
+
+#ifdef BRINGUP
+#define UART_DELAY(x)	{ int i; i = x * 1000; while (--i); }
+#else
+#define UART_DELAY(x)	us_delay(x)
+#endif
+
+/*
+ *	Some macros for handling Endian-ness
+ */
+
+#ifdef	LITTLE_ENDIAN
+#define COPY_INT_TO_BUFFER(_b, _i, _n)		\
+	{					\
+		_b[_i++] = (_n >> 24) & 0xff;	\
+		_b[_i++] = (_n >> 16) & 0xff;	\
+		_b[_i++] = (_n >>  8) & 0xff;	\
+		_b[_i++] =  _n        & 0xff;	\
+	}
+
+#define COPY_BUFFER_TO_INT(_b, _i, _n)	/* unpack 4 BE bytes into _n */	\
+	{					\
+		_n  = (_b[_i++] & 0xff) << 24;	/* mask BEFORE shift */	\
+		_n |= (_b[_i++] & 0xff) << 16;	\
+		_n |= (_b[_i++] & 0xff) <<  8;	\
+		_n |=  _b[_i++]        & 0xff;	\
+	}
+
+#define COPY_BUFFER_TO_BUFFER(_b, _i, _bn)	\
+	{					\
+	    char *_xyz = (char *)_bn;		\
+	    _xyz[3] = _b[_i++];			\
+	    _xyz[2] = _b[_i++];			\
+	    _xyz[1] = _b[_i++];			\
+	    _xyz[0] = _b[_i++];			\
+	}
+#else	/* BIG_ENDIAN */
+#define COPY_INT_TO_BUFFER(_b, _i, _n)			\
+	{						\
+		bcopy((char *)&_n, _b, sizeof(_n));	\
+		_i += sizeof(_n);			\
+	}
+
+#define COPY_BUFFER_TO_INT(_b, _i, _n)			\
+	{						\
+		bcopy(&_b[_i], &_n, sizeof(_n));	\
+		_i += sizeof(_n);			\
+	}
+
+#define COPY_BUFFER_TO_BUFFER(_b, _i, _bn)		\
+	{						\
+            bcopy(&(_b[_i]), _bn, sizeof(int));		\
+            _i += sizeof(int);				\
+	}
+#endif	/* LITTLE_ENDIAN */
+
+int atomicAddInt(int *int_ptr, int value);
+int atomicClearInt(int *int_ptr, int value);
+void kmem_free(void *where, int size);
+
+#define BCOPY(x,y,z)	memcpy(y,x,z)
+
+extern char *bcopy(const char * src, char * dest, int count);
+
+
+int 
+get_L1_baud(void)
+{
+    return UART_BAUD_RATE;
+}
+
+
+/* uart driver functions */
+
+static void
+uart_delay( rtc_time_t delay_span )
+{
+    UART_DELAY( delay_span );
+}
+
+#define UART_PUTC_READY(n)	(READ_L1_UART_REG((n), REG_LSR) & LSR_XHRE)
+
+static int
+uart_putc( l1sc_t *sc ) 
+{
+#ifdef BRINGUP
+    /* need a delay to avoid dropping chars */
+    UART_DELAY(57);
+#endif
+    WRITE_L1_UART_REG( sc->nasid, REG_DAT,
+		       sc->send[sc->sent] );
+    return UART_SUCCESS;
+}
+
+
+static int
+uart_getc( l1sc_t *sc )
+{
+    u_char lsr_reg = 0;
+    nasid_t nasid = sc->nasid;
+
+    if( (lsr_reg = READ_L1_UART_REG( nasid, REG_LSR )) & 
+	(LSR_RCA | LSR_PARERR | LSR_FRMERR) ) 
+    {
+	if( lsr_reg & LSR_RCA ) 
+	    return( (u_char)READ_L1_UART_REG( nasid, REG_DAT ) );
+	else if( lsr_reg & (LSR_PARERR | LSR_FRMERR) ) {
+	    return UART_LINK;
+	}
+    }
+
+    return UART_NO_CHAR;
+}
+
+
+#define PROM_SER_CLK_SPEED	12000000
+#define PROM_SER_DIVISOR(x)	(PROM_SER_CLK_SPEED / ((x) * 16))
+
+static void
+uart_init( l1sc_t *sc, int baud )
+{
+    rtc_time_t expire;
+    int clkdiv;
+    nasid_t nasid;
+
+    clkdiv = PROM_SER_DIVISOR(baud);
+    expire = rtc_time() + UART_INIT_TIMEOUT;
+    nasid = sc->nasid;
+    
+    /* make sure the transmit FIFO is empty */
+    while( !(READ_L1_UART_REG( nasid, REG_LSR ) & LSR_XSRE) ) {
+	uart_delay( UART_DELAY_SPAN );
+	if( rtc_time() > expire ) {
+	    break;
+	}
+    }
+
+    L1_CONS_HW_LOCK( sc );
+
+    WRITE_L1_UART_REG( nasid, REG_LCR, LCR_DLAB );
+	uart_delay( UART_DELAY_SPAN );
+    WRITE_L1_UART_REG( nasid, REG_DLH, (clkdiv >> 8) & 0xff );
+	uart_delay( UART_DELAY_SPAN );
+    WRITE_L1_UART_REG( nasid, REG_DLL, clkdiv & 0xff );
+	uart_delay( UART_DELAY_SPAN );
+
+    /* set operating parameters and set DLAB to 0 */
+    WRITE_L1_UART_REG( nasid, REG_LCR, LCR_BITS8 | LCR_STOP1 );
+	uart_delay( UART_DELAY_SPAN );
+    WRITE_L1_UART_REG( nasid, REG_MCR, MCR_RTS | MCR_AFE );
+	uart_delay( UART_DELAY_SPAN );
+
+    /* disable interrupts */
+    WRITE_L1_UART_REG( nasid, REG_ICR, 0x0 );
+	uart_delay( UART_DELAY_SPAN );
+
+    /* enable FIFO mode and reset both FIFOs */
+    WRITE_L1_UART_REG( nasid, REG_FCR, FCR_FIFOEN );
+	uart_delay( UART_DELAY_SPAN );
+    WRITE_L1_UART_REG( nasid, REG_FCR,
+	FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO );
+
+    L1_CONS_HW_UNLOCK( sc );
+}
+
+static void
+uart_intr_enable( l1sc_t *sc, u_char mask )
+{
+    u_char lcr_reg, icr_reg;
+    nasid_t nasid = sc->nasid;
+
+    L1_CONS_HW_LOCK(sc);
+
+    /* make sure that the DLAB bit in the LCR register is 0
+     */
+    lcr_reg = READ_L1_UART_REG( nasid, REG_LCR );
+    lcr_reg &= ~(LCR_DLAB);
+    WRITE_L1_UART_REG( nasid, REG_LCR, lcr_reg );
+
+    /* enable indicated interrupts
+     */
+    icr_reg = READ_L1_UART_REG( nasid, REG_ICR );
+    icr_reg |= mask;
+    WRITE_L1_UART_REG( nasid, REG_ICR, icr_reg /*(ICR_RIEN | ICR_TIEN)*/ );
+
+    L1_CONS_HW_UNLOCK(sc);
+}
+
+static void
+uart_intr_disable( l1sc_t *sc, u_char mask )
+{
+    u_char lcr_reg, icr_reg;
+    nasid_t nasid = sc->nasid;
+
+    L1_CONS_HW_LOCK(sc);
+
+    /* make sure that the DLAB bit in the LCR register is 0
+     */
+    lcr_reg = READ_L1_UART_REG( nasid, REG_LCR );
+    lcr_reg &= ~(LCR_DLAB);
+    WRITE_L1_UART_REG( nasid, REG_LCR, lcr_reg );
+
+    /* enable indicated interrupts
+     */
+    icr_reg = READ_L1_UART_REG( nasid, REG_ICR );
+    icr_reg &= mask;
+    WRITE_L1_UART_REG( nasid, REG_ICR, icr_reg /*(ICR_RIEN | ICR_TIEN)*/ );
+
+    L1_CONS_HW_UNLOCK(sc);
+}
+
+#define uart_enable_xmit_intr(sc) \
+	uart_intr_enable((sc), ICR_TIEN)
+
+#define uart_disable_xmit_intr(sc) \
+        uart_intr_disable((sc), ~(ICR_TIEN))
+
+#define uart_enable_recv_intr(sc) \
+        uart_intr_enable((sc), ICR_RIEN)
+
+#define uart_disable_recv_intr(sc) \
+        uart_intr_disable((sc), ~(ICR_RIEN))
+
+
+/*********************************************************************
+ * Routines for accessing a remote (router) UART
+ */
+
+#define READ_RTR_L1_UART_REG(p, n, r, v)		\
+    {							\
+	if( vector_read_node( (p), (n), 0,		\
+			      RR_JBUS1(r), (v) ) ) {	\
+	    return UART_VECTOR;				\
+	}						\
+    }
+
+#define WRITE_RTR_L1_UART_REG(p, n, r, v)		\
+    {							\
+	if( vector_write_node( (p), (n), 0,		\
+			       RR_JBUS1(r), (v) ) ) {	\
+	    return UART_VECTOR;				\
+	}						\
+    }
+
+#ifdef SABLE
+#define RTR_UART_PUTC_TIMEOUT	0
+#define RTR_UART_DELAY_SPAN	0
+#define RTR_UART_INIT_TIMEOUT	0
+#else
+#define RTR_UART_PUTC_TIMEOUT	UART_PUTC_TIMEOUT*10
+#define RTR_UART_DELAY_SPAN	UART_DELAY_SPAN
+#define RTR_UART_INIT_TIMEOUT	UART_INIT_TIMEOUT*10
+#endif
+
+static int
+rtr_uart_putc( l1sc_t *sc )
+{
+    uint64_t regval, c;
+    nasid_t nasid = sc->nasid;
+    net_vec_t path = sc->uart;
+    rtc_time_t expire = rtc_time() + RTR_UART_PUTC_TIMEOUT;
+    
+    c = (sc->send[sc->sent] & 0xffULL);
+    
+    while( 1 ) 
+    {
+        /* Check for "tx hold reg empty" bit. */
+	READ_RTR_L1_UART_REG( path, nasid, REG_LSR, &regval );
+	if( regval & LSR_XHRE )
+	{
+	    WRITE_RTR_L1_UART_REG( path, nasid, REG_DAT, c );
+	    return UART_SUCCESS;
+	}
+
+	if( rtc_time() >= expire ) 
+	{
+	    return UART_TIMEOUT;
+	}
+	uart_delay( RTR_UART_DELAY_SPAN );
+    }
+}
+
+
+static int
+rtr_uart_getc( l1sc_t *sc )
+{
+    uint64_t regval;
+    nasid_t nasid = sc->nasid;
+    net_vec_t path = sc->uart;
+
+    READ_RTR_L1_UART_REG( path, nasid, REG_LSR, &regval );
+    if( regval & (LSR_RCA | LSR_PARERR | LSR_FRMERR) )
+    {
+	if( regval & LSR_RCA )
+	{
+	    READ_RTR_L1_UART_REG( path, nasid, REG_DAT, &regval );
+	    return( (int)regval );
+	}
+	else
+	{
+	    return UART_LINK;
+	}
+    }
+
+    return UART_NO_CHAR;
+}
+
+
+static int
+rtr_uart_init( l1sc_t *sc, int baud )
+{
+    rtc_time_t expire;
+    int clkdiv;
+    nasid_t nasid;
+    net_vec_t path;
+    uint64_t regval;
+
+    clkdiv = PROM_SER_DIVISOR(baud);
+    expire = rtc_time() + RTR_UART_INIT_TIMEOUT;
+    nasid = sc->nasid;
+    path = sc->uart;
+
+    /* make sure the transmit FIFO is empty */
+    while(1) {
+	READ_RTR_L1_UART_REG( path, nasid, REG_LSR, &regval );
+	if( regval & LSR_XSRE ) {
+	    break;
+	}
+	if( rtc_time() > expire ) {
+	    break;
+	}
+	uart_delay( RTR_UART_DELAY_SPAN );
+    }
+
+    WRITE_RTR_L1_UART_REG( path, nasid, REG_LCR, LCR_DLAB  );
+	uart_delay( UART_DELAY_SPAN );
+    WRITE_RTR_L1_UART_REG( path, nasid, REG_DLH, (clkdiv >> 8) & 0xff  );
+	uart_delay( UART_DELAY_SPAN );
+    WRITE_RTR_L1_UART_REG( path, nasid, REG_DLL, clkdiv & 0xff  );
+	uart_delay( UART_DELAY_SPAN );
+
+    /* set operating parameters and set DLAB to 0 */
+    WRITE_RTR_L1_UART_REG( path, nasid, REG_LCR, LCR_BITS8 | LCR_STOP1  );
+	uart_delay( UART_DELAY_SPAN );
+    WRITE_RTR_L1_UART_REG( path, nasid, REG_MCR, MCR_RTS | MCR_AFE  );
+	uart_delay( UART_DELAY_SPAN );
+
+    /* disable interrupts */
+    WRITE_RTR_L1_UART_REG( path, nasid, REG_ICR, 0x0  );
+	uart_delay( UART_DELAY_SPAN );
+
+    /* enable FIFO mode and reset both FIFOs */
+    WRITE_RTR_L1_UART_REG( path, nasid, REG_FCR, FCR_FIFOEN  );
+	uart_delay( UART_DELAY_SPAN );
+    WRITE_RTR_L1_UART_REG( path, nasid, REG_FCR,
+	FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO );
+
+    return 0;
+}
+    
+	
+
+/*********************************************************************
+ * locking macros 
+ */
+
+#define L1SC_SEND_LOCK(l,pl)						\
+     { if( (l)->uart == BRL1_LOCALUART )				\
+	 (pl) = mutex_spinlock_spl( &((l)->send_lock), spl7 ); }
+
+#define L1SC_SEND_UNLOCK(l,pl)				\
+     { if( (l)->uart == BRL1_LOCALUART )		\
+	 mutex_spinunlock( &((l)->send_lock), (pl)); }
+
+#define L1SC_RECV_LOCK(l,pl)						\
+     { if( (l)->uart == BRL1_LOCALUART )				\
+	 (pl) = mutex_spinlock_spl( &((l)->recv_lock), spl7 ); }
+
+#define L1SC_RECV_UNLOCK(l,pl)				\
+     { if( (l)->uart == BRL1_LOCALUART )		\
+	 mutex_spinunlock( &((l)->recv_lock), (pl)); }
+
+
+/*********************************************************************
+ * subchannel manipulation 
+ *
+ * The SUBCH_[UN]LOCK macros are used to arbitrate subchannel
+ * allocation.  SUBCH_DATA_[UN]LOCK control access to data structures
+ * associated with particular subchannels (e.g., receive queues).
+ *
+ */
+
+
+#ifdef SPINLOCKS_WORK
+#define SUBCH_LOCK(sc,pl) \
+     (pl) = mutex_spinlock_spl( &((sc)->subch_lock), spl7 )
+#define SUBCH_UNLOCK(sc,pl) \
+     mutex_spinunlock( &((sc)->subch_lock), (pl) )
+
+#define SUBCH_DATA_LOCK(sbch,pl) \
+     (pl) = mutex_spinlock_spl( &((sbch)->data_lock), spl7 )
+#define SUBCH_DATA_UNLOCK(sbch,pl) \
+     mutex_spinunlock( &((sbch)->data_lock), (pl) )
+#else
+#define SUBCH_LOCK(sc,pl) 
+#define SUBCH_UNLOCK(sc,pl)
+#define SUBCH_DATA_LOCK(sbch,pl)
+#define SUBCH_DATA_UNLOCK(sbch,pl)
+#endif	/* SPINLOCKS_WORK */
+
+/*
+ * set a function to be called for subchannel ch in the event of
+ * a transmission low-water interrupt from the uart
+ */
+void
+subch_set_tx_notify( l1sc_t *sc, int ch, brl1_notif_t func )
+{
+    int pl;
+    L1SC_SEND_LOCK( sc, pl );
+    sc->subch[ch].tx_notify = func;
+    
+    /* some upper layer is asking to be notified of low-water, but if the 
+     * send buffer isn't already in use, we're going to need to get the
+     * interrupts going on the uart...
+     */
+    if( func && !sc->send_in_use )
+	uart_enable_xmit_intr( sc );
+    L1SC_SEND_UNLOCK(sc, pl );
+}
+
+/*
+ * set a function to be called for subchannel ch when data is received
+ */
+void
+subch_set_rx_notify( l1sc_t *sc, int ch, brl1_notif_t func )
+{
+#ifdef SPINLOCKS_WORK
+    int pl;
+#endif
+    brl1_sch_t *subch = &(sc->subch[ch]);
+
+    SUBCH_DATA_LOCK( subch, pl );
+    sc->subch[ch].rx_notify = func;
+    SUBCH_DATA_UNLOCK( subch, pl );
+}
+
+
+
+/* get_myid is an internal function that reads the PI_CPU_NUM
+ * register of the local bedrock to determine which of the
+ * four possible CPU's "this" one is
+ */
+static int
+get_myid( void )
+{
+    return( LD(LOCAL_HUB(PI_CPU_NUM)) );
+}
+
+
+
+/*********************************************************************
+ * Queue manipulation macros
+ *
+ *
+ */
+#define NEXT(p)         (((p) + 1) & (BRL1_QSIZE-1)) /* assume power of 2 */
+
+#define cq_init(q)      bzero((q), sizeof (*(q)))
+#define cq_empty(q)     ((q)->ipos == (q)->opos)
+#define cq_full(q)      (NEXT((q)->ipos) == (q)->opos)
+#define cq_used(q)      ((q)->opos <= (q)->ipos ?                       \
+                         (q)->ipos - (q)->opos :                        \
+                         BRL1_QSIZE + (q)->ipos - (q)->opos)
+#define cq_room(q)      ((q)->opos <= (q)->ipos ?                       \
+                         BRL1_QSIZE - 1 + (q)->opos - (q)->ipos :       \
+                         (q)->opos - (q)->ipos - 1)
+#define cq_add(q, c)    ((q)->buf[(q)->ipos] = (u_char) (c),            \
+                         (q)->ipos = NEXT((q)->ipos))
+#define cq_rem(q, c)    ((c) = (q)->buf[(q)->opos],                     \
+                         (q)->opos = NEXT((q)->opos))
+#define cq_discard(q)	((q)->opos = NEXT((q)->opos))
+
+#define cq_tent_full(q)	(NEXT((q)->tent_next) == (q)->opos)
+#define cq_tent_len(q)	((q)->ipos <= (q)->tent_next ?			\
+			 (q)->tent_next - (q)->ipos :			\
+			 BRL1_QSIZE + (q)->tent_next - (q)->ipos)
+#define cq_tent_add(q, c)						\
+			((q)->buf[(q)->tent_next] = (u_char) (c),	\
+			 (q)->tent_next = NEXT((q)->tent_next))
+#define cq_commit_tent(q)						\
+			((q)->ipos = (q)->tent_next)
+#define cq_discard_tent(q)						\
+			((q)->tent_next = (q)->ipos)
+
+
+
+
+/*********************************************************************
+ * CRC-16 (for checking bedrock/L1 packets).
+ *
+ * These are based on RFC 1662 ("PPP in HDLC-like framing").
+ */
+
+static unsigned short fcstab[256] = {
+      0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
+      0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
+      0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
+      0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
+      0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
+      0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
+      0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
+      0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
+      0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
+      0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
+      0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
+      0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
+      0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
+      0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
+      0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
+      0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
+      0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
+      0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
+      0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
+      0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
+      0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
+      0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
+      0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
+      0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
+      0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
+      0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
+      0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
+      0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
+      0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
+      0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
+      0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
+      0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
+};
+
+#define INIT_CRC	0xFFFF	/* initial CRC value	  */
+#define	GOOD_CRC	0xF0B8	/* "good" final CRC value */
+
+static unsigned short crc16_calc( unsigned short crc, u_char c )
+{
+    return( (crc >> 8) ^ fcstab[(crc ^ c) & 0xff] );
+}
+
+
+/***********************************************************************
+ * The following functions implement the PPP-like bedrock/L1 protocol
+ * layer.
+ *
+ */
+
+#define BRL1_FLAG_CH	0x7e
+#define BRL1_ESC_CH	0x7d
+#define BRL1_XOR_CH	0x20
+
+/* L1<->Bedrock packet types */
+#define BRL1_REQUEST    0x00
+#define BRL1_RESPONSE   0x20
+#define BRL1_EVENT      0x40
+
+#define BRL1_PKT_TYPE_MASK      0xE0
+#define BRL1_SUBCH_MASK         0x1F
+
+#define PKT_TYPE(tsb)   ((tsb) & BRL1_PKT_TYPE_MASK)
+#define SUBCH(tsb)	((tsb) & BRL1_SUBCH_MASK)
+
+/* timeouts */
+#define BRL1_INIT_TIMEOUT	500000
+
+extern l1sc_t * get_elsc( void );
+
+/*
+ * brl1_discard_packet is a dummy "receive callback" used to get rid
+ * of packets we don't want
+ */
+void brl1_discard_packet( l1sc_t *sc, int ch )
+{
+    int pl;
+    brl1_sch_t *subch = &sc->subch[ch];
+    sc_cq_t *q = subch->iqp;
+    SUBCH_DATA_LOCK( subch, pl );
+    q->opos = q->ipos;
+    atomicClearInt( &(subch->packet_arrived), ~((unsigned)0) );
+    SUBCH_DATA_UNLOCK( subch, pl );
+}
+
+
+/*
+ * brl1_send_chars sends the send buffer in the l1sc_t structure
+ * out through the uart.  Assumes that the caller has locked the
+ * UART (or send buffer in the kernel).
+ *
+ * This routine doesn't block-- if you want it to, call it in
+ * a loop.
+ */
+static int
+brl1_send_chars( l1sc_t *sc )
+{
+    /* In the kernel, we track the depth of the C brick's UART's
+     * fifo in software, and only check if the UART is accepting
+     * characters when our count indicates that the fifo should
+     * be full.
+     *
+     * For remote (router) UARTs, and also for the local (C brick)
+     * UART in the prom, we check with the UART before sending every
+     * character.
+     */
+    if( sc->uart == BRL1_LOCALUART ) 
+    {
+	CONS_HW_LOCK(1);
+	if( !(sc->fifo_space) && UART_PUTC_READY( sc->nasid ) )
+//	    sc->fifo_space = UART_FIFO_DEPTH;
+	    sc->fifo_space = 1000;
+	
+	while( (sc->sent < sc->send_len) && (sc->fifo_space) ) {
+	    uart_putc( sc );
+	    sc->fifo_space--;
+	    sc->sent++;
+	}
+
+	CONS_HW_UNLOCK(1);
+    }
+
+    else
+
+    /* The following applies to all UARTs in the prom, and to remote
+     * (router) UARTs in the kernel...
+     */
+
+#define TIMEOUT_RETRIES	30
+
+    {
+	int result;
+	int tries = 0;
+
+	while( sc->sent < sc->send_len ) {
+	    result = sc->putc_f( sc );
+	    if( result >= 0 ) {
+		(sc->sent)++;
+		continue;
+	    }
+	    if( result == UART_TIMEOUT ) {
+		tries++;
+		/* send this character in TIMEOUT_RETRIES... */
+		if( tries < TIMEOUT_RETRIES ) {
+		    continue;
+		}
+		/* ...or else... */
+		else {
+		    /* ...drop the packet. */
+		    sc->sent = sc->send_len;
+		    return sc->send_len;
+		}
+	    }
+	    if( result < 0 ) {
+		return result;
+	    }
+	}
+    }
+    
+    return sc->sent;
+}
+
+
+/* brl1_send formats up a packet and (at least begins to) send it
+ * to the uart.  If the send buffer is in use when this routine obtains
+ * the lock, it will behave differently depending on the "wait" parameter.
+ * For wait == 0 (most I/O), it will return 0 (as in "zero bytes sent"),
+ * hopefully encouraging the caller to back off (unlock any high-level 
+ * spinlocks) and allow the buffer some time to drain.  For wait==1 (high-
+ * priority I/O along the lines of kernel error messages), we will flush
+ * the current contents of the send buffer and beat on the uart
+ * until our message has been completely transmitted.
+ */
+
+int
+brl1_send( l1sc_t *sc, char *msg, int len, u_char type_and_subch, int wait )
+{
+    int pl;
+    int index;
+    int pkt_len = 0;
+    unsigned short crc = INIT_CRC;
+    char *send_ptr = sc->send;
+
+    L1SC_SEND_LOCK(sc, pl);
+
+    if( sc->send_in_use ) {
+	if( !wait ) {
+	    L1SC_SEND_UNLOCK(sc, pl);
+	    return 0; /* couldn't send anything; wait for buffer to drain */
+	}
+	else {
+	    /* buffer's in use, but we're synchronous I/O, so we're going
+	     * to send whatever's in there right now and take the buffer
+	     */
+	    while( sc->sent < sc->send_len )
+		brl1_send_chars( sc );
+	}
+    }
+    else {
+	sc->send_in_use = 1;
+    }
+    *send_ptr++ = BRL1_FLAG_CH;
+    *send_ptr++ = type_and_subch;
+    pkt_len += 2;
+    crc = crc16_calc( crc, type_and_subch );
+
+    /* limit number of characters accepted to max payload size */
+    if( len > (BRL1_QSIZE - 1) )
+	len = (BRL1_QSIZE - 1);
+
+    /* copy in the message buffer (inserting PPP 
+     * framing info where necessary)
+     */
+    for( index = 0; index < len; index++ ) {
+
+	switch( *msg ) {
+	    
+	  case BRL1_FLAG_CH:
+	    *send_ptr++ = BRL1_ESC_CH;
+	    *send_ptr++ = (*msg) ^ BRL1_XOR_CH;
+	    pkt_len += 2;
+	    break;
+	    
+	  case BRL1_ESC_CH:
+	    *send_ptr++ = BRL1_ESC_CH;
+	    *send_ptr++ = (*msg) ^ BRL1_XOR_CH;
+	    pkt_len += 2;
+	    break;
+	    
+	  default:
+	    *send_ptr++ = *msg;
+	    pkt_len++;
+	}
+	crc = crc16_calc( crc, *msg );
+	msg++;
+    }
+    crc ^= 0xffff;
+
+    for( index = 0; index < sizeof(crc); index++ ) {
+	char crc_char = (char)(crc & 0x00FF);
+	if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
+	    *send_ptr++ = BRL1_ESC_CH;
+	    pkt_len++;
+	    crc_char ^= BRL1_XOR_CH;
+	}
+	*send_ptr++ = crc_char;
+	pkt_len++;
+	crc >>= 8;
+    }
+    
+    *send_ptr++ = BRL1_FLAG_CH;
+    pkt_len++;
+
+    sc->send_len = pkt_len;
+    sc->sent = 0;
+
+    do {
+	brl1_send_chars( sc );
+    } while( (sc->sent < sc->send_len) && wait );
+
+    if( sc->sent == sc->send_len ) {
+	/* success! release the send buffer */
+	sc->send_in_use = 0;
+    }
+    else if( !wait ) {
+	/* enable low-water interrupts so buffer will be drained */
+	uart_enable_xmit_intr(sc);
+    }
+    L1SC_SEND_UNLOCK(sc, pl);
+    return len;
+}
+
+
+/* brl1_send_cont is intended to be called as an interrupt service
+ * routine.  It sends until the UART won't accept any more characters,
+ * or until an error is encountered (in which case we surrender the
+ * send buffer and give up trying to send the packet).  Once the
+ * last character in the packet has been sent, this routine releases
+ * the send buffer and calls any previously-registered "low-water"
+ * output routines.
+ */
+int
+brl1_send_cont( l1sc_t *sc )
+{
+    int pl;
+    int done = 0;
+    brl1_notif_t callups[BRL1_NUM_SUBCHANS];
+    brl1_notif_t *callup;
+    brl1_sch_t *subch;
+    int index;
+
+    L1SC_SEND_LOCK(sc, pl);
+    brl1_send_chars( sc );
+    done = (sc->sent == sc->send_len);
+    if( done ) {
+
+	sc->send_in_use = 0;
+	uart_disable_xmit_intr(sc);
+
+	/* collect pointers to callups *before* unlocking */
+	subch = sc->subch;
+	callup = callups;
+	for( index = 0; index < BRL1_NUM_SUBCHANS; index++ ) {
+	    *callup = subch->tx_notify;
+	    subch++;
+	    callup++;
+	}
+    }
+    L1SC_SEND_UNLOCK(sc, pl);
+
+    if( done ) {
+	/* call any upper layer that's asked for low-water notification */
+	callup = callups;
+	for( index = 0; index < BRL1_NUM_SUBCHANS; index++ ) {
+	    if( *callup )
+		(*(*callup))( sc, index );
+	    callup++;
+	}
+    }
+    return 0;
+}
+
+
+/* internal function -- used by brl1_receive to read a character 
+ * from the uart and check whether errors occurred in the process.
+ */
+static int
+read_uart( l1sc_t *sc, int *c, int *result )
+{
+    *c = sc->getc_f( sc );
+
+    /* no character is available */
+    if( *c == UART_NO_CHAR ) {
+	*result = BRL1_NO_MESSAGE;
+	return 0;
+    }
+
+    /* some error in UART */
+    if( *c < 0 ) {
+	*result = BRL1_LINK;
+	return 0;
+    }
+
+    /* everything's fine */
+    *result = BRL1_VALID;
+    return 1;
+}
+
+
+/*
+ * brl1_receive
+ *
+ * This function reads a Bedrock-L1 protocol packet into the l1sc_t
+ * response buffer.
+ *
+ * The operation of this function can be expressed as a finite state
+ * machine:
+ *
+
+START STATE			INPUT		TRANSITION
+==========================================================
+BRL1_IDLE (reset or error)	flag		BRL1_FLAG
+				other		BRL1_IDLE@
+
+BRL1_FLAG (saw a flag (0x7e))	flag		BRL1_FLAG
+				escape		BRL1_IDLE@
+				header byte	BRL1_HDR
+				other		BRL1_IDLE@
+
+BRL1_HDR (saw a type/subch byte)(see below)	BRL1_BODY
+						BRL1_HDR
+
+BRL1_BODY (reading packet body)	flag		BRL1_FLAG
+				escape		BRL1_ESC
+				other		BRL1_BODY
+
+BRL1_ESC (saw an escape (0x7d))	flag		BRL1_FLAG@
+				escape		BRL1_IDLE@
+				other		BRL1_BODY
+==========================================================
+
+"@" denotes an error transition.
+
+ * The BRL1_HDR state is a transient state which doesn't read input,
+ * but just provides a way in to code which decides to whom an
+ * incoming packet should be directed.
+ *
+ * brl1_receive can be used to poll for input from the L1, or as 
+ * an interrupt service routine.  It reads as much data as is
+ * ready from the junk bus UART and places into the appropriate
+ * input queues according to subchannel.  The header byte is
+ * stripped from console-type data, but is retained for message-
+ * type data (L1 responses).  A length byte will also be
+ * prepended to message-type packets.
+ *
+ * This routine is non-blocking; if the caller needs to block
+ * for input, it must call brl1_receive in a loop.
+ *
+ * brl1_receive returns when there is no more input, the queue
+ * for the current incoming message is full, or there is an
+ * error (parity error, bad header, bad CRC, etc.).
+ */
+
+#define STATE_SET(l,s)	((l)->brl1_state = (s))
+#define STATE_GET(l)	((l)->brl1_state)
+
+#define LAST_HDR_SET(l,h)	((l)->brl1_last_hdr = (h))
+#define LAST_HDR_GET(l)		((l)->brl1_last_hdr)
+
+#define SEQSTAMP_INCR(l)
+#define SEQSTAMP_GET(l)
+
+#define VALID_HDR(c)				\
+    ( SUBCH((c)) <= SC_CONS_SYSTEM		\
+	? PKT_TYPE((c)) == BRL1_REQUEST		\
+	: ( PKT_TYPE((c)) == BRL1_RESPONSE ||	\
+	    PKT_TYPE((c)) == BRL1_EVENT ) )
+
+#define IS_TTY_PKT(l) \
+         ( SUBCH(LAST_HDR_GET(l)) <= SC_CONS_SYSTEM ? 1 : 0 )
+
+
+int
+brl1_receive( l1sc_t *sc )
+{
+    int result;		/* value to be returned by brl1_receive */
+    int c;		/* most-recently-read character	     	*/
+    int pl;		/* priority level for UART receive lock */
+    int done;		/* set done to break out of recv loop	*/
+    sc_cq_t *q;		/* pointer to queue we're working with	*/
+
+    result = BRL1_NO_MESSAGE;
+
+    L1SC_RECV_LOCK( sc, pl );
+    L1_CONS_HW_LOCK( sc );
+
+    done = 0;
+    while( !done )
+    {
+	switch( STATE_GET(sc) )
+	{
+
+	  case BRL1_IDLE:
+	    /* Initial or error state.  Waiting for a flag character
+             * to resynchronize with the L1.
+             */
+
+	    if( !read_uart( sc, &c, &result ) ) {
+
+		/* error reading uart */
+		done = 1;
+		continue;
+	    }
+	    
+	    if( c == BRL1_FLAG_CH ) {
+		/* saw a flag character */
+		STATE_SET( sc, BRL1_FLAG );
+		continue;
+	    }
+	    break;
+	    
+	  case BRL1_FLAG:
+	    /* One or more flag characters have been read; look for
+	     * the beginning of a packet (header byte).
+	     */
+	    
+	    if( !read_uart( sc, &c, &result ) ) {
+
+		/* error reading uart */
+		if( c != UART_NO_CHAR )
+		    STATE_SET( sc, BRL1_IDLE );
+
+		done = 1;
+		continue;
+	    }
+	    
+	    if( c == BRL1_FLAG_CH ) {
+		/* multiple flags are OK */
+		continue;
+	    }
+
+	    if( !VALID_HDR( c ) ) {
+		/* if c isn't a flag it should have been
+		 * a valid header, so we have an error
+		 */
+		result = BRL1_PROTOCOL;
+		STATE_SET( sc, BRL1_IDLE );
+		done = 1;
+		continue;
+	    }
+
+	    /* we have a valid header byte */
+	    LAST_HDR_SET( sc, c );
+	    STATE_SET( sc, BRL1_HDR );
+
+	    break; 
+
+	  case BRL1_HDR:
+	    /* A header byte has been read. Do some bookkeeping. */
+	    q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
+	    ASSERT(q);
+	    
+	    if( !IS_TTY_PKT(sc) ) {
+		/* if this is an event or command response rather
+		 * than console I/O, we need to reserve a couple
+		 * of extra spaces in the queue for the header
+		 * byte and a length byte; if we can't, stay in
+		 * the BRL1_HDR state.
+		 */
+		if( cq_room( q ) < 2 ) {
+		    result = BRL1_FULL_Q;
+		    done = 1;
+		    continue;
+		}
+		cq_tent_add( q, 0 );			/* reserve length byte */
+		cq_tent_add( q, LAST_HDR_GET( sc ) );	/* record header byte  */
+	    }
+	    STATE_SET( sc, BRL1_BODY );
+
+	    break;
+
+	  case BRL1_BODY:
+	    /* A header byte has been read.  We are now attempting
+	     * to receive the packet body.
+	     */
+
+	    q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
+	    ASSERT(q);
+
+	    /* if the queue we want to write into is full, don't read from
+	     * the uart (this provides backpressure to the L1 side)
+	     */
+	    if( cq_tent_full( q ) ) {
+		result = BRL1_FULL_Q;
+		done = 1;
+		continue;
+	    }
+	    
+	    if( !read_uart( sc, &c, &result ) ) {
+
+		/* error reading uart */
+		if( c != UART_NO_CHAR )
+		    STATE_SET( sc, BRL1_IDLE );
+		done = 1;
+		continue;
+	    }
+
+	    if( c == BRL1_ESC_CH ) {
+		/* prepare to unescape the next character */
+		STATE_SET( sc, BRL1_ESC );
+		continue;
+	    }
+	    
+	    if( c == BRL1_FLAG_CH ) {
+		/* flag signifies the end of a packet */
+
+		unsigned short crc;	/* holds the crc as we calculate it */
+		int i;			/* index variable */
+		brl1_sch_t *subch;      /* subchannel for received packet */
+		int sch_pl;		/* cookie for subchannel lock */
+		brl1_notif_t callup;	/* "data ready" callup */
+
+		/* whatever else may happen, we've seen a flag and we're
+		 * starting a new packet
+		 */
+		STATE_SET( sc, BRL1_FLAG );
+		SEQSTAMP_INCR(sc); /* bump the packet sequence counter */
+		
+		/* if the packet body has less than 2 characters,
+		 * it can't be a well-formed packet.  Discard it.
+		 */
+		if( cq_tent_len( q ) < /* 2 + possible length byte */
+		    (2 + (IS_TTY_PKT(sc) ? 0 : 1)) )
+		{
+		    result = BRL1_PROTOCOL;
+		    cq_discard_tent( q );
+		    STATE_SET( sc, BRL1_FLAG );
+		    done = 1;
+		    continue;
+		}
+		
+		/* check CRC */
+
+		/* accumulate CRC, starting with the header byte and
+		 * ending with the transmitted CRC.  This should
+		 * result in a known good value.
+		 */
+		crc = crc16_calc( INIT_CRC, LAST_HDR_GET(sc) );
+		for( i = (q->ipos + (IS_TTY_PKT(sc) ? 0 : 2)) % BRL1_QSIZE;
+		     i != q->tent_next;
+		     i = (i + 1) % BRL1_QSIZE )
+		{
+		    crc = crc16_calc( crc, q->buf[i] );
+		}
+
+		/* verify the caclulated crc against the "good" crc value;
+		 * if we fail, discard the bad packet and return an error.
+		 */
+		if( crc != (unsigned short)GOOD_CRC ) {
+		    result = BRL1_CRC;
+		    cq_discard_tent( q );
+		    STATE_SET( sc, BRL1_FLAG );
+		    done = 1;
+		    continue;
+		}
+		
+		/* so the crc check was ok.  Now we discard the CRC
+		 * from the end of the received bytes.
+		 */
+		q->tent_next += (BRL1_QSIZE - 2);
+		q->tent_next %= BRL1_QSIZE;
+
+		/* get the subchannel and lock it */
+		subch = &(sc->subch[SUBCH( LAST_HDR_GET(sc) )]);
+		SUBCH_DATA_LOCK( subch, sch_pl );
+		
+		/* if this isn't a console packet, we need to record
+		 * a length byte
+		 */
+		if( !IS_TTY_PKT(sc) ) {
+		    q->buf[q->ipos] = cq_tent_len( q ) - 1;
+		}
+		
+		/* record packet for posterity */
+		cq_commit_tent( q );
+		result = BRL1_VALID;
+
+		/* notify subchannel owner that there's something
+		 * on the queue for them
+		 */
+		atomicAddInt( &(subch->packet_arrived), 1);
+		callup = subch->rx_notify;
+		SUBCH_DATA_UNLOCK( subch, sch_pl );
+
+		if( callup ) {
+		    L1_CONS_HW_UNLOCK( sc );
+		    L1SC_RECV_UNLOCK( sc, pl );
+		    (*callup)( sc, SUBCH(LAST_HDR_GET(sc)) );
+		    L1SC_RECV_LOCK( sc, pl );
+		    L1_CONS_HW_LOCK( sc );
+		}
+		continue;	/* go back for more! */
+	    }
+	    
+	    /* none of the special cases applied; we've got a normal
+	     * body character
+	     */
+	    cq_tent_add( q, c );
+
+	    break;
+
+	  case BRL1_ESC:
+	    /* saw an escape character.  The next character will need
+	     * to be unescaped.
+	     */
+
+	    q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
+	    ASSERT(q);
+
+	    /* if the queue we want to write into is full, don't read from
+	     * the uart (this provides backpressure to the L1 side)
+	     */
+	    if( cq_tent_full( q ) ) {
+		result = BRL1_FULL_Q;
+		done = 1;
+		continue;
+	    }
+	    
+	    if( !read_uart( sc, &c, &result ) ) {
+
+		/* error reading uart */
+		if( c != UART_NO_CHAR ) {
+		    cq_discard_tent( q );
+		    STATE_SET( sc, BRL1_IDLE );
+		}
+		done = 1;
+		continue;
+	    }
+	    
+	    if( c == BRL1_FLAG_CH ) {
+		/* flag after escape is an error */
+		STATE_SET( sc, BRL1_FLAG );
+		cq_discard_tent( q );
+		result = BRL1_PROTOCOL;
+		done = 1;
+		continue;
+	    }
+
+	    if( c == BRL1_ESC_CH ) {
+		/* two consecutive escapes is an error */
+		STATE_SET( sc, BRL1_IDLE );
+		cq_discard_tent( q );
+		result = BRL1_PROTOCOL;
+		done = 1;
+		continue;
+	    }
+	    
+	    /* otherwise, we've got a character that needs
+	     * to be unescaped
+	     */
+	    cq_tent_add( q, (c ^ BRL1_XOR_CH) );
+	    STATE_SET( sc, BRL1_BODY );
+
+	    break;
+
+	} /* end of switch( STATE_GET(sc) ) */
+    } /* end of while(!done) */
+    
+    L1_CONS_HW_UNLOCK( sc );
+    L1SC_RECV_UNLOCK(sc, pl);
+
+    return result;
+}	    
+
+
+/* brl1_init initializes the Bedrock/L1 protocol layer.  This includes
+ * zeroing out the send and receive state information.
+ */
+
+void
+brl1_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart )
+{
+    int i;
+    brl1_sch_t *subch;
+
+    /* start from a clean slate, then record which UART (local or
+     * remote/router, per the uart vector) this state struct drives
+     */
+    bzero( sc, sizeof( *sc ) );
+    sc->nasid = nasid;
+    sc->uart = uart;
+    /* select character-I/O routines matching the UART type */
+    sc->getc_f = (uart == BRL1_LOCALUART ? uart_getc : rtr_uart_getc);
+    sc->putc_f = (uart == BRL1_LOCALUART ? uart_putc : rtr_uart_putc);
+    sc->sol = 1;	/* presumably "start of line" -- confirm */
+    subch = sc->subch;
+
+    /* initialize L1 subchannels
+     */
+
+    /* assign processor TTY channels */
+    for( i = 0; i < CPUS_PER_NODE; i++, subch++ ) {
+	subch->use = BRL1_SUBCH_RSVD;
+	subch->packet_arrived = 0;
+	spinlock_init( &(subch->data_lock), NULL );
+	sv_init( &(subch->arrive_sv), SV_FIFO, NULL );
+	subch->tx_notify = NULL;
+	/* (for now, drop elscuart packets in the kernel) */
+	subch->rx_notify = brl1_discard_packet;
+	subch->iqp = &sc->garbage_q;
+    }
+
+    /* assign system TTY channel (first free subchannel after each
+     * processor's individual TTY channel has been assigned)
+     */
+    subch->use = BRL1_SUBCH_RSVD;
+    subch->packet_arrived = 0;
+    spinlock_init( &(subch->data_lock), NULL );
+    sv_init( &(subch->arrive_sv), SV_FIFO, NULL );
+    subch->tx_notify = NULL;
+    if( sc->uart == BRL1_LOCALUART ) {
+	/* local UART gets a real receive queue for console input */
+	subch->iqp = kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP,
+				       NASID_TO_COMPACT_NODEID(nasid) );
+	ASSERT( subch->iqp );
+	cq_init( subch->iqp );
+	subch->rx_notify = NULL;
+    }
+    else {
+	/* we shouldn't be getting console input from remote UARTs */
+	subch->iqp = &sc->garbage_q;
+	subch->rx_notify = brl1_discard_packet;
+    }
+    subch++; i++;
+
+    /* "reserved" subchannels (0x05-0x0F); for now, throw away
+     * incoming packets
+     * NOTE(review): despite the "reserved" label these are marked
+     * BRL1_SUBCH_FREE, identical to the "free" loop below -- confirm
+     * this is intended (sc_open only allocates from BRL1_CMD_SUBCH up)
+     */
+    for( ; i < 0x10; i++, subch++ ) {
+	subch->use = BRL1_SUBCH_FREE;
+	subch->packet_arrived = 0;
+	subch->tx_notify = NULL;
+	subch->rx_notify = brl1_discard_packet;
+	subch->iqp = &sc->garbage_q;
+    }
+
+    /* remaining subchannels are free */
+    for( ; i < BRL1_NUM_SUBCHANS; i++, subch++ ) {
+	subch->use = BRL1_SUBCH_FREE;
+	subch->packet_arrived = 0;
+	subch->tx_notify = NULL;
+	subch->rx_notify = brl1_discard_packet;
+	subch->iqp = &sc->garbage_q;
+    }
+
+    /* initialize synchronization structures
+     */
+    spinlock_init( &(sc->send_lock), NULL );
+    spinlock_init( &(sc->recv_lock), NULL );
+    spinlock_init( &(sc->subch_lock), NULL );
+
+    if( sc->uart == BRL1_LOCALUART ) {
+	uart_init( sc, UART_BAUD_RATE );
+    }
+    else {
+	rtr_uart_init( sc, UART_BAUD_RATE );
+    }
+
+    /* Set up remaining fields using L1 command functions-- elsc_module_get
+     * to read the module id, elsc_debug_get to see whether or not we're
+     * in verbose mode.
+     */
+    {
+	extern int elsc_module_get(l1sc_t *);
+
+	sc->modid = elsc_module_get( sc );
+	/* a negative return means the module id couldn't be read */
+	sc->modid = 
+	    (sc->modid < 0 ? INVALID_MODULE : sc->modid);
+
+	sc->verbose = 1;
+    }
+}
+
+
+/*********************************************************************
+ * These are interrupt-related functions used in the kernel to service
+ * the L1.
+ */
+
+/*
+ * brl1_intrd is the function which is called in a loop by the
+ * xthread that services L1 interrupts.
+ */
+#ifdef IRIX
+void
+brl1_intrd( struct eframe_s *ep )
+{
+    u_char isr_reg;
+    l1sc_t *sc = get_elsc();
+
+    /* service the UART until it has neither input pending nor
+     * transmit work to continue
+     */
+    isr_reg = READ_L1_UART_REG(sc->nasid, REG_ISR);
+
+    while( isr_reg & (ISR_RxRDY | ISR_TxRDY) ) {
+
+	if( isr_reg & ISR_RxRDY ) {
+	    brl1_receive(sc);
+	}
+	/* resume an in-progress send if the tx interrupt is asserted,
+	 * or if a send is pending and the UART can accept a byte
+	 */
+	if( (isr_reg & ISR_TxRDY) || 
+	    (sc->send_in_use && UART_PUTC_READY(sc->nasid)) ) 
+	{
+	    brl1_send_cont(sc);
+	}
+	isr_reg = READ_L1_UART_REG(sc->nasid, REG_ISR);
+    }
+
+    /* uart interrupts were blocked at bedrock when the interrupt
+     * was initially answered; reenable them now
+     */
+    intr_unblock_bit( sc->intr_cpu, UART_INTR );
+    ep = ep; /* placate the compiler */
+}
+#endif
+
+
+
+/* brl1_intr is called directly from the uart interrupt; after it runs, the
+ * interrupt "daemon" xthread is signalled to continue.
+ */
+#ifdef IRIX
+void
+brl1_intr( struct eframe_s *ep )
+{
+    /* Disable the UART interrupt, giving the xthread time to respond.
+     * When the daemon (xthread) finishes doing its thing, it will
+     * unblock the interrupt (see brl1_intrd above).
+     */
+    intr_block_bit( get_elsc()->intr_cpu, UART_INTR );
+    ep = ep; /* placate the compiler (ep is unused) */
+}
+
+
+/* set up uart interrupt handling for this node's uart
+ */
+void
+brl1_connect_intr( l1sc_t *sc )
+{
+    cpuid_t last_cpu;	/* NOTE(review): appears unused -- confirm */
+
+    /* route UART interrupts to this node's first CPU */
+    sc->intr_cpu = nodepda->node_first_cpu;
+
+    /* brl1_intr runs at interrupt level; brl1_intrd is the xthread
+     * that does the actual servicing
+     */
+    if( intr_connect_level(sc->intr_cpu, UART_INTR, INTPEND0_MAXMASK,
+			   (intr_func_t)brl1_intrd, 0, 
+			   (intr_func_t)brl1_intr) )
+	cmn_err(CE_PANIC, "brl1_connect_intr: Can't connect UART interrupt.");
+
+    uart_enable_recv_intr( sc );
+}
+#endif	/* IRIX */
+
+#ifdef SABLE
+/* this function is called periodically to generate fake interrupts
+ * and allow brl1_intrd to send/receive characters
+ */
+void
+hubuart_service( void )
+{
+    l1sc_t *sc = get_elsc();
+    /* note that we'll lose error state by reading the lsr_reg.
+     * This is probably ok in the frictionless domain of sable.
+     */
+    int lsr_reg;
+    nasid_t nasid = sc->nasid;
+    lsr_reg = READ_L1_UART_REG( nasid, REG_LSR );
+    /* if a character has arrived or the transmit shift register is
+     * empty, fake a UART interrupt so brl1_intrd gets to run
+     */
+    if( lsr_reg & (LSR_RCA | LSR_XSRE) ) {
+        REMOTE_HUB_PI_SEND_INTR(0, 0, UART_INTR);
+    }
+}
+#endif /* SABLE */
+
+
+/*********************************************************************
+ * The following function allows the kernel to "go around" the
+ * uninitialized l1sc structure to allow console output during
+ * early system startup.
+ */
+
+/* These are functions to use from serial_in/out when in protocol
+ * mode to send and receive uart control regs.
+ */
+/* write a raw value into a local L1 UART control register */
+void
+brl1_send_control(int offset, int value)
+{
+	WRITE_L1_UART_REG(get_nasid(), offset, value);
+}
+
+/* read a raw value back from a local L1 UART control register */
+int
+brl1_get_control(int offset)
+{
+	return READ_L1_UART_REG(get_nasid(), offset);
+}
+
+#define PUTCHAR(ch) \
+    { \
+        while( !(READ_L1_UART_REG( nasid, REG_LSR ) & LSR_XHRE) );  \
+        WRITE_L1_UART_REG( nasid, REG_DAT, (ch) ); \
+    }
+
+int
+brl1_send_console_packet( char *str, int len )
+{
+    /* Send one console-output packet directly to the local L1 UART,
+     * bypassing the l1sc_t machinery (used during early startup).
+     * Returns the number of payload bytes consumed.
+     */
+    int sent = len;
+    char crc_char;
+    unsigned short crc = INIT_CRC;
+    nasid_t nasid = get_nasid();
+
+    /* start-of-packet flag, then the header byte (console event) */
+    PUTCHAR( BRL1_FLAG_CH );
+    PUTCHAR( BRL1_EVENT | SC_CONS_SYSTEM );
+    crc = crc16_calc( crc, (BRL1_EVENT | SC_CONS_SYSTEM) );
+
+    while( len ) {
+
+	/* bytes that collide with the flag/escape codes are escaped
+	 * on the wire; the CRC is computed over the unescaped bytes
+	 */
+	if( (*str == BRL1_FLAG_CH) || (*str == BRL1_ESC_CH) ) {
+	    PUTCHAR( BRL1_ESC_CH );
+	    PUTCHAR( (*str) ^ BRL1_XOR_CH );
+	}
+	else {
+	    PUTCHAR( *str );
+	}
+	
+	crc = crc16_calc( crc, *str );
+
+	str++; len--;
+    }
+    
+    /* append the complemented CRC, low byte first, escaping each
+     * byte if needed, then the closing flag
+     */
+    crc ^= 0xffff;
+    crc_char = crc & 0xff;
+    if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
+	crc_char ^= BRL1_XOR_CH;
+	PUTCHAR( BRL1_ESC_CH );
+    }
+    PUTCHAR( crc_char );
+    crc_char = (crc >> 8) & 0xff;
+    if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
+	crc_char ^= BRL1_XOR_CH;
+	PUTCHAR( BRL1_ESC_CH );
+    }
+    PUTCHAR( crc_char );
+    PUTCHAR( BRL1_FLAG_CH );
+
+    /* len is 0 here unless the loop was never entered */
+    return sent - len;
+}
+
+
+/*********************************************************************
+ * l1_cons functions
+ *
+ * These allow the L1 to act as the system console.  They're intended
+ * to abstract away most of the br/l1 internal details from the
+ * _L1_cons_* functions (in the prom-- see "l1_console.c") and
+ * l1_* functions (in the kernel-- see "sio_l1.c") that they support.
+ *
+ */
+
+int
+l1_cons_poll( l1sc_t *sc )
+{
+    /* Returns 1 if console input is pending, 0 otherwise.
+     *
+     * Guard against being called before the l1sc_t structure for this
+     * node's module_t has been initialized (i.e., with a NULL pointer).
+     */
+    if( sc == NULL )
+	return 0;
+
+    /* only kick the protocol layer if nothing is queued yet */
+    if( !sc->subch[SC_CONS_SYSTEM].packet_arrived ) {
+	brl1_receive( sc );
+    }
+
+    return( sc->subch[SC_CONS_SYSTEM].packet_arrived ? 1 : 0 );
+}
+
+
+/* pull a character off of the system console queue (if one is available)
+ */
+int
+l1_cons_getc( l1sc_t *sc )
+{
+    /* Pull one character off the system console receive queue.
+     * Returns the character, or 0 if nothing is available.
+     */
+    int c;
+#ifdef SPINLOCKS_WORK
+    int pl;
+#endif
+    brl1_sch_t *subch = &(sc->subch[SC_CONS_SYSTEM]);
+    sc_cq_t *q = subch->iqp;
+
+    /* poll the lower layer; bail out if no input is pending */
+    if( !l1_cons_poll( sc ) ) {
+	return 0;
+    }
+
+    SUBCH_DATA_LOCK( subch, pl );
+    if( cq_empty( q ) ) {
+	/* queue drained between the poll and taking the lock */
+	subch->packet_arrived = 0;
+	SUBCH_DATA_UNLOCK( subch, pl );
+	return 0;
+    }
+    cq_rem( q, c );	/* cq_rem stores the dequeued byte into c */
+    if( cq_empty( q ) )
+	subch->packet_arrived = 0;
+    SUBCH_DATA_UNLOCK( subch, pl );
+
+    return c;
+}
+
+
+/* initialize the system console subchannel
+ */
+void
+l1_cons_init( l1sc_t *sc )
+{
+    /* Reset the system console subchannel: clear the arrival flag
+     * and reinitialize its receive queue.
+     */
+#ifdef SPINLOCKS_WORK
+    int pl;
+#endif
+    brl1_sch_t *subch = &(sc->subch[SC_CONS_SYSTEM]);
+
+    SUBCH_DATA_LOCK( subch, pl );
+    subch->packet_arrived = 0;
+    cq_init( subch->iqp );
+    SUBCH_DATA_UNLOCK( subch, pl );
+}
+
+
+/*
+ * Write a message to the L1 on the system console subchannel.
+ *
+ * Danger: don't use a non-zero value for the wait parameter unless you're
+ * someone important (like a kernel error message).
+ */
+int
+l1_cons_write( l1sc_t *sc, char *msg, int len, int wait )
+{
+    /* ship msg as an "event" packet on the system console subchannel;
+     * returns whatever brl1_send returns (bytes sent, or an error)
+     */
+    return( brl1_send( sc, msg, len, (SC_CONS_SYSTEM | BRL1_EVENT), wait ) );
+}
+
+
+/* 
+ * Read as many characters from the system console receive queue as are
+ * available there (up to avail bytes).
+ */
+int
+l1_cons_read( l1sc_t *sc, char *buf, int avail )
+{
+    /* Copy up to avail bytes from the console receive queue into buf.
+     * Returns the number of bytes copied.
+     */
+    int pl;
+    int before_wrap, after_wrap;
+    brl1_sch_t *subch = &(sc->subch[SC_CONS_SYSTEM]);
+    sc_cq_t *q = subch->iqp;
+
+    if( !(subch->packet_arrived) )
+	return 0;
+
+    SUBCH_DATA_LOCK( subch, pl );
+    if( q->opos > q->ipos ) {
+	/* queued data wraps past the end of the circular buffer, so
+	 * the copy may need to be done in two pieces
+	 */
+	before_wrap = BRL1_QSIZE - q->opos;
+	if( before_wrap >= avail ) {
+	    before_wrap = avail;
+	    after_wrap = 0;
+	}
+	else {
+	    avail -= before_wrap;
+	    after_wrap = q->ipos;
+	    if( after_wrap > avail )
+		after_wrap = avail;
+	}
+    }
+    else {
+	/* contiguous case: everything between opos and ipos */
+	before_wrap = q->ipos - q->opos;
+	if( before_wrap > avail )
+	    before_wrap = avail;
+	after_wrap = 0;
+    }
+
+
+    BCOPY( q->buf + q->opos, buf, before_wrap  );
+    if( after_wrap )
+        BCOPY( q->buf, buf + before_wrap, after_wrap  );
+    q->opos = ((q->opos + before_wrap + after_wrap) % BRL1_QSIZE);
+
+    /* NOTE(review): packet_arrived is cleared even when avail was too
+     * small to drain the queue, so leftover bytes won't be returned
+     * until another packet arrives -- confirm this is intended
+     */
+    subch->packet_arrived = 0;
+    SUBCH_DATA_UNLOCK( subch, pl );
+
+    return( before_wrap + after_wrap );
+}
+	
+
+/*
+ * Install a callback function for the system console subchannel 
+ * to allow an upper layer to be notified when the send buffer 
+ * has been emptied.
+ */
+void
+l1_cons_tx_notif( l1sc_t *sc, brl1_notif_t func )
+{
+    /* register func as the "transmit buffer emptied" callback for
+     * the system console subchannel
+     */
+    subch_set_tx_notify( sc, SC_CONS_SYSTEM, func );
+}
+
+
+/*
+ * Install a callback function for the system console subchannel
+ * to allow an upper layer to be notified when a packet has been
+ * received.
+ */
+void
+l1_cons_rx_notif( l1sc_t *sc, brl1_notif_t func )
+{
+    /* register func as the "packet received" callback for the
+     * system console subchannel
+     */
+    subch_set_rx_notify( sc, SC_CONS_SYSTEM, func );
+}
+
+
+
+
+/*********************************************************************
+ * The following functions and definitions implement the "message"-
+ * style interface to the L1 system controller.
+ *
+ * Note that throughout this file, "sc" generally stands for "system
+ * controller", while "subchannels" tend to be represented by
+ * variables with names like subch or ch.
+ *
+ */
+
+#ifdef L1_DEBUG
+#define L1_DBG_PRF(x) printf x
+#else
+#define L1_DBG_PRF(x)
+#endif
+
+/* sc_data_ready is called to signal threads that are blocked on 
+ * l1 input.
+ */
+void
+sc_data_ready( l1sc_t *sc, int ch )
+{
+    /* wake any thread sleeping on this subchannel's arrival sv;
+     * installed as the rx_notify callback by sc_open/sc_recv_intr
+     */
+    brl1_sch_t *subch = &(sc->subch[ch]);
+    sv_signal( &(subch->arrive_sv) );
+}
+
+/* sc_open reserves a subchannel to send a request to the L1 (the
+ * L1's response will arrive on the same channel).  The number
+ * returned by sc_open is the system controller subchannel
+ * acquired.
+ */
+int
+sc_open( l1sc_t *sc, uint target )
+{
+    /* Reserve a free subchannel for a request/response exchange with
+     * the L1.  Returns the subchannel number, or SC_NSUBCH if no
+     * subchannel (or no receive-queue memory) is available.
+     *
+     * The kernel version implements a locking scheme to arbitrate
+     * subchannel assignment.
+     */
+    int ch;
+    int pl;
+    brl1_sch_t *subch;
+
+    SUBCH_LOCK( sc, pl );
+
+    /* Look for a free subchannel. Subchannels 0-15 are reserved
+     * for other purposes.
+     */
+    for( subch = &(sc->subch[BRL1_CMD_SUBCH]), ch = BRL1_CMD_SUBCH; 
+			ch < BRL1_NUM_SUBCHANS; subch++, ch++ ) {
+        if( subch->use == BRL1_SUBCH_FREE )
+            break;
+    }
+
+    if( ch == BRL1_NUM_SUBCHANS ) {
+        /* there were no subchannels available! */
+        SUBCH_UNLOCK( sc, pl );
+        return SC_NSUBCH;
+    }
+
+    /* mark the subchannel reserved before dropping the lock so no
+     * other caller can grab it while we finish initializing
+     */
+    subch->use = BRL1_SUBCH_RSVD;
+    SUBCH_UNLOCK( sc, pl );
+
+    subch->packet_arrived = 0;
+    subch->target = target;
+    sv_init( &(subch->arrive_sv), SV_FIFO, NULL );
+    spinlock_init( &(subch->data_lock), NULL );
+    subch->tx_notify = NULL;
+    subch->rx_notify = sc_data_ready;
+    subch->iqp = kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP,
+				   NASID_TO_COMPACT_NODEID(sc->nasid) );
+    if( !subch->iqp ) {
+	/* KM_NOSLEEP allocation failed.  Release the subchannel and
+	 * report failure rather than letting cq_init dereference a
+	 * NULL pointer (the original code relied on an ASSERT alone,
+	 * which compiles away in non-debug builds).
+	 */
+	subch->iqp = &sc->garbage_q;
+	SUBCH_LOCK( sc, pl );
+	subch->use = BRL1_SUBCH_FREE;
+	SUBCH_UNLOCK( sc, pl );
+	return SC_NSUBCH;
+    }
+    cq_init( subch->iqp );
+
+    return ch;
+}
+
+
+/* sc_close frees a Bedrock<->L1 subchannel.
+ */
+int
+sc_close( l1sc_t *sc, int ch )
+{
+    /* Free a Bedrock<->L1 subchannel previously obtained from
+     * sc_open.  Returns SC_NOPEN if the subchannel wasn't open,
+     * SC_SUCCESS otherwise.
+     */
+    brl1_sch_t *subch;
+    int pl;
+
+    SUBCH_LOCK( sc, pl );
+    subch = &(sc->subch[ch]);
+    if( subch->use != BRL1_SUBCH_RSVD ) {
+        /* we're trying to close a subchannel that's not open;
+         * drop the lock before bailing out (the original code
+         * returned with the subchannel lock still held)
+         */
+        SUBCH_UNLOCK( sc, pl );
+        return SC_NOPEN;
+    }
+
+    subch->packet_arrived = 0;
+    subch->use = BRL1_SUBCH_FREE;
+
+    /* wake any sleepers so they notice the channel has gone away */
+    sv_broadcast( &(subch->arrive_sv) );
+    sv_destroy( &(subch->arrive_sv) );
+    spinlock_destroy( &(subch->data_lock) );
+
+    /* release the receive queue allocated by sc_open and point the
+     * subchannel back at the shared garbage queue
+     */
+    ASSERT( subch->iqp && (subch->iqp != &sc->garbage_q) );
+    kmem_free( subch->iqp, sizeof(sc_cq_t) );
+    subch->iqp = &sc->garbage_q;
+
+    SUBCH_UNLOCK( sc, pl );
+
+    return SC_SUCCESS;
+}
+
+
+/* sc_construct_msg builds a bedrock-to-L1 request in the supplied
+ * buffer.  Returns the length of the message.  The
+ * safest course when passing a buffer to be filled in is to use
+ * BRL1_QSIZE as the buffer size.
+ *
+ * Command arguments are passed as type/argument pairs, i.e., to
+ * pass the number 5 as an argument to an L1 command, call
+ * sc_construct_msg as follows:
+ *
+ *    char msg[BRL1_QSIZE];
+ *    msg_len = sc_construct_msg( msg,
+ *				  BRL1_QSIZE,
+ *				  target_component,
+ *                                L1_ADDR_TASK_BOGUSTASK,
+ *                                L1_BOGUSTASK_REQ_BOGUSREQ,
+ *                                2,
+ *                                L1_ARG_INT, 5 );
+ *
+ * To pass an additional ASCII argument, you'd do the following:
+ *
+ *    char *str;
+ *    ... str points to a null-terminated ascii string ...
+ *    msg_len = sc_construct_msg( msg,
+ *                                BRL1_QSIZE,
+ *				  target_component,
+ *                                L1_ADDR_TASK_BOGUSTASK,
+ *                                L1_BOGUSTASK_REQ_BOGUSREQ,
+ *                                4,
+ *                                L1_ARG_INT, 5,
+ *                                L1_ARG_ASCII, str );
+ *
+ * Finally, arbitrary data of unknown type is passed using the argtype
+ * code L1_ARG_UNKNOWN, a data length, and a buffer pointer, e.g.
+ *
+ *    msg_len = sc_construct_msg( msg,
+ *                                BRL1_QSIZE,
+ *				  target_component,
+ *                                L1_ADDR_TASK_BOGUSTASK,
+ *                                L1_BOGUSTASK_REQ_BOGUSREQ,
+ *                                3,
+ *                                L1_ARG_UNKNOWN, 32, bufptr );
+ *
+ * ...passes 32 bytes of data starting at bufptr.  Note that no string or
+ * "unknown"-type argument should be long enough to overflow the message
+ * buffer.
+ *
+ * To construct a message for an L1 command that requires no arguments,
+ * you'd use the following:
+ *
+ *    msg_len = sc_construct_msg( msg,
+ *                                BRL1_QSIZE,
+ *				  target_component,
+ *                                L1_ADDR_TASK_BOGUSTASK,
+ *                                L1_BOGUSTASK_REQ_BOGUSREQ,
+ *                                0 );
+ *
+ * The final 0 means "no varargs".  Notice that this parameter is used to hold
+ * the number of additional arguments to sc_construct_msg, _not_ the actual
+ * number of arguments used by the L1 command (so 2 per L1_ARG_[INT,ASCII]
+ * type argument, and 3 per L1_ARG_UNKOWN type argument).  A call to construct
+ * an L1 command which required three integer arguments and two arguments of
+ * some arbitrary (unknown) type would pass 12 as the value for this parameter.
+ *
+ * ENDIANNESS WARNING: The following code does a lot of copying back-and-forth
+ * between byte arrays and four-byte big-endian integers.  Depending on the
+ * system controller connection and endianness of future architectures, some
+ * rewriting might be necessary.
+ */
+/* Returns the constructed message length, or -1 if the buffer is too
+ * small or an argument type is unrecognized (see the usage comment
+ * above for the varargs calling convention).
+ */
+int
+sc_construct_msg( l1sc_t  *sc,		/* system controller struct */
+		  int	   ch,           /* subchannel for this message */
+		  char    *msg,          /* message buffer */
+		  int      msg_len,      /* size of message buffer */
+                  l1addr_t addr_task,    /* target system controller task */
+                  short    req_code,     /* 16-bit request code */
+                  int      req_nargs,    /* # of arguments (varargs) passed */
+                  ... )                 /* any additional parameters */
+{
+    uint32_t buf32;   /* 32-bit buffer used to bounce things around */
+    void *bufptr;       /* used to hold command argument addresses */
+    va_list al;         /* variable argument list */
+    int index;          /* current index into msg buffer */
+    int argno;          /* current position in varargs list */
+    int l1_argno;       /* running total of arguments to l1 */
+    int l1_arg_t;       /* argument type/length */
+    int l1_argno_byte;  /* offset of argument count byte */
+
+    index = argno = 0;
+
+    /* set up destination address */
+    if( (msg_len -= sizeof( buf32 )) < 0 )
+	return -1;
+    L1_ADDRESS_TO_TASK( &buf32, sc->subch[ch].target, addr_task );
+    COPY_INT_TO_BUFFER(msg, index, buf32);
+
+    /* copy request code (big-endian, high byte first) */
+    if( (msg_len -= 2) < 0 )
+	return( -1 );
+    msg[index++] = ((req_code >> 8) & 0xff);
+    msg[index++] = (req_code & 0xff);
+
+    if( !req_nargs ) {
+        return index;
+    }
+
+    /* reserve a byte for the argument count (filled in at the end) */
+    if( (msg_len -= 1) < 0 )
+	return( -1 );
+    l1_argno_byte = index++;
+    l1_argno = 0;
+
+    /* copy additional arguments.  Every exit from this loop must go
+     * through va_end: the original code returned directly from error
+     * paths after va_start, which is undefined behavior (C99 7.15.1).
+     */
+    va_start( al, req_nargs );
+    while( argno < req_nargs ) {
+        l1_argno++;
+        l1_arg_t = va_arg( al, int ); argno++;
+        switch( l1_arg_t )
+        {
+          case L1_ARG_INT:
+	    if( (msg_len -= (sizeof( buf32 ) + 1)) < 0 ) {
+		va_end( al );
+		return( -1 );
+	    }
+            msg[index++] = L1_ARG_INT;
+            buf32 = (unsigned)va_arg( al, int ); argno++;
+	    COPY_INT_TO_BUFFER(msg, index, buf32);
+            break;
+
+          case L1_ARG_ASCII:
+            bufptr = va_arg( al, char* ); argno++;
+	    if( (msg_len -= (strlen( bufptr ) + 2)) < 0 ) {
+		va_end( al );
+		return( -1 );
+	    }
+            msg[index++] = L1_ARG_ASCII;
+            strcpy( (char *)&(msg[index]), (char *)bufptr );
+            index += (strlen( bufptr ) + 1); /* include terminating null */
+            break;
+
+	  case L1_ARG_UNKNOWN:
+              {
+                  int arglen;
+		  
+                  arglen = va_arg( al, int ); argno++;
+                  bufptr = va_arg( al, void* ); argno++;
+		  if( (msg_len -= (arglen + 1)) < 0 ) {
+		      va_end( al );
+		      return( -1 );
+		  }
+                  /* length is encoded in the low bits of the type byte */
+                  msg[index++] = L1_ARG_UNKNOWN | arglen;
+                  BCOPY( bufptr, &(msg[index]), arglen  );
+                  index += arglen;
+		  break;
+              }
+	  
+	  default: /* unhandled argument type */
+	    va_end( al );
+	    return -1;
+        }
+    }
+
+    va_end( al );
+    msg[l1_argno_byte] = l1_argno;
+
+    return index;
+}
+
+
+
+/* sc_interpret_resp verifies an L1 response to a bedrock request, and
+ * breaks the response data up into the constituent parts.  If the
+ * response message indicates error, or if a mismatch is found in the
+ * expected number and type of arguments, an error is returned.  The
+ * arguments to this function work very much like the arguments to
+ * sc_construct_msg, above, except that L1_ARG_INTs must be followed
+ * by a _pointer_ to an integer that can be filled in by this function.
+ */
+int
+sc_interpret_resp( char *resp,          /* buffer received from L1 */
+                   int   resp_nargs,    /* number of _varargs_ passed in */
+                   ... )
+{
+    /* Returns 0 on success, -1 on a type/count mismatch, or the L1's
+     * non-OK response code.
+     *
+     * NOTE(review): there is no bounds checking against the length of
+     * resp; callers must supply a well-formed L1 response -- confirm.
+     */
+    uint32_t buf32;   /* 32-bit buffer used to bounce things around */
+    void *bufptr;       /* used to hold response field addresses */
+    va_list al;         /* variable argument list */
+    int index;          /* current index into response buffer */
+    int argno;          /* current position in varargs list */
+    int l1_fldno;       /* number of resp fields received from l1 */
+    int l1_fld_t;       /* field type/length */
+
+    index = argno = 0;
+
+#if defined(L1_DEBUG)
+#define DUMP_RESP							  \
+    {									  \
+	int ix;								  \
+        char outbuf[512];						  \
+        sprintf( outbuf, "sc_interpret_resp error line %d: ", __LINE__ ); \
+	for( ix = 0; ix < 16; ix++ ) {					  \
+	    sprintf( &outbuf[strlen(outbuf)], "%x ", resp[ix] );	  \
+	}								  \
+	printk( "%s\n", outbuf );					  \
+    }
+#else
+#define DUMP_RESP
+#endif /* L1_DEBUG */
+
+    /* check response code; anything but L1_RESP_OK is handed straight
+     * back to the caller as the error value
+     */
+    COPY_BUFFER_TO_INT(resp, index, buf32);
+    if( buf32 != L1_RESP_OK ) {
+	DUMP_RESP;
+        return buf32;
+    }
+
+    /* get number of response fields */
+    l1_fldno = resp[index++];
+
+    va_start( al, resp_nargs );
+
+    /* copy out response fields; l1_fldno is decremented once per field
+     * consumed and must reach exactly zero by the end
+     */
+    while( argno < resp_nargs ) {
+        l1_fldno--;
+        l1_fld_t = va_arg( al, int ); argno++;
+        switch( l1_fld_t )
+        {
+          case L1_ARG_INT:
+            if( resp[index++] != L1_ARG_INT ) {
+                /* type mismatch */
+		va_end( al );
+		DUMP_RESP;
+		return -1;
+            }
+            bufptr = va_arg( al, int* ); argno++;
+	    COPY_BUFFER_TO_BUFFER(resp, index, bufptr);
+            break;
+
+          case L1_ARG_ASCII:
+            if( resp[index++] != L1_ARG_ASCII ) {
+                /* type mismatch */
+		va_end( al );
+		DUMP_RESP;
+                return -1;
+            }
+            bufptr = va_arg( al, char* ); argno++;
+            strcpy( (char *)bufptr, (char *)&(resp[index]) );
+            /* include terminating null */
+            index += (strlen( &(resp[index]) ) + 1);
+            break;
+
+          default:
+	    /* L1_ARG_UNKNOWN fields carry their length in the low
+	     * bits of the type byte
+	     */
+	    if( (l1_fld_t & L1_ARG_UNKNOWN) == L1_ARG_UNKNOWN )
+	    {
+		int *arglen;
+		
+		arglen = va_arg( al, int* ); argno++;
+		bufptr = va_arg( al, void* ); argno++;
+		*arglen = ((resp[index++] & ~L1_ARG_UNKNOWN) & 0xff);
+		BCOPY( &(resp[index]), bufptr, *arglen  );
+		index += (*arglen);
+	    }
+	    
+	    else {
+		/* unhandled type */
+		va_end( al );
+		DUMP_RESP;
+		return -1;
+	    }
+        }
+    }
+    va_end( al );
+  
+    if( (l1_fldno != 0) || (argno != resp_nargs) ) {
+        /* wrong number of arguments */
+	DUMP_RESP;
+        return -1;
+    }
+    return 0;
+}
+
+
+
+
+/* sc_send takes as arguments a system controller struct, a
+ * buffer which contains a Bedrock<->L1 "request" message,
+ * the message length, and the subchannel (presumably obtained
+ * from an earlier invocation of sc_open) over which the
+ * message is to be sent.  The final argument ("wait") indicates
+ * whether the send is to be performed synchronously or not.
+ *
+ * sc_send returns either zero or an error value.  Synchronous sends 
+ * (wait != 0) will not return until the data has actually been sent
+ * to the UART.  Synchronous sends generally receive privileged
+ * treatment.  The intent is that they be used sparingly, for such
+ * purposes as kernel printf's (the "ducons" routines).  Run-of-the-mill
+ * console output and L1 requests should NOT use a non-zero value
+ * for wait.
+ */
+int
+sc_send( l1sc_t *sc, int ch, char *msg, int len, int wait )
+{
+    /* Send a request packet on subchannel ch.  Returns SC_BADSUBCH,
+     * SC_NOPEN, SC_BUSY, SC_SUCCESS, or a brl1_send error code.
+     * See the block comment above for the semantics of "wait".
+     */
+    char hdr;
+    int sent;
+
+    /* reject out-of-range subchannel numbers */
+    if( ch < 0 || ch >= BRL1_NUM_SUBCHANS )
+        return SC_BADSUBCH;
+
+    /* reject subchannels that haven't been opened */
+    if( sc->subch[ch].use == BRL1_SUBCH_FREE )
+        return SC_NOPEN;
+
+    /* tag the packet as a request on this subchannel and ship it */
+    hdr = (BRL1_REQUEST | ((u_char)ch));
+    sent = brl1_send( sc, msg, len, hdr, wait );
+
+    if( sent == len )
+	return( SC_SUCCESS );	/* everything went out */
+
+    if( sent >= 0 )
+	return( SC_BUSY );	/* partial send: UART busy, or the
+				 * packet was too large */
+
+    return( sent );		/* pass brl1_send's error through */
+}
+
+
+
+/* subch_pull_msg pulls a message off the receive queue for subch
+ * and places it the buffer pointed to by msg.  This routine should only
+ * be called when the caller already knows a message is available on the
+ * receive queue (and, in the kernel, only when the subchannel data lock
+ * is held by the caller).
+ */
+static void
+subch_pull_msg( brl1_sch_t *subch, char *msg, int *len )
+{
+    sc_cq_t *q;         /* receive queue */
+    int before_wrap,    /* packet may be split into two different       */
+        after_wrap;     /*   pieces to acommodate queue wraparound      */
+
+    /* pull message off the receive queue */
+    q = subch->iqp;
+
+    cq_rem( q, *len );   /* remove length byte and store */
+    cq_discard( q );     /* remove type/subch byte and discard */
+
+    if ( *len > 0 )
+	(*len)--;        /* don't count type/subch byte in length returned */
+
+    /* the payload may wrap past the end of the circular buffer */
+    if( (q->opos + (*len)) > BRL1_QSIZE ) {
+        before_wrap = BRL1_QSIZE - q->opos;
+        after_wrap = (*len) - before_wrap;
+    }
+    else {
+        before_wrap = (*len);
+        after_wrap = 0;
+    }
+
+    BCOPY( q->buf + q->opos, msg, before_wrap  );
+    if( after_wrap ) {
+        BCOPY( q->buf, msg + before_wrap, after_wrap  );
+	q->opos = after_wrap;
+    }
+    else {
+	/* masking with (BRL1_QSIZE - 1) assumes BRL1_QSIZE is a power
+	 * of two (other code in this file uses % BRL1_QSIZE)
+	 */
+	q->opos = ((q->opos + before_wrap) & (BRL1_QSIZE - 1));
+    }
+    /* one fewer undelivered packet on this subchannel */
+    atomicAddInt( &(subch->packet_arrived), -1 );
+}
+
+
+/* sc_recv_poll can be called as a blocking or non-blocking function;
+ * it attempts to pull a message off of the subchannel specified
+ * in the argument list (ch).
+ *
+ * The "block" argument, if non-zero, is interpreted as a timeout
+ * delay (to avoid permanent waiting).
+ */
+
+int
+sc_recv_poll( l1sc_t *sc, int ch, char *msg, int *len, uint64_t block )
+{
+    int pl;             /* lock cookie */
+    int is_msg = 0;
+    brl1_sch_t *subch = &(sc->subch[ch]);
+
+    /* absolute expiration time for the (optional) wait */
+    rtc_time_t exp_time = rtc_time() + block;
+
+    /* sanity check-- make sure this is an open subchannel */
+    if( subch->use == BRL1_SUBCH_FREE )
+	return( SC_NOPEN );
+
+    /* busy-poll (no sleeping) until a packet arrives or the
+     * timeout expires; runs at least once even when block == 0
+     */
+    do {
+
+        /* kick the next lower layer and see if it pulls anything in
+         */
+	brl1_receive( sc );
+	is_msg = subch->packet_arrived;
+
+    } while( block && !is_msg && (rtc_time() < exp_time) );
+
+    if( !is_msg ) {
+	/* no message and we didn't care to wait for one */
+	return( SC_NMSG );
+    }
+
+    SUBCH_DATA_LOCK( subch, pl );
+    subch_pull_msg( subch, msg, len );
+    SUBCH_DATA_UNLOCK( subch, pl );
+
+    return( SC_SUCCESS );
+}
+    
+
+/* Like sc_recv_poll, sc_recv_intr can be called in either a blocking
+ * or non-blocking mode.  Rather than polling until an appointed timeout,
+ * however, sc_recv_intr sleeps on a synchronization variable until a
+ * signal from the lower layer tells us that a packet has arrived.
+ *
+ * sc_recv_intr can't be used with remote (router) L1s.
+ */
+int
+sc_recv_intr( l1sc_t *sc, int ch, char *msg, int *len, uint64_t block )
+{
+    int pl;             /* lock cookie */
+    int is_msg = 0;
+    brl1_sch_t *subch = &(sc->subch[ch]);
+
+    do {
+	SUBCH_DATA_LOCK(subch, pl);
+	is_msg = subch->packet_arrived;
+	if( !is_msg && block ) {
+	    /* wake me when you've got something */
+	    subch->rx_notify = sc_data_ready;
+	    sv_wait( &(subch->arrive_sv), 0, &(subch->data_lock), pl );
+	    if( subch->use == BRL1_SUBCH_FREE ) {
+		/* oops-- somebody closed our subchannel while we were
+		 * sleeping!
+		 */
+
+		/* no need to unlock since the channel's closed anyhow */
+		return( SC_NOPEN );
+	    }
+	}
+    } while( !is_msg && block );
+
+    if( !is_msg ) {
+	/* no message and we didn't care to wait for one */
+	SUBCH_DATA_UNLOCK( subch, pl );
+	return( SC_NMSG );
+    }
+
+    subch_pull_msg( subch, msg, len );
+    SUBCH_DATA_UNLOCK( subch, pl );
+
+    return( SC_SUCCESS );
+}
+
+/* sc_command implements a (blocking) combination of sc_send and sc_recv.
+ * It is intended to be the SN1 equivalent of SN0's "elsc_command", which
+ * issued a system controller command and then waited for a response from
+ * the system controller before returning.
+ *
+ * cmd points to the outgoing command; resp points to the buffer in
+ * which the response is to be stored.  Both buffers are assumed to
+ * be the same length; if there is any doubt as to whether the
+ * response buffer is long enough to hold the L1's response, then
+ * make it BRL1_QSIZE bytes-- no Bedrock<->L1 message can be any
+ * bigger.
+ *
+ * Be careful using the same buffer for both cmd and resp; it could get
+ * hairy if there were ever an L1 command request that spanned multiple
+ * packets.  (On the other hand, that would require some additional
+ * rewriting of the L1 command interface anyway.)
+ */
+#define __RETRIES	50
+#define __WAIT_SEND	( sc->uart != BRL1_LOCALUART )
+#define __WAIT_RECV	10000000
+
+
+int
+sc_command( l1sc_t *sc, int ch, char *cmd, char *resp, int *len )
+{
+#ifndef CONFIG_SERIAL_SGI_L1_PROTOCOL
+    return SC_NMSG;
+#else
+    int result;
+    int retries;
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+    	return SC_NMSG;
+
+    retries = __RETRIES;
+
+    while( (result = sc_send( sc, ch, cmd, *len, __WAIT_SEND )) < 0 ) {
+	if( result == SC_BUSY ) {
+	    retries--;
+	    if( retries <= 0 )
+		return result;
+	    uart_delay(500);
+	}
+	else {
+	    return result;
+	}
+    }
+    
+    /* block on sc_recv_* */
+#ifdef notyet
+    if( sc->uart == BRL1_LOCALUART ) {
+	return( sc_recv_intr( sc, ch, resp, len, __WAIT_RECV ) );
+    }
+    else
+#endif
+    {
+	return( sc_recv_poll( sc, ch, resp, len, __WAIT_RECV ) );
+    }
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+/* sc_command_kern is a knuckle-dragging, no-patience version of sc_command
+ * used in situations where the kernel has a command that shouldn't be
+ * delayed until the send buffer clears.  sc_command should be used instead
+ * under most circumstances.
+ */
+int
+sc_command_kern( l1sc_t *sc, int ch, char *cmd, char *resp, int *len )
+{
+#ifndef CONFIG_SERIAL_SGI_L1_PROTOCOL
+    return SC_NMSG;
+#else
+    int result;
+
+    if ( IS_RUNNING_ON_SIMULATOR() )
+    	return SC_NMSG;
+
+    if( (result = sc_send( sc, ch, cmd, *len, 1 )) < 0 ) {
+	return result;
+    }
+
+    return( sc_recv_poll( sc, ch, resp, len, __WAIT_RECV ) );
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+
+
+/* sc_poll checks the queue corresponding to the given
+ * subchannel to see if there's anything available.  If
+ * not, it kicks the brl1 layer and then checks again.
+ *
+ * Returns 1 if input is available on the given queue,
+ * 0 otherwise.
+ */
+int
+sc_poll( l1sc_t *sc, int ch )
+{
+    brl1_sch_t *subch = &(sc->subch[ch]);
+
+    if( subch->packet_arrived )
+	return 1;
+
+    brl1_receive( sc );
+
+    if( subch->packet_arrived )
+	return 1;
+
+    return 0;
+}
+
+/* for now, sc_init just calls brl1_init
+ */
+void
+sc_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart )
+{
+    if ( !IS_RUNNING_ON_SIMULATOR() )
+    	brl1_init( sc, nasid, uart );
+}
+
+/* sc_dispatch_env_event handles events sent from the system control
+ * network's environmental monitor tasks.
+ */
+static void
+sc_dispatch_env_event( uint code, int argc, char *args, int maxlen )
+{
+    int j, i = 0;
+    uint32_t ESPcode;
+
+    switch( code ) {
+	/* for now, all codes do the same thing: grab two arguments
+	 * and print a cmn_err_tag message */
+      default:
+	/* check number of arguments */
+	if( argc != 2 ) {
+	    L1_DBG_PRF(( "sc_dispatch_env_event: "
+			 "expected 2 arguments, got %d\n", argc ));
+	    return;
+	}
+	
+	/* get ESP code (integer argument) */
+	if( args[i++] != L1_ARG_INT ) {
+	    L1_DBG_PRF(( "sc_dispatch_env_event: "
+			 "expected integer argument\n" ));
+	    return;
+	}
+	/* WARNING: highly endian */
+	COPY_BUFFER_TO_INT(args, i, ESPcode);
+
+	/* verify string argument */
+	if( args[i++] != L1_ARG_ASCII ) {
+	    L1_DBG_PRF(( "sc_dispatch_env_event: "
+			 "expected an ASCII string\n" ));
+	    return;
+	}
+	for( j = i; j < maxlen; j++ ) {
+	    if( args[j] == '\0' ) break; /* found string termination */
+	}
+	if( j == maxlen ) {
+	    j--;
+	    L1_DBG_PRF(( "sc_dispatch_env_event: "
+			 "message too long-- truncating\n" ));
+	}
+
+	/* strip out trailing cr/lf */
+	for( ; 
+	     j > 1 && ((args[j-1] == 0xd) || (args[j-1] == 0xa)); 
+	     j-- );
+	args[j] = '\0';
+	
+	/* strip out leading cr/lf */
+	for( ;
+	     i < j && ((args[i] == 0xd) || (args[i] == 0xa));
+	     i++ );
+	
+	/* write the event to syslog */
+#ifdef IRIX
+	cmn_err_tag( ESPcode, CE_WARN, &(args[i]) );
+#endif
+    }
+}
+
+
+/* sc_event waits for events to arrive from the system controller, and
+ * prints appropriate messages to the syslog.
+ */
+static void
+sc_event( l1sc_t *sc, int ch )
+{
+    char event[BRL1_QSIZE];
+    int i;
+    int result;
+    int event_len;
+    uint32_t ev_src;
+    uint32_t ev_code;
+    int ev_argc;
+
+    while(1) {
+	
+	bzero( event, BRL1_QSIZE );
+
+	/*
+	 * wait for an event 
+	 */
+	result = sc_recv_intr( sc, ch, event, &event_len, 1 );
+	if( result != SC_SUCCESS ) {
+	    cmn_err( CE_WARN, "Error receiving sysctl event on nasid %d\n",
+		     sc->nasid );
+	}
+	else {
+	    /*
+	     * an event arrived; break it down into useful pieces
+	     */
+#if defined(L1_DEBUG) && 0
+	    int ix;
+	    printf( "Event packet received:\n" );
+	    for (ix = 0; ix < 64; ix++) {
+		printf( "%x%x ", ((event[ix] >> 4) & ((uint64_t)0xf)),
+			(event[ix] & ((uint64_t)0xf)) );
+		if( (ix % 16) == 0xf ) printf( "\n" );
+	    }
+#endif /* L1_DEBUG */
+
+	    i = 0;
+
+	    /* get event source */
+	    COPY_BUFFER_TO_INT(event, i, ev_src);
+	    COPY_BUFFER_TO_INT(event, i, ev_code);
+
+	    /* get arg count */
+	    ev_argc = (event[i++] & 0xffUL);
+	    
+	    /* dispatch events by task */
+	    switch( (ev_src & L1_ADDR_TASK_MASK) >> L1_ADDR_TASK_SHFT )
+	    {
+	      case L1_ADDR_TASK_ENV: /* environmental monitor event */
+		sc_dispatch_env_event( ev_code, ev_argc, &(event[i]), 
+				       BRL1_QSIZE - i );
+		break;
+
+	      default: /* unhandled task type */
+		L1_DBG_PRF(( "Unhandled event type received from system "
+			     "controllers: source task %x\n",
+			     (ev_src & L1_ADDR_TASK_MASK) >> L1_ADDR_TASK_SHFT
+			   ));
+	    }
+	}
+	
+    }			
+}
+
+/* sc_listen sets up a service thread to listen for incoming events.
+ */
+void
+sc_listen( l1sc_t *sc )
+{
+    int pl;
+    int result;
+    brl1_sch_t *subch;
+
+    char        msg[BRL1_QSIZE];
+    int         len;    /* length of message being sent */
+    int         ch;     /* system controller subchannel used */
+
+    extern int msc_shutdown_pri;
+
+    /* grab the designated "event subchannel" */
+    SUBCH_LOCK( sc, pl );
+    subch = &(sc->subch[BRL1_EVENT_SUBCH]);
+    if( subch->use != BRL1_SUBCH_FREE ) {
+	SUBCH_UNLOCK( sc, pl );
+	cmn_err( CE_WARN, "sysctl event subchannel in use! "
+		 "Not monitoring sysctl events.\n" );
+	return;
+    }
+    subch->use = BRL1_SUBCH_RSVD;
+    SUBCH_UNLOCK( sc, pl );
+
+    subch->packet_arrived = 0;
+    subch->target = BRL1_LOCALUART;
+    sv_init( &(subch->arrive_sv), SV_FIFO, NULL );
+    spinlock_init( &(subch->data_lock), NULL );
+    subch->tx_notify = NULL;
+    subch->rx_notify = sc_data_ready;
+    subch->iqp = kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP,
+				   NASID_TO_COMPACT_NODEID(sc->nasid) );
+    ASSERT( subch->iqp );
+    cq_init( subch->iqp );
+
+#ifdef LINUX_KERNEL_THREADS
+    /* set up a thread to listen for events */
+    sthread_create( "sysctl event handler", 0, 0, 0, msc_shutdown_pri,
+		    KT_PS, (st_func_t *) sc_event,
+		    (void *)sc, (void *)(uint64_t)BRL1_EVENT_SUBCH, 0, 0 );
+#endif
+
+    /* signal the L1 to begin sending events */
+    bzero( msg, BRL1_QSIZE );
+    ch = sc_open( sc, L1_ADDR_LOCAL );
+
+    if( (len = sc_construct_msg( sc, ch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_EVENT_SUBCH, 2,
+				 L1_ARG_INT, BRL1_EVENT_SUBCH )) < 0 )
+    {
+	sc_close( sc, ch );
+	L1_DBG_PRF(( "Failure in sc_construct_msg (%d)\n", len ));
+	goto err_return;
+    }
+
+    result = sc_command_kern( sc, ch, msg, msg, &len );
+    if( result < 0 )
+    {
+	sc_close( sc, ch );
+	L1_DBG_PRF(( "Failure in sc_command_kern (%d)\n", result ));
+	goto err_return;
+    }
+
+    sc_close( sc, ch );
+
+    result = sc_interpret_resp( msg, 0 );
+    if( result < 0 )
+    {
+	L1_DBG_PRF(( "Failure in sc_interpret_resp (%d)\n", result ));
+	goto err_return;
+    }
+
+    /* everything went fine; just return */
+    return;
+	
+err_return:
+    /* there was a problem; complain */
+    cmn_err( CE_WARN, "failed to set sysctl event-monitoring subchannel.  "
+	     "Sysctl events will not be monitored.\n" );
+}
+
+
+/*********************************************************************
+ * elscuart functions.  These provide a uart-like interface to the
+ * bedrock/l1 protocol console channels.  They are similar in form
+ * and intent to the elscuart_* functions defined for SN0 in elsc.c.
+ *
+ */
+
+int _elscuart_flush( l1sc_t *sc );
+
+/* Leave room in queue for CR/LF */
+#define ELSCUART_LINE_MAX       (BRL1_QSIZE - 2)
+
+
+/*
+ * _elscuart_putc provides an entry point to the L1 interface driver;
+ * writes a single character to the output queue.  Flushes at the
+ * end of each line, and translates newlines into CR/LF.
+ *
+ * The kernel should generally use l1_cons_write instead, since it assumes
+ * buffering, translation, prefixing, etc. are done at a higher
+ * level.
+ *
+ */
+int
+_elscuart_putc( l1sc_t *sc, int c )
+{
+    sc_cq_t *q;
+    
+    q = &(sc->oq[ MAP_OQ(L1_ELSCUART_SUBCH(get_myid())) ]);
+
+    if( c != '\n' && c != '\r' && cq_used(q) >= ELSCUART_LINE_MAX ) {
+        cq_add( q, '\r' );
+        cq_add( q, '\n' );
+         _elscuart_flush( sc );
+        sc->sol = 1;
+    }
+
+    if( sc->sol && c != '\r' ) {
+        char            prefix[16], *s;
+
+        if( cq_room( q ) < 8 && _elscuart_flush(sc) < 0 )
+        {
+            return -1;
+        }
+	
+	if( sc->verbose )
+	{
+#ifdef  SUPPORT_PRINTING_M_FORMAT
+	    sprintf( prefix,
+		     "%c %d%d%d %M:",
+		     'A' + get_myid(),
+		     sc->nasid / 100,
+		     (sc->nasid / 10) % 10,
+		     sc->nasid % 10,
+		     sc->modid );
+#else
+	    sprintf( prefix,
+		     "%c %d%d%d 0x%x:",
+		     'A' + get_myid(),
+		     sc->nasid / 100,
+		     (sc->nasid / 10) % 10,
+		     sc->nasid % 10,
+		     sc->modid );
+#endif
+	    
+	    for( s = prefix; *s; s++ )
+		cq_add( q, *s );
+	}	    
+	sc->sol = 0;
+
+    }
+
+    if( cq_room( q ) < 2 && _elscuart_flush(sc) < 0 )
+    {
+        return -1;
+    }
+
+    if( c == '\n' ) {
+        cq_add( q, '\r' );
+        sc->sol = 1;
+    }
+
+    cq_add( q, (u_char) c );
+
+    if( c == '\n' ) {
+        /* flush buffered line */
+        if( _elscuart_flush( sc ) < 0 )
+        {
+            return -1;
+        }
+    }
+
+    if( c == '\r' )
+    {
+        sc->sol = 1;
+    }
+
+    return 0;
+}
+
+
+/*
+ * _elscuart_getc reads a character from the input queue.  This
+ * routine blocks.
+ */
+int
+_elscuart_getc( l1sc_t *sc )
+{
+    int r;
+
+    while( (r = _elscuart_poll( sc )) == 0 );
+
+    if( r < 0 ) {
+	/* some error occurred */
+	return r;
+    }
+
+    return _elscuart_readc( sc );
+}
+
+
+
+/*
+ * _elscuart_poll returns 1 if characters are ready for the
+ * calling processor, 0 if they are not
+ */
+int
+_elscuart_poll( l1sc_t *sc )
+{
+    int result;
+
+    if( sc->cons_listen ) {
+        result = l1_cons_poll( sc );
+        if( result )
+            return result;
+    }
+
+    return sc_poll( sc, L1_ELSCUART_SUBCH(get_myid()) );
+}
+
+
+
+/* _elscuart_readc is to be used only when _elscuart_poll has
+ * indicated that a character is waiting.  Pulls a character
+ * off this processor's console queue and returns it.
+ *
+ */
+int
+_elscuart_readc( l1sc_t *sc )
+{
+    int c, pl;
+    sc_cq_t *q;
+    brl1_sch_t *subch;
+
+    if( sc->cons_listen ) {
+	subch = &(sc->subch[ SC_CONS_SYSTEM ]);
+	q = subch->iqp;
+	
+	SUBCH_DATA_LOCK( subch, pl );
+        if( !cq_empty( q ) ) {
+            cq_rem( q, c );
+	    if( cq_empty( q ) ) {
+		subch->packet_arrived = 0;
+	    }
+	    SUBCH_DATA_UNLOCK( subch, pl );
+            return c;
+        }
+	SUBCH_DATA_UNLOCK( subch, pl );
+    }
+
+    subch = &(sc->subch[ L1_ELSCUART_SUBCH(get_myid()) ]);
+    q = subch->iqp;
+
+    SUBCH_DATA_LOCK( subch, pl );
+    if( cq_empty( q ) ) {
+	SUBCH_DATA_UNLOCK( subch, pl );
+        return -1;
+    }
+
+    cq_rem( q, c );
+    if( cq_empty ( q ) ) {
+	subch->packet_arrived = 0;
+    }
+    SUBCH_DATA_UNLOCK( subch, pl );
+
+    return c;
+}
+
+
+/*
+ * _elscuart_flush flushes queued output to the L1.
+ * This routine blocks until the queue is flushed.
+ */
+int
+_elscuart_flush( l1sc_t *sc )
+{
+    int r, n;
+    char buf[BRL1_QSIZE];
+    sc_cq_t *q = &(sc->oq[ MAP_OQ(L1_ELSCUART_SUBCH(get_myid())) ]);
+
+    while( (n = cq_used(q)) ) {
+
+        /* buffer queue contents */
+        r = BRL1_QSIZE - q->opos;
+
+        if( n > r ) {
+            BCOPY( q->buf + q->opos, buf, r  );
+            BCOPY( q->buf, buf + r, n - r  );
+        } else {
+            BCOPY( q->buf + q->opos, buf, n  );
+        }
+
+        /* attempt to send buffer contents */
+        r = brl1_send( sc, buf, cq_used( q ), 
+		       (BRL1_EVENT | L1_ELSCUART_SUBCH(get_myid())), 1 );
+
+        /* if no error, dequeue the sent characters; otherwise,
+         * return the error
+         */
+        if( r >= SC_SUCCESS ) {
+            q->opos = (q->opos + r) % BRL1_QSIZE;
+        }
+        else {
+            return r;
+        }
+    }
+
+    return 0;
+}
+
+
+
+/* _elscuart_probe returns non-zero if the L1 (and
+ * consequently the elscuart) can be accessed
+ */
+int
+_elscuart_probe( l1sc_t *sc )
+{
+#ifndef CONFIG_SERIAL_SGI_L1_PROTOCOL
+    return 0;
+#else
+    char ver[BRL1_QSIZE];
+    extern int elsc_version( l1sc_t *, char * );
+    if ( IS_RUNNING_ON_SIMULATOR() )
+    	return 0;
+    return( elsc_version(sc, ver) >= 0 );
+#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
+}
+
+
+
+/* _elscuart_init zeroes out the l1sc_t console
+ * queues for this processor's console subchannel.
+ */
+void
+_elscuart_init( l1sc_t *sc )
+{
+    int pl;
+    brl1_sch_t *subch = &sc->subch[L1_ELSCUART_SUBCH(get_myid())];
+
+    SUBCH_DATA_LOCK(subch, pl);
+
+    subch->packet_arrived = 0;
+    cq_init( subch->iqp );
+    cq_init( &sc->oq[MAP_OQ(L1_ELSCUART_SUBCH(get_myid()))] );
+
+    SUBCH_DATA_UNLOCK(subch, pl);
+}
+
+
+#ifdef IRIX
+
+/* elscuart_syscon_listen causes the processor on which it's
+ * invoked to "listen" to the system console subchannel (that
+ * is, subchannel 4) for console input.
+ */
+void
+elscuart_syscon_listen( l1sc_t *sc )
+{
+    int pl;
+    brl1_sch_t *subch = &(sc->subch[SC_CONS_SYSTEM]);
+
+    /* if we're already listening, don't bother */
+    if( sc->cons_listen )
+        return;
+
+    SUBCH_DATA_LOCK( subch, pl );
+
+    subch->use = BRL1_SUBCH_RSVD;
+    subch->packet_arrived = 0;
+
+    SUBCH_DATA_UNLOCK( subch, pl );
+
+
+    sc->cons_listen = 1;
+}
+#endif	/* IRIX */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/l1_command.c linux/arch/ia64/sn/io/l1_command.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/l1_command.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/l1_command.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1357 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */ 
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/eeprom.h>
+#include <asm/sn/ksys/i2c.h>
+#include <asm/sn/cmn_err.h>
+#include <asm/sn/router.h>
+#include <asm/sn/module.h>
+#include <asm/sn/ksys/l1.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/clksupport.h>
+
+#define ELSC_TIMEOUT	1000000		/* ELSC response timeout (usec) */
+#define LOCK_TIMEOUT	5000000		/* Hub lock timeout (usec) */
+
+#define LOCAL_HUB	LOCAL_HUB_ADDR
+#define LD(x)		(*(volatile uint64_t *)(x))
+#define SD(x, v)	(LD(x) = (uint64_t) (v))
+
+#define hub_cpu_get()	0
+
+#define LBYTE(caddr)	(*(char *) caddr)
+
+extern char *bcopy(const char * src, char * dest, int count);
+
+#define LDEBUG		0
+
+/*
+ * ELSC data is in NVRAM page 7 at the following offsets.
+ */
+
+#define NVRAM_MAGIC_AD	0x700		/* magic number used for init */
+#define NVRAM_PASS_WD	0x701		/* password (4 bytes in length) */
+#define NVRAM_DBG1	0x705		/* virtual XOR debug switches */
+#define NVRAM_DBG2	0x706		/* physical XOR debug switches */
+#define NVRAM_CFG	0x707		/* ELSC Configuration info */
+#define NVRAM_MODULE	0x708		/* system module number */
+#define NVRAM_BIST_FLG	0x709		/* BIST flags (2 bits per nodeboard) */
+#define NVRAM_PARTITION 0x70a		/* module's partition id */
+#define	NVRAM_DOMAIN	0x70b		/* module's domain id */
+#define	NVRAM_CLUSTER	0x70c		/* module's cluster id */
+#define	NVRAM_CELL	0x70d		/* module's cellid */
+
+#define NVRAM_MAGIC_NO	0x37		/* value of magic number */
+#define NVRAM_SIZE	16		/* 16 bytes in nvram */
+
+/*
+ * Declare a static ELSC NVRAM buffer to hold all data read from
+ * and written to NVRAM.  This nvram "cache" will be used only during the
+ * IP27prom execution.
+ */
+static char elsc_nvram_buffer[NVRAM_SIZE];
+
+#define SC_COMMAND sc_command
+
+
+/*
+ * elsc_init
+ *
+ *   Initialize ELSC structure
+ */
+
+void elsc_init(elsc_t *e, nasid_t nasid)
+{
+    sc_init((l1sc_t *)e, nasid, BRL1_LOCALUART);
+}
+
+
+/*
+ * elsc_errmsg
+ *
+ *   Given a negative error code,
+ *   returns a corresponding static error string.
+ */
+
+char *elsc_errmsg(int code)
+{
+    switch (code) {
+    case ELSC_ERROR_CMD_SEND:
+	return "Command send error";
+    case ELSC_ERROR_CMD_CHECKSUM:
+	return "Command packet checksum error";
+    case ELSC_ERROR_CMD_UNKNOWN:
+	return "Unknown command";
+    case ELSC_ERROR_CMD_ARGS:
+	return "Invalid command argument(s)";
+    case ELSC_ERROR_CMD_PERM:
+	return "Permission denied";
+    case ELSC_ERROR_RESP_TIMEOUT:
+	return "System controller response timeout";
+    case ELSC_ERROR_RESP_CHECKSUM:
+	return "Response packet checksum error";
+    case ELSC_ERROR_RESP_FORMAT:
+	return "Response format error";
+    case ELSC_ERROR_RESP_DIR:
+	return "Response direction error";
+    case ELSC_ERROR_MSG_LOST:
+	return "Message lost because queue is full";
+    case ELSC_ERROR_LOCK_TIMEOUT:
+	return "Timed out getting ELSC lock";
+    case ELSC_ERROR_DATA_SEND:
+	return "Error sending data";
+    case ELSC_ERROR_NIC:
+	return "NIC protocol error";
+    case ELSC_ERROR_NVMAGIC:
+	return "Bad magic number in NVRAM";
+    case ELSC_ERROR_MODULE:
+	return "Module location protocol error";
+    default:
+	return "Unknown error";
+    }
+}
+
+/*
+ * elsc_nvram_init
+ *
+ *   Initializes reads and writes to NVRAM.  This will perform a single
+ *   read to NVRAM, getting all data at once.  When the PROM tries to
+ *   read NVRAM, it returns the data from the buffer being read.  If the
+ *   PROM tries to write out to NVRAM, the write is done, and the internal
+ *   buffer is updated.
+ */
+
+void elsc_nvram_init(nasid_t nasid, uchar_t *elsc_nvram_data)
+{
+    /* This might require implementation of multiple-packet request/responses
+     * if it's to provide the same behavior that was available in SN0.
+     */
+    nasid = nasid;
+    elsc_nvram_data = elsc_nvram_data;
+}
+
+/*
+ * elsc_nvram_copy
+ *
+ *   Copies the content of a buffer into the static buffer in this library.
+ */
+
+void elsc_nvram_copy(uchar_t *elsc_nvram_data)
+{
+    memcpy(elsc_nvram_buffer, elsc_nvram_data, NVRAM_SIZE);
+}
+
+/*
+ * elsc_nvram_write
+ *
+ *   Copies bytes from 'buf' into NVRAM, starting at NVRAM address
+ *   'addr' which must be between 0 and 2047.
+ *
+ *   If 'len' is non-negative, the routine copies 'len' bytes.
+ *
+ *   If 'len' is negative, the routine treats the data as a string and
+ *   copies bytes up to and including a NUL-terminating zero, but not
+ *   to exceed '-len' bytes.
+ */
+
+int elsc_nvram_write(elsc_t *e, int addr, char *buf, int len)
+{
+    /* Here again, we might need to work out the details of a
+     * multiple-packet protocol.
+     */
+
+    /* For now, pretend it worked. */
+    e = e;
+    addr = addr;
+    buf = buf;
+    return (len < 0 ? -len : len);
+}
+
+/*
+ * elsc_nvram_read
+ *
+ *   Copies bytes from NVRAM into 'buf', starting at NVRAM address
+ *   'addr' which must be between 0 and 2047.
+ *
+ *   If 'len' is non-negative, the routine copies 'len' bytes.
+ *
+ *   If 'len' is negative, the routine treats the data as a string and
+ *   copies bytes up to and including a NUL-terminating zero, but not
+ *   to exceed '-len' bytes.  NOTE:  This method is no longer supported.
+ *   It was never used in the first place.
+ */
+
+int elsc_nvram_read(elsc_t *e, int addr, char *buf, int len)
+{
+    /* multiple packets? */
+    e = e;
+    addr = addr;
+    buf = buf;
+    len = len;
+    return -1;
+}
+
+/*
+ * Command Set
+ */
+
+int elsc_version(elsc_t *e, char *result)
+{
+    char	msg[BRL1_QSIZE];
+    int		len;    /* length of message being sent */
+    int		subch;  /* system controller subchannel used */
+    int		major,  /* major rev number */
+	        minor,  /* minor rev number */
+                bugfix; /* bugfix rev number */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL );
+
+    if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_FW_REV, 0 )) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( SC_COMMAND( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( (l1sc_t *)e, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 6, L1_ARG_INT, &major,
+			   L1_ARG_INT, &minor, L1_ARG_INT, &bugfix )
+	< 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    sprintf( result, "%d.%d.%d", major, minor, bugfix );
+
+    return 0;
+}
+
+int elsc_debug_set(elsc_t *e, u_char byte1, u_char byte2)
+{
+    /* shush compiler */
+    e = e;
+    byte1 = byte1;
+    byte2 = byte2;
+
+    /* fill in a buffer with the opcode & params; call sc_command */
+
+    return 0;
+}
+
+int elsc_debug_get(elsc_t *e, u_char *byte1, u_char *byte2)
+{
+    char	msg[BRL1_QSIZE];
+    int		subch;  /* system controller subchannel used */
+    int		dbg_sw; /* holds debug switch settings */
+    int		len;	/* number of msg buffer bytes used */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    if( (subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_RDBG, 0 ) ) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( sc_command( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( (l1sc_t *)e, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 2, L1_ARG_INT, &dbg_sw ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    /* copy out debug switch settings (last two bytes of the
+     * integer response)
+     */
+    *byte1 = ((dbg_sw >> 8) & 0xFF);
+    *byte2 = (dbg_sw & 0xFF);
+
+    return 0;
+}
+
+/*
+ * elsc_rack_bay_get fills in the two int * arguments with the
+ * rack number and bay number of the L1 being addressed
+ */
+int elsc_rack_bay_get(elsc_t *e, uint *rack, uint *bay)
+{
+    char msg[BRL1_QSIZE];	/* L1 request/response info */
+    int subch;			/* system controller subchannel used */
+    int len;			/* length of message */
+    uint32_t	buf32;		/* used to copy 32-bit rack/bay out of msg */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    if( (subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_RRACK, 0 )) < 0 ) 
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+
+    /* send the request to the L1 */
+    if( sc_command( (l1sc_t *)e, subch, msg, msg, &len ) ) {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close(e, subch);
+
+    /* check response */
+    if( sc_interpret_resp( msg, 2, L1_ARG_INT, &buf32 ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    /* extract rack/bay info
+     *
+     * note that the 32-bit value returned by the L1 actually
+     * only uses the low-order sixteen bits for rack and bay
+     * information.  A "normal" L1 address puts rack and bay
+     * information in bit positions 12 through 28.  So if
+     * we initially shift the value returned 12 bits to the left,
+     * we can use the L1 addressing #define's to extract the
+     * values we need (see ksys/l1.h for a complete list of the
+     * various fields of an L1 address).
+     */
+    buf32 <<= L1_ADDR_BAY_SHFT;
+
+    *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
+    *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
+
+    return 0;
+}
+
+
+/* elsc_rack_bay_type_get fills in the three int * arguments with the
+ * rack number, bay number and brick type of the L1 being addressed.  Note
+ * that if the L1 operation fails and this function returns an error value, 
+ * garbage may be written to brick_type.
+ */
+int elsc_rack_bay_type_get( l1sc_t *sc, uint *rack, 
+			       uint *bay, uint *brick_type )
+{
+    char msg[BRL1_QSIZE];       /* L1 request/response info */
+    int subch;                  /* system controller subchannel used */
+    int len;                    /* length of message */
+    uint32_t buf32;	        /* used to copy 32-bit rack & bay out of msg */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    if( (subch = sc_open( sc, L1_ADDR_LOCAL )) < 0 ) {
+	return ELSC_ERROR_CMD_SEND;
+    }
+
+    if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_RRBT, 0 )) < 0 )
+    {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( SC_COMMAND( sc, subch, msg, msg, &len ) ) {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( sc, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 4, L1_ARG_INT, &buf32, 
+			           L1_ARG_INT, brick_type ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    /* extract rack/bay info
+     *
+     * note that the 32-bit value returned by the L1 actually
+     * only uses the low-order sixteen bits for rack and bay
+     * information.  A "normal" L1 address puts rack and bay
+     * information in bit positions 12 through 28.  So if
+     * we initially shift the value returned 12 bits to the left,
+     * we can use the L1 addressing #define's to extract the
+     * values we need (see ksys/l1.h for a complete list of the
+     * various fields of an L1 address).
+     */
+    buf32 <<= L1_ADDR_BAY_SHFT;
+
+    *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
+    *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
+
+    /* convert brick_type to lower case */
+    *brick_type = *brick_type - 'A' + 'a';
+
+    return 0;
+}
+
+
+int elsc_module_get(elsc_t *e)
+{
+    extern char brick_types[];
+    uint rnum, rack, bay, bricktype, t;
+    int ret;
+
+    /* construct module ID from rack and slot info */
+
+    if ((ret = elsc_rack_bay_type_get(e, &rnum, &bay, &bricktype)) < 0)
+	return ret;
+
+    /* report unset location info. with a special, otherwise invalid modid */
+    if (rnum == 0 && bay == 0)
+	return MODULE_NOT_SET;
+
+    if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
+	return ELSC_ERROR_MODULE;
+
+    /* Build a moduleid_t-compatible rack number */
+
+    rack = 0;		
+    t = rnum / 100;		/* rack class (CPU/IO) */
+    if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
+	return ELSC_ERROR_MODULE;
+    RACK_ADD_CLASS(rack, t);
+    rnum %= 100;
+
+    t = rnum / 10;		/* rack group */
+    if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
+	return ELSC_ERROR_MODULE;
+    RACK_ADD_GROUP(rack, t);
+
+    t = rnum % 10;		/* rack number (one-based) */
+    if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
+	return ELSC_ERROR_MODULE;
+    RACK_ADD_NUM(rack, t);
+
+    for( t = 0; t < MAX_BRICK_TYPES; t++ ) {
+	if( brick_types[t] == bricktype )
+	    return RBT_TO_MODULE(rack, bay, t);
+    }
+    
+    return ELSC_ERROR_MODULE;
+}
+
+int elsc_partition_set(elsc_t *e, int partition)
+{
+    char msg[BRL1_QSIZE];       /* L1 request/response info */
+    int subch;                  /* system controller subchannel used */
+    int len;                    /* length of message */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
+	return ELSC_ERROR_CMD_SEND;
+    }
+
+    if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_PARTITION_SET, 2,
+				 L1_ARG_INT, partition )) < 0 )
+    {
+	
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( sc_command( e, subch, msg, msg, &len ) ) {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( e, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 0 ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+    
+    return( 0 );
+}
+
+/*
+ * elsc_partition_get - query the local L1 for this brick's partition id.
+ *
+ * Returns the (non-negative) partition id on success, or a negative
+ * ELSC_ERROR_* code on failure.  NOTE(review): a negative return is
+ * used both for errors and, in principle, cannot be distinguished from
+ * a partition id with the high bit set — callers presumably rely on
+ * partition ids being small non-negative values.
+ */
+int elsc_partition_get(elsc_t *e)
+{
+    char msg[BRL1_QSIZE];       /* L1 request/response info */
+    int subch;                  /* system controller subchannel used */
+    int len;                    /* length of message */
+    uint32_t partition_id;    /* used to copy partition id out of msg */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
+	return ELSC_ERROR_CMD_SEND;
+    }
+
+    if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_PARTITION_GET, 0 )) < 0 )
+
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( sc_command( e, subch, msg, msg, &len ) ) {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( e, subch );
+
+    /* check response (one integer argument: the partition id) */
+    if( sc_interpret_resp( msg, 2, L1_ARG_INT, &partition_id ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+    
+    return( partition_id );
+}
+
+
+/*
+ * elsc_cons_subch selects the "active" console subchannel for this node
+ * (i.e., the one that will currently receive input)
+ *
+ * Returns 0 on success or a negative ELSC_ERROR_* code.
+ */
+int elsc_cons_subch(elsc_t *e, uint ch)
+{
+    char msg[BRL1_QSIZE];       /* L1 request/response info */
+    int subch;                  /* system controller subchannel used */
+    int len;                    /* length of message */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+
+    /* Bail out if no subchannel is available, as elsc_partition_set
+     * does; previously a negative sc_open result was passed on to
+     * sc_construct_msg unchecked.
+     */
+    if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+    
+    if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_CONS_SUBCH, 2,
+				 L1_ARG_INT, ch)) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( SC_COMMAND( e, subch, msg, msg, &len ) ) {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( e, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 0 ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    return 0;
+}
+
+
+/*
+ * elsc_cons_node should only be executed by one node.  It declares to
+ * the system controller that the node from which it is called will be
+ * the owner of the system console.
+ *
+ * Returns 0 on success or a negative ELSC_ERROR_* code.
+ */
+int elsc_cons_node(elsc_t *e)
+{
+    char msg[BRL1_QSIZE];       /* L1 request/response info */
+    int subch;                  /* system controller subchannel used */
+    int len;                    /* length of message */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+
+    /* Bail out if no subchannel is available, as elsc_partition_set
+     * does; previously a negative sc_open result was passed on to
+     * sc_construct_msg unchecked.
+     */
+    if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+    
+    if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_CONS_NODE, 0 )) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( SC_COMMAND( e, subch, msg, msg, &len ) ) {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( e, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 0 ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    return 0;
+}
+    
+
+/* elsc_display_line writes up to 12 characters to either the top or bottom
+ * line of the L1 display.  line points to a buffer containing the message
+ * to be displayed.  The zero-based line number is specified by lnum (so
+ * lnum == 0 specifies the top line and lnum == 1 specifies the bottom).
+ * Lines longer than 12 characters, or line numbers not less than
+ * L1_DISPLAY_LINES, cause elsc_display_line to return an error.
+ *
+ * Returns 0 on success or a negative ELSC_ERROR_* code.
+ */
+int elsc_display_line(elsc_t *e, char *line, int lnum)
+{
+    char	msg[BRL1_QSIZE];
+    int		subch;  /* system controller subchannel used */
+    int		len;	/* number of msg buffer bytes used */
+
+    /* argument sanity checking */
+    if( !(lnum < L1_DISPLAY_LINES) )
+	return( ELSC_ERROR_CMD_ARGS );
+    if( !(strlen( line ) <= L1_DISPLAY_LINE_LENGTH) )
+	return( ELSC_ERROR_CMD_ARGS );
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+
+    /* Bail out if no subchannel is available, as elsc_partition_set
+     * does; previously a negative sc_open result was passed on to
+     * sc_construct_msg unchecked.
+     */
+    if( (subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 (L1_REQ_DISP1+lnum), 2,
+				 L1_ARG_ASCII, line )) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( SC_COMMAND( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( (l1sc_t *)e, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 0 ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    return 0;
+}
+
+
+/* elsc_display_mesg silently drops message characters beyond the 12th.
+ *
+ * Splits chr into L1_DISPLAY_LINE_LENGTH-sized pieces and writes each
+ * piece to a display line via elsc_display_line, stopping after
+ * L1_DISPLAY_LINES lines.  Returns 0 on success or the first negative
+ * error code from elsc_display_line.
+ */
+int elsc_display_mesg(elsc_t *e, char *chr)
+{
+
+    char line[L1_DISPLAY_LINE_LENGTH+1];
+    int numlines, i;
+    int result;
+
+    /* number of display lines needed, rounded up */
+    numlines = (strlen( chr ) + L1_DISPLAY_LINE_LENGTH - 1) /
+	L1_DISPLAY_LINE_LENGTH;
+
+    if( numlines > L1_DISPLAY_LINES )
+	numlines = L1_DISPLAY_LINES;
+
+    for( i = 0; i < numlines; i++ )
+    {
+	strncpy( line, chr, L1_DISPLAY_LINE_LENGTH );
+	line[L1_DISPLAY_LINE_LENGTH] = '\0';	/* strncpy may not terminate */
+
+	/* generally we want to leave the first line of the L1 display
+	 * alone (so the L1 can manipulate it).  If you need to be able
+	 * to display to both lines (for debugging purposes), define
+	 * L1_DISP_2LINES in irix/kern/ksys/l1.h, or add -DL1_DISP_2LINES
+	 * to your 'defs file.
+	 */
+#if defined(L1_DISP_2LINES)
+	if( (result = elsc_display_line( e, line, i )) < 0 )
+#else
+	if( (result = elsc_display_line( e, line, i+1 )) < 0 )
+#endif
+
+	    return result;
+
+	chr += L1_DISPLAY_LINE_LENGTH;
+    }
+    
+    return 0;
+}
+
+
+/*
+ * elsc_password_set - stub; always reports success.
+ * Eventually this should build an L1 request (opcode & params) and
+ * issue it via elsc_command.
+ */
+int elsc_password_set(elsc_t *e, char *password)
+{
+    /* keep the compiler quiet about unused parameters */
+    (void)e;
+    (void)password;
+
+    return 0;
+}
+
+/*
+ * elsc_password_get - stub; always reports success and leaves
+ * password untouched.  Eventually this should build an L1 request
+ * (opcode & params) and issue it via elsc_command.
+ */
+int elsc_password_get(elsc_t *e, char *password)
+{
+    /* keep the compiler quiet about unused parameters */
+    (void)e;
+    (void)password;
+
+    return 0;
+}
+
+
+/*
+ * sc_portspeed_get
+ *
+ * retrieve the current portspeed setting for the bedrock II
+ *
+ * Returns 600 or 400 (derived from the first of the two integers the
+ * L1 reports) or a negative ELSC_ERROR_* code on failure.
+ */
+int sc_portspeed_get(l1sc_t *sc)
+{
+    char	msg[BRL1_QSIZE];
+    int         len;    /* length of message being sent */
+    int         subch;  /* system controller subchannel used */
+    int		portspeed_a, portspeed_b;
+			/* ioport clock rates */
+
+    bzero( msg, BRL1_QSIZE );
+
+    /* Bail out if no subchannel is available, as elsc_partition_set
+     * does; previously a negative sc_open result was passed on to
+     * sc_construct_msg unchecked.
+     */
+    if( (subch = sc_open( sc, L1_ADDR_LOCAL )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
+                                 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_PORTSPEED,
+				 0 )) < 0 )
+    {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+    
+    /* send the request to the L1 */
+    if( sc_command( sc, subch, msg, msg, &len ) < 0 )
+    {
+        sc_close( sc, subch );
+        return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( sc, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 4, 
+			   L1_ARG_INT, &portspeed_a,
+			   L1_ARG_INT, &portspeed_b ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    /* for the c-brick, we ignore the portspeed_b value */
+    return (portspeed_a ? 600 : 400);
+}
+
+/*
+ * elsc_power_query
+ *
+ *   To be used after system reset, this command returns 1 if the reset
+ *   was the result of a power-on, 0 otherwise.
+ *
+ *   The power query status is cleared to 0 after it is read.
+ *
+ *   Stub: always reports a power-on reset until the real L1 query
+ *   (opcode & params via elsc_command) is implemented.
+ */
+
+int elsc_power_query(elsc_t *e)
+{
+    (void)e;	/* keep the compiler quiet */
+
+    return 1;
+}
+
+/*
+ * elsc_rpwr_query - stub; always returns 0.  Eventually this should
+ * build an L1 request (opcode & params) and issue it via elsc_command.
+ */
+int elsc_rpwr_query(elsc_t *e, int is_master)
+{
+    /* keep the compiler quiet about unused parameters */
+    (void)e;
+    (void)is_master;
+
+    return 0;
+}
+
+/*
+ * elsc_power_down
+ *
+ *   Sets up system to shut down in "sec" seconds (or modifies the
+ *   shutdown time if one is already in effect).  Use 0 to power
+ *   down immediately.
+ *
+ *   Stub: always reports success until the real L1 request
+ *   (opcode & params via elsc_command) is implemented.
+ */
+
+int elsc_power_down(elsc_t *e, int sec)
+{
+    /* keep the compiler quiet about unused parameters */
+    (void)e;
+    (void)sec;
+
+    return 0;
+}
+
+
+/*
+ * elsc_system_reset - ask the local L1 to reset the system.
+ *
+ * Returns 0 on success (including the case where the L1 never answers
+ * because the reset took effect) or a negative ELSC_ERROR_* code.
+ */
+int elsc_system_reset(elsc_t *e)
+{
+    char	msg[BRL1_QSIZE];
+    int		subch;  /* system controller subchannel used */
+    int		len;	/* number of msg buffer bytes used */
+    int		result;
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
+	return ELSC_ERROR_CMD_SEND;
+    }
+
+    if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_RESET, 0 )) < 0 )
+    {
+	sc_close( e, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( (result = sc_command( e, subch, msg, msg, &len )) ) {
+	sc_close( e, subch );
+	if( result == SC_NMSG ) {
+	    /* timeout is OK.  We've sent the reset.  Now it's just
+	     * a matter of time...
+	     */
+	    return( 0 );
+	}
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( e, subch );
+
+    /* check response */
+    if( sc_interpret_resp( msg, 0 ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    return 0;
+}
+
+
+/*
+ * elsc_power_cycle - stub; always reports success.  Eventually this
+ * should build an L1 request (opcode & params) and issue it via
+ * sc_command.
+ */
+int elsc_power_cycle(elsc_t *e)
+{
+    (void)e;	/* keep the compiler quiet */
+
+    return 0;
+}
+
+
+/*
+ * L1 Support for reading 
+ * cbrick uid.
+ *
+ * Returns the result of cbrick_uid_get, which fills in *nic.
+ */
+
+int elsc_nic_get(elsc_t *e, uint64_t *nic, int verbose)
+{
+    /* this parameter included only for SN0 compatibility */
+    verbose = verbose;
+
+    /* We don't go straight to the bedrock/L1 protocol on this one, but let
+     * the eeprom layer prepare the eeprom data as we would like it to
+     * appear to the caller
+     */
+    return cbrick_uid_get( e->nasid, nic );
+}
+
+/*
+ * _elsc_hbt - heartbeat stub; always reports success.  Eventually this
+ * should build an L1 request (opcode & params) and issue it via
+ * elsc_command.
+ */
+int _elsc_hbt(elsc_t *e, int ival, int rdly)
+{
+    /* keep the compiler quiet about unused parameters */
+    (void)e;
+    (void)ival;
+    (void)rdly;
+
+    return 0;
+}
+
+
+/* send a command string to an L1
+ *
+ * compt/rack/bay select the target system controller; cmd is the ASCII
+ * command string handed to the L1 command interpreter.  Returns 0 on
+ * success or a negative ELSC_ERROR_* code.
+ */
+int sc_command_interp( l1sc_t *sc, l1addr_t compt, l1addr_t rack, l1addr_t bay,
+		       char *cmd )
+{
+    char        msg[BRL1_QSIZE];
+    int         len;    /* length of message being sent */
+    int         subch;  /* system controller subchannel used */
+    l1addr_t	target; /* target system controller for command */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+
+    /* Bail out if no subchannel is available, as elsc_partition_set
+     * does; previously a negative sc_open result was passed on to
+     * sc_construct_msg unchecked.
+     */
+    if( (subch = sc_open( sc, L1_ADDR_LOCAL )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    L1_BUILD_ADDR( &target, compt, rack, bay, L1_ADDR_TASK_CMD );
+    if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
+				 target, L1_REQ_EXEC_CMD, 2,
+				 L1_ARG_ASCII, cmd )) < 0 )
+    {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+		   
+    /* send the request to the L1 */
+    if( sc_command( sc, subch, msg, msg, &len ) < 0 )
+    {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( sc, subch );
+    
+    /* check response */
+    if( sc_interpret_resp( msg, 0 ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    return 0;
+}
+
+
+/*
+ * Routines for reading the R-brick's L1
+ */
+
+/* router_module_get - build a moduleid_t for the router reached via
+ * "path" from node "nasid", using the rack/bay reported by its L1.
+ * Returns the module id, MODULE_NOT_SET if the L1 has no location
+ * programmed, or a negative error code.
+ */
+int router_module_get( nasid_t nasid, net_vec_t path )
+{
+    uint rnum, rack, bay, t;
+    int ret;
+    l1sc_t sc;
+
+    /* prepare l1sc_t struct */
+    sc_init( &sc, nasid, path );
+
+    /* construct module ID from rack and slot info */
+
+    if ((ret = elsc_rack_bay_get(&sc, &rnum, &bay)) < 0)
+	return ret;
+
+    /* report unset location info. with a special, otherwise invalid modid */
+    if (rnum == 0 && bay == 0)
+	return MODULE_NOT_SET;
+
+    if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
+	return ELSC_ERROR_MODULE;
+
+    /* Build a moduleid_t-compatible rack number from the decimal
+     * rack value: hundreds digit = class, tens = group, ones = number.
+     */
+
+    rack = 0;		
+    t = rnum / 100;		/* rack class (CPU/IO) */
+    if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
+	return ELSC_ERROR_MODULE;
+    RACK_ADD_CLASS(rack, t);
+    rnum %= 100;
+
+    t = rnum / 10;		/* rack group */
+    if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
+	return ELSC_ERROR_MODULE;
+    RACK_ADD_GROUP(rack, t);
+
+    t = rnum % 10;		/* rack number (one-based) */
+    if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
+	return ELSC_ERROR_MODULE;
+    RACK_ADD_NUM(rack, t);
+
+    ret = RBT_TO_MODULE(rack, bay, MODULE_RBRICK);
+    return ret;
+}
+    
+
+/*
+ * iobrick routines
+ */
+
+/* iobrick_rack_bay_type_get fills in the three int * arguments with the
+ * rack number, bay number and brick type of the L1 being addressed.  Note
+ * that if the L1 operation fails and this function returns an error value, 
+ * garbage may be written to brick_type.
+ *
+ * Returns 0 on success or a negative ELSC_ERROR_* code.
+ */
+int iobrick_rack_bay_type_get( l1sc_t *sc, uint *rack, 
+			       uint *bay, uint *brick_type )
+{
+    char msg[BRL1_QSIZE];       /* L1 request/response info */
+    int subch;                  /* system controller subchannel used */
+    int len;                    /* length of message */
+    uint32_t buf32;	        /* used to copy 32-bit rack & bay out of msg */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    if( (subch = sc_open( sc, L1_ADDR_LOCALIO )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_RRBT, 0 )) < 0 )
+    {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( sc_command( sc, subch, msg, msg, &len ) ) {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( sc, subch );
+
+    /* check response: one int holding packed rack/bay, one int for
+     * the brick type */
+    if( sc_interpret_resp( msg, 4, L1_ARG_INT, &buf32, 
+			           L1_ARG_INT, brick_type ) < 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    /* extract rack/bay info
+     *
+     * note that the 32-bit value returned by the L1 actually
+     * only uses the low-order sixteen bits for rack and bay
+     * information.  A "normal" L1 address puts rack and bay
+     * information in bit positions 12 through 28.  So if
+     * we initially shift the value returned 12 bits to the left,
+     * we can use the L1 addressing #define's to extract the
+     * values we need (see ksys/l1.h for a complete list of the
+     * various fields of an L1 address).
+     */
+    buf32 <<= L1_ADDR_BAY_SHFT;
+
+    *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
+    *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
+
+    return 0;
+}
+
+
+/* iobrick_module_get - build a moduleid_t for the attached iobrick
+ * from the rack/bay/brick-type reported by its L1.  Returns the module
+ * id, MODULE_NOT_SET if the L1 has no location programmed, or a
+ * negative error code.
+ */
+int iobrick_module_get(l1sc_t *sc)
+{
+    uint rnum, rack, bay, brick_type, t;
+    int ret;
+
+    /* construct module ID from rack and slot info */
+
+    if ((ret = iobrick_rack_bay_type_get(sc, &rnum, &bay, &brick_type)) < 0)
+        return ret;
+
+    /* report unset location info. with a special, otherwise invalid modid */
+    if (rnum == 0 && bay == 0)
+        return MODULE_NOT_SET;
+
+    if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
+        return ELSC_ERROR_MODULE;
+
+    /* Build a moduleid_t-compatible rack number from the decimal
+     * rack value: hundreds digit = class, tens = group, ones = number.
+     */
+
+    rack = 0;           
+    t = rnum / 100;             /* rack class (CPU/IO) */
+    if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
+        return ELSC_ERROR_MODULE;
+    RACK_ADD_CLASS(rack, t);
+    rnum %= 100;
+
+    t = rnum / 10;              /* rack group */
+    if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
+        return ELSC_ERROR_MODULE;
+    RACK_ADD_GROUP(rack, t);
+
+    t = rnum % 10;              /* rack number (one-based) */
+    if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
+        return ELSC_ERROR_MODULE;
+    RACK_ADD_NUM(rack, t);
+
+    /* map the ASCII brick-type character from the L1 to a MODULE_*
+     * constant.  NOTE(review): an unrecognized character falls through
+     * with its raw ASCII value — confirm that is intended.
+     */
+    switch( brick_type ) {
+      case 'I': 
+	brick_type = MODULE_IBRICK; break;
+      case 'P':
+	brick_type = MODULE_PBRICK; break;
+      case 'X':
+	brick_type = MODULE_XBRICK; break;
+    }
+
+    ret = RBT_TO_MODULE(rack, bay, brick_type);
+
+    return ret;
+}
+
+/* iobrick_get_sys_snum asks the attached iobrick for the system
+ * serial number.  This function will only be relevant to the master
+ * cbrick (the one attached to the bootmaster ibrick); other nodes
+ * may call the function, but the value returned to the master node
+ * will be the one used as the system serial number by the kernel.
+ *
+ * On success the serial number string is copied into snum_str.
+ * Returns the sc_interpret_resp result, or a negative ELSC_ERROR_*
+ * code if the request could not be built or sent.
+ */
+
+int
+iobrick_get_sys_snum( l1sc_t *sc, char *snum_str )
+{
+    char msg[BRL1_QSIZE];       /* L1 request/response info */
+    int subch;                  /* system controller subchannel used */
+    int len;                    /* length of message */
+    
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+    if( (subch = sc_open( sc, L1_ADDR_LOCALIO )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_SYS_SERIAL, 0 )) < 0 )
+    {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( sc_command( sc, subch, msg, msg, &len ) ) {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( sc, subch );
+
+    /* check response (one ASCII argument: the serial number string) */
+    return( sc_interpret_resp( msg, 2, L1_ARG_ASCII, snum_str ) );
+}
+
+
+/*
+ * The following functions apply (or cut off) power to the specified
+ * pci bus or slot.
+ */
+
+/* iobrick_pci_slot_pwr - power the given pci slot up (up != 0) or
+ * down, by sending a "pci" interpreter command to the iobrick's L1.
+ * Returns 0 on success or a negative ELSC_ERROR_* code.
+ */
+int
+iobrick_pci_slot_pwr( l1sc_t *sc, int bus, int slot, int up )
+{
+    char cmd[BRL1_QSIZE];
+    unsigned rack, bay, brick_type;	/* brick_type queried but unused */
+    if( iobrick_rack_bay_type_get( sc, &rack, &bay, &brick_type ) < 0 )
+	return( ELSC_ERROR_CMD_SEND );
+    sprintf( cmd, "pci %d %d %s", bus, slot,
+	     (up ? "u" : "d") );
+    return( sc_command_interp
+	    ( sc, L1_ADDR_TYPE_L1, rack, bay, cmd ) );
+}
+
+/* iobrick_pci_bus_pwr - power the given pci bus up (up != 0) or down,
+ * by sending a "pci" interpreter command to the iobrick's L1.
+ * Returns 0 on success or a negative ELSC_ERROR_* code.
+ */
+int
+iobrick_pci_bus_pwr( l1sc_t *sc, int bus, int up )
+{
+    char cmd[BRL1_QSIZE];
+    unsigned rack, bay, brick_type;	/* brick_type queried but unused */
+    if( iobrick_rack_bay_type_get( sc, &rack, &bay, &brick_type ) < 0 )
+	return( ELSC_ERROR_CMD_SEND );
+    sprintf( cmd, "pci %d %s", bus, (up ? "u" : "d") );
+    return( sc_command_interp
+	    ( sc, L1_ADDR_TYPE_L1, rack, bay, cmd ) );
+}
+
+
+/* get the L1 firmware version for an iobrick
+ *
+ * Formats "major.minor.bugfix" into result (caller provides a
+ * large-enough buffer).  Returns 0 on success or a negative
+ * ELSC_ERROR_* code.
+ */
+int
+iobrick_sc_version( l1sc_t *sc, char *result )
+{
+    char	msg[BRL1_QSIZE];
+    int		len;    /* length of message being sent */
+    int		subch;  /* system controller subchannel used */
+    int		major,  /* major rev number */
+	        minor,  /* minor rev number */
+                bugfix; /* bugfix rev number */
+
+    /* fill in msg with the opcode & params */
+    bzero( msg, BRL1_QSIZE );
+
+    /* Bail out if no subchannel is available, as iobrick_get_sys_snum
+     * does; previously a negative sc_open result was passed on to
+     * sc_construct_msg unchecked.
+     */
+    if( (subch = sc_open( sc, L1_ADDR_LOCALIO )) < 0 ) {
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
+				 L1_ADDR_TASK_GENERAL,
+				 L1_REQ_FW_REV, 0 )) < 0 )
+    {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_ARGS );
+    }
+
+    /* send the request to the L1 */
+    if( SC_COMMAND(sc, subch, msg, msg, &len ) < 0 )
+    {
+	sc_close( sc, subch );
+	return( ELSC_ERROR_CMD_SEND );
+    }
+
+    /* free up subchannel */
+    sc_close( sc, subch );
+
+    /* check response (three integers: major, minor, bugfix) */
+    if( sc_interpret_resp( msg, 6, L1_ARG_INT, &major,
+			   L1_ARG_INT, &minor, L1_ARG_INT, &bugfix )
+	< 0 )
+    {
+	return( ELSC_ERROR_RESP_FORMAT );
+    }
+
+    sprintf( result, "%d.%d.%d", major, minor, bugfix );
+
+    return 0;
+}
+
+
+
+/* elscuart routines 
+ *
+ * Most of the elscuart functionality is implemented in l1.c.  The following
+ * is directly "recycled" from elsc.c.
+ */
+
+
+/*
+ * _elscuart_puts
+ *
+ * Write the NUL-terminated string s to the elsc uart one character at
+ * a time.  A NULL pointer is rendered as "<NULL>".  Returns 0 on
+ * success, -1 if _elscuart_putc fails.
+ */
+
+int _elscuart_puts(elsc_t *e, char *s)
+{
+    int			c;
+
+    if (s == 0)
+	s = "<NULL>";
+
+    while ((c = LBYTE(s)) != 0) {
+	if (_elscuart_putc(e, c) < 0)
+	    return -1;
+	s++;
+    }
+
+    return 0;
+}
+
+
+/*
+ * elscuart wrapper routines
+ *
+ *   The following routines are similar to their counterparts in l1.c,
+ *   except instead of taking an elsc_t pointer directly, they call
+ *   a global routine "get_elsc" to obtain the pointer.
+ *   This is useful when the elsc is employed for stdio.
+ */
+
+/* probe for the system controller uart */
+int elscuart_probe(void)
+{
+    return _elscuart_probe(get_elsc());
+}
+
+/* initialize the system controller uart */
+void elscuart_init(void *init_data)
+{
+    _elscuart_init(get_elsc());
+    /* dummy variable included for driver compatability */
+    init_data = init_data;
+}
+
+/* poll for input */
+int elscuart_poll(void)
+{
+    return _elscuart_poll(get_elsc());
+}
+
+/* read a character (non-translated variant) */
+int elscuart_readc(void)
+{
+    return _elscuart_readc(get_elsc());
+}
+
+/* read a character */
+int elscuart_getc(void)
+{
+    return _elscuart_getc(get_elsc());
+}
+
+/* write a NUL-terminated string */
+int elscuart_puts(char *s)
+{
+    return _elscuart_puts(get_elsc(), s);
+}
+
+/* write a single character */
+int elscuart_putc(int c)
+{
+    return _elscuart_putc(get_elsc(), c);
+}
+
+/* flush buffered output */
+int elscuart_flush(void)
+{
+    return _elscuart_flush(get_elsc());
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/labelcl.c linux/arch/ia64/sn/io/labelcl.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/labelcl.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/labelcl.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,667 @@
+/*  labelcl - SGI's Hwgraph Compatibility Layer.
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Library General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Library General Public License for more details.
+
+    You should have received a copy of the GNU Library General Public
+    License along with this library; if not, write to the Free
+    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+    Colin Ngam may be reached by email at cngam@sgi.com
+
+*/
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <linux/devfs_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+/*
+** Very simple and dumb string table that supports only find/insert.
+** In practice, if this table gets too large, we may need a more
+** efficient data structure.   Also note that currently there is no 
+** way to delete an item once it's added.  Therefore, name collision 
+** will return an error.
+*/
+
+struct string_table label_string_table;
+
+
+
+/*
+ * string_table_init - Initialize the given string table.
+ *
+ * Clears the item list and resets the generation counter that
+ * string_table_insert uses to detect concurrent modification.
+ */
+void
+string_table_init(struct string_table *string_table)
+{
+	string_table->string_table_head = NULL;
+	string_table->string_table_generation = 0;
+
+	/*
+	 * We nedd to initialize locks here!
+	 */
+
+	return;
+}
+
+
+/*
+ * string_table_destroy - Destroy the given string table.
+ *
+ * Frees every item on the list; the table header itself is owned by
+ * the caller and is not freed.
+ */
+void
+string_table_destroy(struct string_table *string_table)
+{
+	struct string_table_item *item, *next_item;
+
+	item = string_table->string_table_head;
+	while (item) {
+		/* save the link before freeing the node */
+		next_item = item->next;
+
+		STRTBL_FREE(item);
+		item = next_item;
+	}
+
+	/*
+	 * We need to destroy whatever lock we have here
+	 */
+
+	return;
+}
+
+
+
+/*
+ * string_table_insert - Insert an entry in the string table .. duplicate 
+ *	names are not allowed.
+ *
+ * Returns a pointer to the table's canonical copy of "name" — either
+ * the existing entry or a newly allocated one.  The generation counter
+ * is used to detect a concurrent insert between allocation and list
+ * linkage; locking itself is still a TODO (see comments below).
+ */
+char *
+string_table_insert(struct string_table *string_table, char *name)
+{
+	struct string_table_item *item, *new_item = NULL, *last_item = NULL;
+
+again:
+	/*
+	 * Need to lock the table ..
+	 */
+	item = string_table->string_table_head;
+	last_item = NULL;
+
+	/* linear scan for an existing entry with the same name */
+	while (item) {
+		if (!strcmp(item->string, name)) {
+			/*
+			 * If we allocated space for the string and the found that
+			 * someone else already entered it into the string table,
+			 * free the space we just allocated.
+			 */
+			if (new_item)
+				STRTBL_FREE(new_item);
+
+
+			/*
+			 * Search optimization: move the found item to the head
+			 * of the list.
+			 */
+			if (last_item != NULL) {
+				last_item->next = item->next;
+				item->next = string_table->string_table_head;
+				string_table->string_table_head = item;
+			}
+			goto out;
+		}
+		last_item = item;
+		item=item->next;
+	}
+
+	/*
+	 * name was not found, so add it to the string table.
+	 */
+	if (new_item == NULL) {
+		long old_generation = string_table->string_table_generation;
+
+		/* NOTE(review): STRTBL_ALLOC's size handling (room for the
+		 * terminating NUL) is defined elsewhere — confirm it
+		 * accounts for strlen(name)+1 before the strcpy below.
+		 */
+		new_item = STRTBL_ALLOC(strlen(name));
+
+		strcpy(new_item->string, name);
+
+		/*
+		 * While we allocated memory for the new string, someone else 
+		 * changed the string table.
+		 */
+		if (old_generation != string_table->string_table_generation) {
+			goto again;
+		}
+	} else {
+		/* At this we only have the string table lock in access mode.
+		 * Promote the access lock to an update lock for the string
+		 * table insertion below.
+		 */
+			long old_generation = 
+				string_table->string_table_generation;
+
+			/*
+			 * After we did the unlock and wer waiting for update
+			 * lock someone could have potentially updated
+			 * the string table. Check the generation number
+			 * for this case. If it is the case we have to
+			 * try all over again.
+			 */
+			if (old_generation != 
+			    string_table->string_table_generation) {
+				goto again;
+			}
+		}
+
+	/*
+	 * At this point, we're committed to adding new_item to the string table.
+	 */
+	new_item->next = string_table->string_table_head;
+	item = string_table->string_table_head = new_item;
+	string_table->string_table_generation++;
+
+out:
+	/*
+	 * Need to unlock here.
+	 */
+	return(item->string);
+}
+
+/*
+ * labelcl_info_create - Creates the data structure that will hold the
+ *	device private information asscoiated with a devfs entry.
+ *	The pointer to this structure is what gets stored in the devfs 
+ *	(void * info).
+ *
+ *	Returns the zeroed, magic-stamped structure, or NULL if the
+ *	allocation fails.
+ */
+labelcl_info_t *
+labelcl_info_create()
+{
+
+	labelcl_info_t *new = NULL;
+
+	/* Initial allocation does not include any area for labels */
+	if ( ( new = (labelcl_info_t *)kmalloc (sizeof(labelcl_info_t), GFP_KERNEL) ) == NULL )
+		return NULL;
+
+	memset (new, 0, sizeof(labelcl_info_t));
+	new->hwcl_magic = LABELCL_MAGIC;
+	return( new);
+
+}
+
+/*
+ * labelcl_info_destroy - Frees the data structure that holds the
+ *      device private information asscoiated with a devfs entry.  This 
+ *	data structure was created by device_info_create().
+ *
+ *	The caller is responsible for nulling the (void *info) in the 
+ *	corresponding devfs entry.
+ *
+ *	Always returns 0; a NULL argument is a no-op.
+ */
+int
+labelcl_info_destroy(labelcl_info_t *labelcl_info)
+{
+
+	if (labelcl_info == NULL)
+		return(0);
+
+	/* Free the label list */
+	if (labelcl_info->label_list)
+		kfree(labelcl_info->label_list);
+
+	/* Now free the label info area (clear the magic first so a
+	 * dangling pointer is detected by the magic checks) */
+	labelcl_info->hwcl_magic = 0;
+	kfree(labelcl_info);
+
+	return(0);
+}
+
+/*
+ * labelcl_info_add_LBL - Adds a new label entry in the labelcl info 
+ *	structure.
+ *
+ *	Error is returned if we find another label with the same name.
+ *
+ *	The label list is grown by reallocating a list one entry larger,
+ *	copying the old entries, and freeing the old list.  Returns 0 on
+ *	success, -1 on any failure (bad args, bad magic, allocation
+ *	failure, or duplicate name).
+ */
+int
+labelcl_info_add_LBL(devfs_handle_t de,
+			char *info_name,
+			arb_info_desc_t info_desc,
+			arbitrary_info_t info)
+{
+	labelcl_info_t	*labelcl_info = NULL;
+	int num_labels;
+	int new_label_list_size;
+	label_info_t *old_label_list, *new_label_list = NULL;
+	char *name;
+	int i;
+
+	if (de == NULL)
+		return(-1);
+
+        labelcl_info = devfs_get_info(de);
+	if (labelcl_info == NULL)
+		return(-1);
+
+	if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+		return(-1);
+
+	if (info_name == NULL)
+		return(-1);
+
+	if (strlen(info_name) >= LABEL_LENGTH_MAX)
+		return(-1);
+
+	/* canonicalize the name through the shared string table */
+	name = string_table_insert(&label_string_table, info_name);
+
+	num_labels = labelcl_info->num_labels;
+	new_label_list_size = sizeof(label_info_t) * (num_labels+1);
+
+	/*
+	 * Create a new label info area.
+	 */
+	if (new_label_list_size != 0) {
+		new_label_list = (label_info_t *) kmalloc(new_label_list_size, GFP_KERNEL);
+
+		if (new_label_list == NULL)
+			return(-1);
+	}
+
+	/*
+	 * At this point, we are committed to adding the labelled info, 
+	 * if there isn't already information there with the same name.
+	 */
+	old_label_list = labelcl_info->label_list;
+
+	/* 
+	 * Look for matching info name.
+	 */
+	for (i=0; i<num_labels; i++) {
+		if (!strcmp(info_name, old_label_list[i].name)) {
+			/* Not allowed to add duplicate labelled info names. */
+			kfree(new_label_list);
+			printk("labelcl_info_add_LBL: Duplicate label name %s for vertex 0x%p\n", info_name, de);
+			return(-1);
+		}
+		new_label_list[i] = old_label_list[i]; /* structure copy */
+	}
+
+	/* append the new entry */
+	new_label_list[num_labels].name = name;
+	new_label_list[num_labels].desc = info_desc;
+	new_label_list[num_labels].info = info;
+
+	labelcl_info->num_labels = num_labels+1;
+	labelcl_info->label_list = new_label_list;
+
+	if (old_label_list != NULL)
+		kfree(old_label_list);
+
+	return(0);
+}
+
+/*
+ * labelcl_info_remove_LBL - Remove a label entry.
+ *
+ * Shrinks the label list by allocating a list one entry smaller,
+ * copying every entry except the one named info_name, and freeing the
+ * old list.  On success the removed entry's desc/info are returned
+ * through the optional out parameters and 0 is returned; -1 on any
+ * failure (bad args, bad magic, allocation failure, or name not found).
+ */
+int
+labelcl_info_remove_LBL(devfs_handle_t de,
+			 char *info_name,
+			 arb_info_desc_t *info_desc,
+			 arbitrary_info_t *info)
+{
+	labelcl_info_t	*labelcl_info = NULL;
+	int num_labels;
+	int new_label_list_size;
+	label_info_t *old_label_list, *new_label_list = NULL;
+	arb_info_desc_t label_desc_found;
+	arbitrary_info_t label_info_found;
+	int i;
+
+	if (de == NULL)
+		return(-1);
+
+	labelcl_info = devfs_get_info(de);
+	if (labelcl_info == NULL)
+		return(-1);
+
+	if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+		return(-1);
+
+	num_labels = labelcl_info->num_labels;
+	if (num_labels == 0) {
+		return(-1);
+	}
+
+	/*
+	 * Create a new info area.
+	 */
+	new_label_list_size = sizeof(label_info_t) * (num_labels-1);
+	if (new_label_list_size) {
+		new_label_list = (label_info_t *) kmalloc(new_label_list_size, GFP_KERNEL);
+		if (new_label_list == NULL)
+			return(-1);
+	}
+
+	/*
+	 * At this point, we are committed to removing the labelled info, 
+	 * if it still exists.
+	 */
+	old_label_list = labelcl_info->label_list;
+
+	/* 
+	 * Find matching info name.
+	 */
+	for (i=0; i<num_labels; i++) {
+		if (!strcmp(info_name, old_label_list[i].name)) {
+			label_desc_found = old_label_list[i].desc;
+			label_info_found = old_label_list[i].info;
+			goto found;
+		}
+		if (i < num_labels-1) /* avoid walking off the end of the new vertex */
+			new_label_list[i] = old_label_list[i]; /* structure copy */
+	}
+
+	/* The named info doesn't exist. */
+	if (new_label_list)
+		kfree(new_label_list);
+
+	return(-1);
+
+found:
+	/* Finish up rest of labelled info */
+	for (i=i+1; i<num_labels; i++)
+		new_label_list[i-1] = old_label_list[i]; /* structure copy */
+
+	/* BUG FIX: removing an entry shrinks the list; this previously
+	 * set num_labels+1, leaving num_labels two larger than the
+	 * allocated list and causing out-of-bounds reads later.
+	 */
+	labelcl_info->num_labels = num_labels-1;
+	labelcl_info->label_list = new_label_list;
+
+	kfree(old_label_list);
+
+	if (info != NULL)
+		*info = label_info_found;
+
+	if (info_desc != NULL)
+		*info_desc = label_desc_found;
+
+	return(0);
+}
+
+
+/*
+ * labelcl_info_replace_LBL - Replace an existing label entry with the 
+ *	given new information.
+ *
+ *	Label entry must exist.
+ *
+ *	The previous desc/info are returned through the optional
+ *	old_info_desc/old_info out parameters.  Returns 0 on success,
+ *	-1 on bad args, bad magic, or if info_name is not present.
+ */
+int
+labelcl_info_replace_LBL(devfs_handle_t de,
+			char *info_name,
+			arb_info_desc_t info_desc,
+			arbitrary_info_t info,
+			arb_info_desc_t *old_info_desc,
+			arbitrary_info_t *old_info)
+{
+	labelcl_info_t	*labelcl_info = NULL;
+	int num_labels;
+	label_info_t *label_list;
+	int i;
+
+	if (de == NULL)
+		return(-1);
+
+	labelcl_info = devfs_get_info(de);
+	if (labelcl_info == NULL)
+		return(-1);
+
+	if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+		return(-1);
+
+	num_labels = labelcl_info->num_labels;
+	if (num_labels == 0) {
+		return(-1);
+	}
+
+	if (info_name == NULL)
+		return(-1);
+
+	label_list = labelcl_info->label_list;
+
+	/* 
+	 * Verify that information under info_name already exists.
+	 */
+	for (i=0; i<num_labels; i++)
+		if (!strcmp(info_name, label_list[i].name)) {
+			if (old_info != NULL)
+				*old_info = label_list[i].info;
+
+			if (old_info_desc != NULL)
+				*old_info_desc = label_list[i].desc;
+
+			/* replace in place; list size is unchanged */
+			label_list[i].info = info;
+			label_list[i].desc = info_desc;
+
+			return(0);
+		}
+
+
+	return(-1);
+}
+
+/*
+ * labelcl_info_get_LBL - Retrieve and return the information for the 
+ *	given label entry.
+ */
+int
+labelcl_info_get_LBL(devfs_handle_t de,
+		      char *info_name,
+		      arb_info_desc_t *info_desc,
+		      arbitrary_info_t *info)
+{
+	labelcl_info_t	*labelcl_info = NULL;
+	int num_labels;
+	label_info_t *label_list;
+	int i;
+
+	if (de == NULL)
+		return(-1);
+
+	labelcl_info = devfs_get_info(de);
+	if (labelcl_info == NULL)
+		return(-1);
+
+	if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+		return(-1);
+
+	num_labels = labelcl_info->num_labels;
+	if (num_labels == 0) {
+		return(-1);
+	}
+
+	label_list = labelcl_info->label_list;
+
+	/* 
+	 * Find information under info_name.
+	 */
+	for (i=0; i<num_labels; i++)
+		if (!strcmp(info_name, label_list[i].name)) {
+			if (info != NULL)
+				*info = label_list[i].info;
+			if (info_desc != NULL)
+				*info_desc = label_list[i].desc;
+
+			return(0);
+		}
+
+	return(-1);
+}
+
+/*
+ * labelcl_info_get_next_LBL - returns the next label entry on the list.
+ */
+int
+labelcl_info_get_next_LBL(devfs_handle_t de,
+			   char *buffer,
+			   arb_info_desc_t *info_descp,
+			   arbitrary_info_t *infop,
+			   labelcl_info_place_t *placeptr)
+{
+	labelcl_info_t	*labelcl_info = NULL;
+	uint which_info;
+	label_info_t *label_list;
+
+	if ((buffer == NULL) && (infop == NULL))
+		return(-1);
+
+	if (placeptr == NULL)
+		return(-1);
+
+	if (de == NULL)
+		return(-1);
+
+	labelcl_info = devfs_get_info(de);
+	if (labelcl_info == NULL)
+		return(-1);
+
+	if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+		return(-1);
+
+	which_info = *placeptr;
+
+	if (which_info >= labelcl_info->num_labels) {
+		return(-1);
+	}
+
+	label_list = (label_info_t *) labelcl_info->label_list;
+
+	if (buffer != NULL)
+		strcpy(buffer, label_list[which_info].name);
+
+	if (infop)
+		*infop = label_list[which_info].info;
+
+	if (info_descp)
+		*info_descp = label_list[which_info].desc;
+
+	*placeptr = which_info + 1;
+
+	return(0);
+}
+
+
+int
+labelcl_info_replace_IDX(devfs_handle_t de,
+			int index,
+			arbitrary_info_t info,
+			arbitrary_info_t *old_info)
+{
+	arbitrary_info_t *info_list_IDX;
+	labelcl_info_t	*labelcl_info = NULL;
+
+	if (de == NULL) {
+		printk(KERN_ALERT "labelcl: NULL devfs handle given.\n");
+		return(-1);
+	}
+
+	labelcl_info = devfs_get_info(de);
+	if (labelcl_info == NULL) {
+		printk(KERN_ALERT "labelcl: Entry does not have info pointer.\n");
+		return(-1);
+	}
+
+	if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+		return(-1);
+
+	if ( (index < 0) || (index >= HWGRAPH_NUM_INDEX_INFO) )
+		return(-1);
+
+	/*
+	 * Replace information at the appropriate index in this vertex with 
+	 * the new info.
+	 */
+	info_list_IDX = labelcl_info->IDX_list;
+	if (old_info != NULL)
+		*old_info = info_list_IDX[index];
+	info_list_IDX[index] = info;
+
+	return(0);
+
+}
+
+/*
+ * labelcl_info_connectpt_set - Sets the connectpt.
+ */
+int
+labelcl_info_connectpt_set(struct devfs_entry *de,
+			  struct devfs_entry *connect_de)
+{
+	arbitrary_info_t old_info;
+	int	rv;
+
+	rv = labelcl_info_replace_IDX(de, HWGRAPH_CONNECTPT, 
+		(arbitrary_info_t) connect_de, &old_info);
+
+	if (rv) {
+		return(rv);
+	}
+
+	return(0);
+}
+
+
+/*
+ * labelcl_info_get_IDX - Returns the information pointed at by index.
+ *
+ */
+int
+labelcl_info_get_IDX(devfs_handle_t de,
+			int index,
+			arbitrary_info_t *info)
+{
+	arbitrary_info_t *info_list_IDX;
+	labelcl_info_t	*labelcl_info = NULL;
+
+	if (de == NULL)
+		return(-1);
+
+	labelcl_info = devfs_get_info(de);
+	if (labelcl_info == NULL)
+		return(-1);
+
+	if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+		return(-1);
+
+	if ( (index < 0) || (index >= HWGRAPH_NUM_INDEX_INFO) )
+		return(-1);
+
+	/*
+	 * Return information at the appropriate index in this vertex.
+	 */
+	info_list_IDX = labelcl_info->IDX_list;
+	if (info != NULL)
+		*info = info_list_IDX[index];
+
+	return(0);
+}
+
+/*
+ * labelcl_info_connectpt_get - Retrieve the connect point for a device entry.
+ */
+struct devfs_entry *
+labelcl_info_connectpt_get(struct devfs_entry *de)
+{
+	int rv;
+	arbitrary_info_t info;
+
+	rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
+	if (rv)
+		return(NULL);
+
+	return((struct devfs_entry *)info);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/mem_refcnt.c linux/arch/ia64/sn/io/mem_refcnt.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/mem_refcnt.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/mem_refcnt.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,234 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/hubspc.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/mem_refcnt.h>
+#include <asm/sn/hwcntrs.h>
+// From numa_hw.h
+
+#define MIGR_COUNTER_MAX_GET(nodeid) \
+        (NODEPDA_MCD((nodeid))->migr_system_kparms.migr_threshold_reference)
+/*
+ * Get the Absolute Threshold
+ */
+#define MIGR_THRESHOLD_ABS_GET(nodeid) ( \
+        MD_MIG_VALUE_THRESH_GET(COMPACT_TO_NASID_NODEID(nodeid)))
+/*
+ * Get the current Differential Threshold
+ */
+#define MIGR_THRESHOLD_DIFF_GET(nodeid) \
+        (NODEPDA_MCD(nodeid)->migr_as_kparms.migr_base_threshold)
+
+#define NUM_OF_HW_PAGES_PER_SW_PAGE()   (NBPP / MD_PAGE_SIZE)
+
+// #include "migr_control.h"
+
+int
+mem_refcnt_attach(devfs_handle_t hub)
+{
+        devfs_handle_t refcnt_dev;
+        
+        hwgraph_char_device_add(hub,
+                                "refcnt",
+                                "hubspc_", 
+				&refcnt_dev);
+        device_info_set(refcnt_dev, (void*)(ulong)HUBSPC_REFCOUNTERS);
+
+        return (0);
+}
+
+
+/*ARGSUSED*/
+int
+mem_refcnt_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp)
+{
+        cnodeid_t node;
+#ifndef CONFIG_IA64_SGI_SN1
+	extern int numnodes;
+#endif
+        
+        ASSERT( (hubspc_subdevice_t)(ulong)device_info_get(*devp) == HUBSPC_REFCOUNTERS );
+
+        if (!cap_able(CAP_MEMORY_MGT)) {
+                return (EPERM);
+        }
+
+        node = master_node_get(*devp);
+
+        ASSERT( (node >= 0) && (node < numnodes) );
+
+        if (NODEPDA(node)->migr_refcnt_counterbuffer == NULL) {
+                return (ENODEV);
+        }
+
+        ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
+        ASSERT( NODEPDA(node)->migr_refcnt_cbsize != (size_t)0 );
+
+        return (0);
+}
+
+/*ARGSUSED*/
+int
+mem_refcnt_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
+{
+        return 0;
+}
+
+/*ARGSUSED*/
+int
+mem_refcnt_mmap(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
+{
+        cnodeid_t node;
+        int errcode;
+        char* buffer;
+        size_t blen;
+#ifndef CONFIG_IA64_SGI_SN1
+	extern int numnodes;
+#endif
+        
+        ASSERT( (hubspc_subdevice_t)(ulong)device_info_get(dev) == HUBSPC_REFCOUNTERS );
+
+        node = master_node_get(dev);
+
+        ASSERT( (node >= 0) && (node < numnodes) );
+
+        ASSERT( NODEPDA(node)->migr_refcnt_counterbuffer != NULL);
+        ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
+        ASSERT( NODEPDA(node)->migr_refcnt_cbsize != 0 );
+
+        /*
+         * XXXX deal with prot's somewhere around here....
+         */
+
+        buffer = NODEPDA(node)->migr_refcnt_counterbuffer;
+        blen = NODEPDA(node)->migr_refcnt_cbsize;
+
+        /*
+         * Force offset to be a multiple of sizeof(refcnt_t)
+         * We round up.
+         */
+
+        off = (((off - 1)/sizeof(refcnt_t)) + 1) * sizeof(refcnt_t);
+
+        if ( ((buffer + blen) - (buffer + off + len)) < 0 ) {
+                return (EPERM);
+        }
+
+        errcode = v_mapphys(vt,
+                            buffer + off,
+                            len);
+
+        return errcode;
+}
+
+/*ARGSUSED*/
+int
+mem_refcnt_unmap(devfs_handle_t dev, vhandl_t *vt)
+{
+        return 0;
+}
+
+/* ARGSUSED */
+int
+mem_refcnt_ioctl(devfs_handle_t dev,
+                 int cmd,
+                 void *arg,
+                 int mode,
+                 cred_t *cred_p,
+                 int *rvalp)
+{
+        cnodeid_t node;
+        int errcode;
+	extern int numnodes;
+        
+        ASSERT( (hubspc_subdevice_t)(ulong)device_info_get(dev) == HUBSPC_REFCOUNTERS );
+
+        node = master_node_get(dev);
+
+        ASSERT( (node >= 0) && (node < numnodes) );
+
+        ASSERT( NODEPDA(node)->migr_refcnt_counterbuffer != NULL);
+        ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
+        ASSERT( NODEPDA(node)->migr_refcnt_cbsize != 0 );
+
+        errcode = 0;
+        
+        switch (cmd) {
+        case RCB_INFO_GET:
+        {
+                rcb_info_t rcb;
+                
+                rcb.rcb_len = NODEPDA(node)->migr_refcnt_cbsize;
+                
+                rcb.rcb_sw_sets = NODEPDA(node)->migr_refcnt_numsets;
+                rcb.rcb_sw_counters_per_set = numnodes;
+                rcb.rcb_sw_counter_size = sizeof(refcnt_t);
+
+                rcb.rcb_base_pages = NODEPDA(node)->migr_refcnt_numsets /
+                                     NUM_OF_HW_PAGES_PER_SW_PAGE();  
+                rcb.rcb_base_page_size = NBPP;
+                rcb.rcb_base_paddr = ctob(slot_getbasepfn(node, 0));
+                
+                rcb.rcb_cnodeid = node;
+                rcb.rcb_granularity = MD_PAGE_SIZE;
+#ifdef notyet
+                rcb.rcb_hw_counter_max = MIGR_COUNTER_MAX_GET(node);
+                rcb.rcb_diff_threshold = MIGR_THRESHOLD_DIFF_GET(node);
+#endif
+                rcb.rcb_abs_threshold = MIGR_THRESHOLD_ABS_GET(node);
+                rcb.rcb_num_slots = node_getnumslots(node);
+
+                if (COPYOUT(&rcb, arg, sizeof(rcb_info_t))) {
+                        errcode = EFAULT;
+                }
+
+                break;
+        }
+        case RCB_SLOT_GET:
+        {
+                rcb_slot_t slot[MAX_MEM_SLOTS];
+                int s;
+                int nslots;
+
+                nslots = node_getnumslots(node);
+                ASSERT(nslots <= MAX_MEM_SLOTS);
+                for (s = 0; s < nslots; s++) {
+                        slot[s].base = (uint64_t)ctob(slot_getbasepfn(node, s));
+#ifdef notyet
+                        slot[s].size  = (uint64_t)ctob(slot_getsize(node, s));
+#else
+                        slot[s].size  = (uint64_t)1;
+#endif
+                }
+                if (COPYOUT(&slot[0], arg, nslots * sizeof(rcb_slot_t))) {
+                        errcode = EFAULT;
+                }
+                
+                *rvalp = nslots;
+                break;
+        }
+                
+        default:
+                errcode = EINVAL;
+                break;
+
+        }
+        
+        return errcode;
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/ml_SN_init.c linux/arch/ia64/sn/io/ml_SN_init.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/ml_SN_init.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/ml_SN_init.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,661 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/nodemask.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/synergy.h>
+
+
+#if defined (CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/ip27config.h>
+#include <asm/sn/sn1/hubdev.h>
+#include <asm/sn/sn1/sn1.h>
+#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+
+
+extern int numcpus;
+extern char arg_maxnodes[];
+extern cpuid_t master_procid;
+extern void * kmem_alloc_node(register size_t, register int , cnodeid_t);
+extern synergy_da_t    *Synergy_da_indr[];
+
+extern int hasmetarouter;
+
+int		maxcpus;
+cpumask_t	boot_cpumask;
+hubreg_t	region_mask = 0;
+
+
+extern xwidgetnum_t hub_widget_id(nasid_t);
+
+#ifndef CONFIG_IA64_SGI_IO
+#if defined (IP27)
+short		cputype = CPU_IP27;
+#elif defined (IP33)
+short		cputype = CPU_IP33;
+#elif defined (IP35)
+short		cputype = CPU_IP35;
+#else
+#error <BOMB! define new cputype here >
+#endif
+#endif /* CONFIG_IA64_SGI_IO */
+
+static int	fine_mode = 0;
+
+#ifndef CONFIG_IA64_SGI_IO
+/* Global variables */
+pdaindr_t	pdaindr[MAXCPUS];
+#endif
+
+static cnodemask_t	hub_init_mask;	/* Mask of cpu in a node doing init */
+static volatile cnodemask_t hub_init_done_mask;
+					/* Node mask where we wait for
+					 * per hub initialization
+					 */
+spinlock_t		hub_mask_lock;  /* Lock for hub_init_mask above. */
+
+extern int valid_icache_reasons;	/* Reasons to flush the icache */
+extern int valid_dcache_reasons;	/* Reasons to flush the dcache */
+extern int numnodes;
+extern u_char miniroot;
+extern volatile int	need_utlbmiss_patch;
+extern void iograph_early_init(void);
+
+nasid_t master_nasid = INVALID_NASID;
+
+
+/*
+ * mlreset(int slave)
+ * 	very early machine reset - at this point NO interrupts have been
+ * 	enabled; nor is memory, tlb, p0, etc setup.
+ *
+ * 	slave is zero when mlreset is called for the master processor and
+ *	is nonzero thereafter.
+ */
+
+
+void
+mlreset(int slave)
+{
+	if (!slave) {
+		/*
+		 * We are the master cpu and node.
+		 */ 
+		master_nasid = get_nasid();
+		set_master_bridge_base();
+		FIXME("mlreset: Enable when we support ioc3 ..");
+#ifndef CONFIG_IA64_SGI_IO
+		if (get_console_nasid() == master_nasid) 
+			/* Set up the IOC3 */
+			ioc3_mlreset((ioc3_cfg_t *)KL_CONFIG_CH_CONS_INFO(master_nasid)->config_base,
+				     (ioc3_mem_t *)KL_CONFIG_CH_CONS_INFO(master_nasid)->memory_base);
+
+		/*
+		 * Initialize Master nvram base.
+		 */
+		nvram_baseinit();
+
+		fine_mode = is_fine_dirmode();
+#endif /* CONFIG_IA64_SGI_IO */
+
+		/* We're the master processor */
+		master_procid = smp_processor_id();
+		master_nasid = cpuid_to_nasid(master_procid);
+
+		/*
+		 * master_nasid we get back better be same as one from
+		 * get_nasid()
+		 */
+		ASSERT_ALWAYS(master_nasid == get_nasid());
+
+#ifndef CONFIG_IA64_SGI_IO
+
+	/*
+	 * Activate when calias is implemented.
+	 */
+		/* Set all nodes' calias sizes to 8k */
+		for (i = 0; i < maxnodes; i++) {
+			nasid_t nasid;
+			int	sn;
+
+			nasid = COMPACT_TO_NASID_NODEID(i);
+
+			/*
+			 * Always have node 0 in the region mask, otherwise CALIAS accesses
+			 * get exceptions since the hub thinks it is a node 0 address.
+			 */
+			for (sn=0; sn<NUM_SUBNODES; sn++) {
+				REMOTE_HUB_PI_S(nasid, sn, PI_REGION_PRESENT, (region_mask | 1));
+				REMOTE_HUB_PI_S(nasid, sn, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
+			}
+
+			/*
			 * Set up all hubs to have a big window pointing at
+			 * widget 0.
+			 * Memory mode, widget 0, offset 0
+			 */
+			REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
+				((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
+				(0 << IIO_ITTE_WIDGET_SHIFT)));
+		}
+#endif /* CONFIG_IA64_SGI_IO */
+
+		/* Set up the hub initialization mask and init the lock */
+		CNODEMASK_CLRALL(hub_init_mask);
+		CNODEMASK_CLRALL(hub_init_done_mask);
+
+		spin_lock_init(&hub_mask_lock);
+
+		/* early initialization of iograph */
+		iograph_early_init();
+
+		/* Initialize Hub Pseudodriver Management */
+		hubdev_init();
+
+#ifndef CONFIG_IA64_SGI_IO
+		/*
+		 * Our IO system doesn't require cache writebacks.  Set some
+		 * variables appropriately.
+		 */
+		cachewrback = 0;
+		valid_icache_reasons &= ~(CACH_AVOID_VCES | CACH_IO_COHERENCY);
+		valid_dcache_reasons &= ~(CACH_AVOID_VCES | CACH_IO_COHERENCY);
+
+		/*
+		 * make sure we are running with the right rev of chips
+		 */
+		verify_snchip_rev();
+
+		/*
+                 * Since we've wiped out memory at this point, we
+                 * need to reset the ARCS vector table so that it
+                 * points to appropriate functions in the kernel
+                 * itself.  In this way, we can maintain the ARCS
+                 * vector table conventions without having to actually
+                 * keep redundant PROM code in memory.
+                 */
+		he_arcs_set_vectors();
+#endif /* CONFIG_IA64_SGI_IO */
+
+	} else { /* slave != 0 */
+		/*
+		 * This code is performed ONLY by slave processors.
+		 */
+
+	}
+}
+
+
+/* XXX - Move the meat of this to intr.c ? */
+/*
+ * Set up the platform-dependent fields in the nodepda.
+ */
+void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
+{
+	hubinfo_t hubinfo;
+	int	  sn;
+	cnodeid_t i;
+	ushort *numcpus_p;
+
+	extern void router_map_init(nodepda_t *);
+	extern void router_queue_init(nodepda_t *,cnodeid_t);
+#if defined(DEBUG)
+	extern lock_t		intr_dev_targ_map_lock;
+	extern uint64_t 	intr_dev_targ_map_size;
+
+	/* Initialize the lock to access the device - target cpu mapping
+	 * table. This table is explicitly for debugging purposes only and
+	 * to aid the "intrmap" idbg command
+	 */
+	if (node == 0) {
+		/* Make sure we do this only once .
+		 * There is always a cnode 0 present.
+		 */
+		intr_dev_targ_map_size = 0;
+		init_spinlock(&intr_dev_targ_map_lock,"dtmap_lock",0);
+	}
+#endif	/* DEBUG */
+	/* Allocate per-node platform-dependent data */
+	hubinfo = (hubinfo_t)kmem_alloc_node(sizeof(struct hubinfo_s), GFP_ATOMIC, node);
+
+	ASSERT_ALWAYS(hubinfo);
+	npda->pdinfo = (void *)hubinfo;
+	hubinfo->h_nodepda = npda;
+	hubinfo->h_cnodeid = node;
+	hubinfo->h_nasid = COMPACT_TO_NASID_NODEID(node);
+
+	printk("init_platform_nodepda: hubinfo 0x%p, &hubinfo->h_crblock 0x%p\n", hubinfo, &hubinfo->h_crblock);
+
+	spin_lock_init(&hubinfo->h_crblock);
+
+	hubinfo->h_widgetid = hub_widget_id(hubinfo->h_nasid);
+	npda->xbow_peer = INVALID_NASID;
+	/* Initialize the linked list of
+	 * router info pointers to the dependent routers
+	 */
+	npda->npda_rip_first = NULL;
+	/* npda_rip_last always points to the place
+	 * where the next element is to be inserted
+	 * into the list 
+	 */
+	npda->npda_rip_last = &npda->npda_rip_first;
+	npda->dependent_routers = 0;
+	npda->module_id = INVALID_MODULE;
+
+	/*
+	 * Initialize the subnodePDA.
+	 */
+	for (sn=0; sn<NUM_SUBNODES; sn++) {
+		SNPDA(npda,sn)->prof_count = 0;
+		SNPDA(npda,sn)->next_prof_timeout = 0;
+// ajm
+#ifndef CONFIG_IA64_SGI_IO
+		intr_init_vecblk(npda, node, sn);
+#endif
+	}
+
+	npda->vector_unit_busy = 0;
+
+	spin_lock_init(&npda->vector_lock);
+	init_MUTEX_LOCKED(&npda->xbow_sema); /* init it locked? */
+	spin_lock_init(&npda->fprom_lock);
+
+	spin_lock_init(&npda->node_utlbswitchlock);
+	npda->ni_error_print = 0;
+#ifndef CONFIG_IA64_SGI_IO
+	if (need_utlbmiss_patch) {
+		npda->node_need_utlbmiss_patch = 1;
+		npda->node_utlbmiss_patched = 1;
+	}
+#endif
+
+	/*
+	 * Clear out the nasid mask.
+	 */
+	for (i = 0; i < NASID_MASK_BYTES; i++)
+		npda->nasid_mask[i] = 0;
+
+	for (i = 0; i < numnodes; i++) {
+		nasid_t nasid = COMPACT_TO_NASID_NODEID(i);
+
+		/* Set my mask bit */
+		npda->nasid_mask[nasid / 8] |= (1 << nasid % 8);
+	}
+
+#ifndef CONFIG_IA64_SGI_IO
+	npda->node_first_cpu = get_cnode_cpu(node);
+#endif
+
+	if (npda->node_first_cpu != CPU_NONE) {
+		/*
+		 * Count number of cpus only if first CPU is valid.
+		 */
+		numcpus_p = &npda->node_num_cpus;
+		*numcpus_p = 0;
+		for (i = npda->node_first_cpu; i < MAXCPUS; i++) {
+			if (CPUID_TO_COMPACT_NODEID(i) != node)
+			    break;
+			else
+			    (*numcpus_p)++;
+		}
+	} else {
+		npda->node_num_cpus = 0; 
+	}
+
+	/* Allocate memory for the dump stack on each node 
+	 * This is useful during nmi handling since we
+	 * may not be guaranteed shared memory at that time
+	 * which precludes depending on a global dump stack
+	 */
+#ifndef CONFIG_IA64_SGI_IO
+	npda->dump_stack = (uint64_t *)kmem_zalloc_node(DUMP_STACK_SIZE,VM_NOSLEEP,
+							  node);
+	ASSERT_ALWAYS(npda->dump_stack);
+	ASSERT(npda->dump_stack);
+#endif
+	/* Initialize the counter which prevents
+	 * both the cpus on a node to proceed with nmi
+	 * handling.
+	 */
+#ifndef CONFIG_IA64_SGI_IO
+	npda->dump_count = 0;
+
+	/* Setup the (module,slot) --> nic mapping for all the routers
+	 * in the system. This is useful during error handling when
+	 * there is no shared memory.
+	 */
+	router_map_init(npda);
+
+	/* Allocate memory for the per-node router traversal queue */
+	router_queue_init(npda,node);
+	npda->sbe_info = kmem_zalloc_node_hint(sizeof (sbe_info_t), 0, node);
+	ASSERT(npda->sbe_info);
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+	/*
+	 * Initialize bte info pointers to NULL
+	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		npda->node_bte_info[i] = (bteinfo_t *)NULL;
+	}
+#endif
+#endif /* CONFIG_IA64_SGI_IO */
+}
+
+/* XXX - Move the interrupt stuff to intr.c ? */
+/*
+ * Set up the platform-dependent fields in the processor pda.
+ * Must be done _after_ init_platform_nodepda().
+ * If we need a lock here, something else is wrong!
+ */
+// void init_platform_pda(pda_t *ppda, cpuid_t cpu)
+void init_platform_pda(cpuid_t cpu)
+{
+	hub_intmasks_t *intmasks;
+	cpuinfo_t cpuinfo;
+	int i;
+	cnodeid_t	cnode;
+	synergy_da_t	*sda;
+	int	which_synergy;
+
+#ifndef CONFIG_IA64_SGI_IO
+	/* Allocate per-cpu platform-dependent data */
+	cpuinfo = (cpuinfo_t)kmem_alloc_node(sizeof(struct cpuinfo_s), GFP_ATOMIC, cputocnode(cpu));
+	ASSERT_ALWAYS(cpuinfo);
+	ppda->pdinfo = (void *)cpuinfo;
+	cpuinfo->ci_cpupda = ppda;
+	cpuinfo->ci_cpuid = cpu;
+#endif
+
+	cnode = cpuid_to_cnodeid(cpu);
+	which_synergy = cpuid_to_synergy(cpu);
+	sda = Synergy_da_indr[(cnode * 2) + which_synergy];
+	// intmasks = &ppda->p_intmasks;
+	intmasks = &sda->s_intmasks;
+
+#ifndef CONFIG_IA64_SGI_IO
+	ASSERT_ALWAYS(&ppda->p_nodepda);
+#endif
+
+	/* Clear INT_PEND0 masks. */
+	for (i = 0; i < N_INTPEND0_MASKS; i++)
+		intmasks->intpend0_masks[i] = 0;
+
+	/* Set up pointer to the vector block in the nodepda. */
+	/* (Cant use SUBNODEPDA - not working yet) */
+	intmasks->dispatch0 = &Nodepdaindr[cnode]->snpda[cputosubnode(cpu)].intr_dispatch0;
+	intmasks->dispatch1 = &Nodepdaindr[cnode]->snpda[cputosubnode(cpu)].intr_dispatch1;
+
+	/* Clear INT_PEND1 masks. */
+	for (i = 0; i < N_INTPEND1_MASKS; i++)
+		intmasks->intpend1_masks[i] = 0;
+
+
+#ifndef CONFIG_IA64_SGI_IO
+	/* Don't read the routers unless we're the master. */
+	ppda->p_routertick = 0;
+#endif
+
+}
+
+#if (defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)) && !defined(BRINGUP)	/* protect low mem for IP35/7 */
+#error "need protect_hub_calias, protect_nmi_handler_data"
+#endif
+
+#ifndef CONFIG_IA64_SGI_IO
+/*
+ * For now, just protect the first page (exception handlers). We
+ * may want to protect more stuff later.
+ */
+void
+protect_hub_calias(nasid_t nasid)
+{
+	paddr_t pa = NODE_OFFSET(nasid) + 0; /* page 0 on node nasid */
+	int i;
+
+	for (i = 0; i < MAX_REGIONS; i++) {
+		if (i == nasid_to_region(nasid))
+			continue;
+#ifndef BRINGUP
+		/* Protect the exception handlers. */
+		*(__psunsigned_t *)BDPRT_ENTRY(pa, i) = MD_PROT_NO;
+
+		/* Protect the ARCS SPB. */
+		*(__psunsigned_t *)BDPRT_ENTRY(pa + 4096, i) = MD_PROT_NO;
+#endif
+	}
+}
+
+/*
+ * Protect the page of low memory used to communicate with the NMI handler.
+ */
+void
+protect_nmi_handler_data(nasid_t nasid, int slice)
+{
+	paddr_t pa = NODE_OFFSET(nasid) + NMI_OFFSET(nasid, slice);
+	int i;
+
+	for (i = 0; i < MAX_REGIONS; i++) {
+		if (i == nasid_to_region(nasid))
+			continue;
+#ifndef BRINGUP
+		*(__psunsigned_t *)BDPRT_ENTRY(pa, i) = MD_PROT_NO;
+#endif
+	}
+}
+#endif /* CONFIG_IA64_SGI_IO */
+
+
+#ifdef IRIX
+/*
+ * Protect areas of memory that we access uncached by marking them as
+ * poisoned so the T5 can't read them speculatively and erroneously
+ * mark them dirty in its cache only to write them back with old data
+ * later.
+ */
+static void
+protect_low_memory(nasid_t nasid)
+{
+	/* Protect low memory directory */
+	poison_state_alter_range(KLDIR_ADDR(nasid), KLDIR_SIZE, 1);
+
+	/* Protect klconfig area */
+	poison_state_alter_range(KLCONFIG_ADDR(nasid), KLCONFIG_SIZE(nasid), 1);
+
+	/* Protect the PI error spool area. */
+	poison_state_alter_range(PI_ERROR_ADDR(nasid), PI_ERROR_SIZE(nasid), 1);
+
+	/* Protect CPU A's cache error eframe area. */
+	poison_state_alter_range(TO_NODE_UNCAC(nasid, CACHE_ERR_EFRAME),
+				CACHE_ERR_AREA_SIZE, 1);
+
+	/* Protect CPU B's area */
+	poison_state_alter_range(TO_NODE_UNCAC(nasid, CACHE_ERR_EFRAME)
+				^ UALIAS_FLIP_BIT,
+				CACHE_ERR_AREA_SIZE, 1);
+#error "SN1 not handled correctly"
+}
+#endif	/* IRIX */
+
+/*
+ * per_hub_init
+ *
+ * 	This code is executed once for each Hub chip.
+ */
+void
+per_hub_init(cnodeid_t cnode)
+{
+	uint64_t	done;
+	nasid_t		nasid;
+	nodepda_t	*npdap;
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)	/* SN1 specific */
+	ii_icmr_u_t	ii_icmr;
+	ii_ibcr_u_t	ii_ibcr;
+#endif
+#ifndef CONFIG_IA64_SGI_IO
+	int i;
+#endif
+
+#ifdef SIMULATED_KLGRAPH
+	compact_to_nasid_node[0] = 0;
+	nasid_to_compact_node[0] = 0;
+	FIXME("per_hub_init: SIMULATED_KLCONFIG: compact_to_nasid_node[0] = 0\n");
+#endif /* SIMULATED_KLGRAPH */
+	nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+	ASSERT(nasid != INVALID_NASID);
+	ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);
+
+	/* Grab the hub_mask lock. */
+	spin_lock(&hub_mask_lock);
+
+	/* Test our bit. */
+	if (!(done = CNODEMASK_TSTB(hub_init_mask, cnode))) {
+
+		/* Turn our bit on in the mask. */
+		CNODEMASK_SETB(hub_init_mask, cnode);
+	}
+
+#if defined(SN0_HWDEBUG)
+	hub_config_setup();
+#endif
+	/* Release the hub_mask lock. */
+	spin_unlock(&hub_mask_lock);
+
+	/*
+	 * Do the actual initialization if it hasn't been done yet.
+	 * We don't need to hold a lock for this work.
+	 */
+	if (!done) {
+		npdap = NODEPDA(cnode);
+
+		npdap->hub_chip_rev = get_hub_chiprev(nasid);
+
+#ifndef CONFIG_IA64_SGI_IO
+		for (i = 0; i < CPUS_PER_NODE; i++) {
+			cpu = cnode_slice_to_cpuid(cnode, i);
+			if (!cpu_enabled(cpu))
+			    SET_CPU_LEDS(nasid, i, 0xf);
+		}
+#endif /* CONFIG_IA64_SGI_IO */
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC) /* SN1 specific */
+
+		/*
+		 * Set the total number of CRBs that can be used.
+		 */
+		ii_icmr.ii_icmr_regval= 0x0;
+		ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xF;
+		REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);
+
+		/*
+		 * Set the number of CRBs that both of the BTEs combined
+		 * can use minus 1.
+		 */
+		ii_ibcr.ii_ibcr_regval= 0x0;
+		ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
+		REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);
+
+		/*
+		 * Set CRB timeout to be 10ms.
+		 */
+		REMOTE_HUB_S(nasid, IIO_ICTP, 0x1000 );
+		REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
+
+#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC */
+
+
+#ifndef CONFIG_IA64_SGI_IO
+
+		/* Reserve all of the hardwired interrupt levels. */
+		intr_reserve_hardwired(cnode);
+
+		/* Initialize error interrupts for this hub. */
+		hub_error_init(cnode);
+
+		/* Set up correctable memory/directory ECC error interrupt. */
+		install_eccintr(cnode);
+
+		/* Protect our exception vectors from accidental corruption. */
+		protect_hub_calias(nasid);
+
+		/* Enable RT clock interrupts */
+		hub_rtc_init(cnode);
+		hub_migrintr_init(cnode); /* Enable migration interrupt */
+#endif
+
+		spin_lock(&hub_mask_lock);
+		CNODEMASK_SETB(hub_init_done_mask, cnode);
+		spin_unlock(&hub_mask_lock);
+
+	} else {
+		/*
+		 * Wait for the other CPU to complete the initialization.
+		 */
+		while (CNODEMASK_TSTB(hub_init_done_mask, cnode) == 0)
+			/* LOOP */
+			;
+	}
+}
+
+extern void
+update_node_information(cnodeid_t cnodeid)
+{
+	nodepda_t *npda = NODEPDA(cnodeid);
+	nodepda_router_info_t *npda_rip;
+	
+	/* Go through the list of router info 
+	 * structures and copy some frequently
+	 * accessed info from the info hanging
+	 * off the corresponding router vertices
+	 */
+	npda_rip = npda->npda_rip_first;
+	while(npda_rip) {
+		if (npda_rip->router_infop) {
+			npda_rip->router_portmask = 
+				npda_rip->router_infop->ri_portmask;
+			npda_rip->router_slot = 
+				npda_rip->router_infop->ri_slotnum;
+		} else {
+			/* No router, no ports. */
+			npda_rip->router_portmask = 0;
+		}
+		npda_rip = npda_rip->router_next;
+	}
+}
+
+hubreg_t
+get_region(cnodeid_t cnode)
+{
+	if (fine_mode)
+		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
+	else
+		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
+}
+
+hubreg_t
+nasid_to_region(nasid_t nasid)
+{
+	if (fine_mode)
+		return nasid >> NASID_TO_FINEREG_SHFT;
+	else
+		return nasid >> NASID_TO_COARSEREG_SHFT;
+}
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/ml_SN_intr.c linux/arch/ia64/sn/io/ml_SN_intr.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/ml_SN_intr.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/ml_SN_intr.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1738 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Alan Mayer
+ */
+
+/*
+ * intr.c-
+ *	This file contains all of the routines necessary to set up and
+ *	handle interrupts on an IP27 board.
+ */
+
+#ident  "$Revision: 1.167 $"
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/smp.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/nodemask.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/synergy.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/pci/pcibr_private.h>
+
+
+#if defined (CONFIG_SGI_IP35)
+
+#include <asm/sn/pci/pciio.h>		/* For SN1 + pcibr Addressing Limitation */
+#include <asm/sn/pci/pcibr.h>		/* For SN1 + pcibr Addressing Limitation */
+#include <asm/sn/pci/pcibr_private.h>	/* For SN1 + pcibr Addressing Limitation */
+#endif /* SN1 */
+
+#if DEBUG_INTR_TSTAMP_DEBUG
+#include <sys/debug.h>
+#include <sys/idbg.h>
+#include <sys/inst.h>
+void do_splx_log(int, int);
+void spldebug_log_event(int);
+#endif
+
+// FIXME - BRINGUP
+#ifdef CONFIG_SMP
+extern unsigned long cpu_online_map;
+#endif
+#define cpu_allows_intr(cpu)	(1)
+// If I understand what's going on with this, 32 should work.
+// physmem_maxradius seems to be the maximum number of router
+// hops to get from one end of the system to the other.  With
+// a maximally configured machine, with the dumbest possible
+// topology, we would make 32 router hops.  For what we're using
+// it for, the dumbest possible should suffice.
+#define physmem_maxradius()	32
+
+#define SUBNODE_ANY -1
+
+extern int	nmied;
+extern int	hub_intr_wakeup_cnt;
+extern synergy_da_t	*Synergy_da_indr[];
+extern cpuid_t         master_procid;
+
+extern cnodeid_t master_node_get(devfs_handle_t vhdl);
+
+
+#define INTR_LOCK(vecblk) \
+     (s = mutex_spinlock(&(vecblk)->vector_lock))
+#define INTR_UNLOCK(vecblk) \
+      mutex_spinunlock(&(vecblk)->vector_lock, s)
+
+/*
+ * REACT/Pro
+ */
+
+
+
+/* 
+ * Find first bit set 
+ * Used outside this file also 
+ */
+int ms1bit(unsigned long x)
+{
+    int			b;
+
+    if (x >> 32)	b  = 32, x >>= 32;
+    else		b  =  0;
+    if (x >> 16)	b += 16, x >>= 16;
+    if (x >>  8)	b +=  8, x >>=  8;
+    if (x >>  4)	b +=  4, x >>=  4;
+    if (x >>  2)	b +=  2, x >>=  2;
+
+    return b + (int) (x >> 1);
+}
+
+/* ARGSUSED */
+void
+intr_stray(void *lvl)
+{
+    printk("Stray Interrupt - level %ld to cpu %d", (long)lvl, cpuid());
+}
+
+#if defined(DEBUG)
+
+/* Infrastructure  to gather the device - target cpu mapping info */
+#define MAX_DEVICES	1000	/* Reasonably large number. Need not be 
+				 * the exact maximum # devices possible.
+				 */
+#define MAX_NAME	100	
+typedef struct {
+	dev_t		dev;	/* device */
+	cpuid_t		cpuid;	/* target cpu */
+	cnodeid_t	cnodeid;/* node on which the target cpu is present */
+	int		bit;	/* intr bit reserved */
+	char		intr_name[MAX_NAME]; /* name of the interrupt */
+} intr_dev_targ_map_t;
+
+intr_dev_targ_map_t 	intr_dev_targ_map[MAX_DEVICES];
+uint64_t		intr_dev_targ_map_size;
+lock_t			intr_dev_targ_map_lock;
+
+/* Print out the device - target cpu mapping.
+ * This routine is used only in the idbg command
+ * "intrmap" 
+ */
+void
+intr_dev_targ_map_print(cnodeid_t cnodeid)
+{
+	int  i,j,size = 0;
+	int  print_flag = 0,verbose = 0;	
+	char node_name[10];
+	
+	if (cnodeid != CNODEID_NONE) {
+		nodepda_t 	*npda;
+
+		npda = NODEPDA(cnodeid);
+		for (j=0; j<NUM_SUBNODES; j++) {
+			qprintf("\n SUBNODE %d\n INT_PEND0: ", j);
+			for(i = 0 ; i < N_INTPEND_BITS ; i++)
+				qprintf("%d",SNPDA(npda,j)->intr_dispatch0.info[i].ii_flags);
+			qprintf("\n INT_PEND1: ");
+			for(i = 0 ; i < N_INTPEND_BITS ; i++)
+				qprintf("%d",SNPDA(npda,j)->intr_dispatch1.info[i].ii_flags);
+		}
+		verbose = 1;
+	}
+	qprintf("\n Device - Target Map [Interrupts: %s Node%s]\n\n",
+		(verbose ? "All" : "Non-hardwired"),
+		(cnodeid == CNODEID_NONE) ? "s: All" : node_name); 
+		
+	qprintf("Device\tCpu\tCnode\tIntr_bit\tIntr_name\n");
+	for (i = 0 ; i < intr_dev_targ_map_size ; i++) {
+
+		print_flag = 0;
+		if (verbose) {
+			if (cnodeid != CNODEID_NONE) {
+				if (cnodeid == intr_dev_targ_map[i].cnodeid)
+					print_flag = 1;
+			} else {
+				print_flag = 1;
+			}
+		} else {
+			if (intr_dev_targ_map[i].dev != 0) {
+				if (cnodeid != CNODEID_NONE) {
+					if (cnodeid == 
+					    intr_dev_targ_map[i].cnodeid)
+						print_flag = 1;
+				} else {
+					print_flag = 1;
+				}
+			}
+		}
+		if (print_flag) {
+			size++;
+			qprintf("%d\t%d\t%d\t%d\t%s\n",
+				intr_dev_targ_map[i].dev,
+				intr_dev_targ_map[i].cpuid,
+				intr_dev_targ_map[i].cnodeid,
+				intr_dev_targ_map[i].bit,
+				intr_dev_targ_map[i].intr_name);
+		}
+
+	}
+	qprintf("\nTotal : %d\n",size);
+}
+#endif /* DEBUG */
+
+/*
+ * The spinlocks have already been initialized.  Now initialize the interrupt
+ * vectors.  One processor on each hub does the work.
+ */
+void
+intr_init_vecblk(nodepda_t *npda, cnodeid_t node, int sn)
+{
+    int			i, ip=0;
+    intr_vecblk_t	*vecblk;
+    subnode_pda_t	*snpda;
+
+
+    snpda = SNPDA(npda,sn);
+    do {
+	if (ip == 0) {
+	    vecblk = &snpda->intr_dispatch0;
+	} else {
+	    vecblk = &snpda->intr_dispatch1;
+	}
+
+	/* Initialize this vector. */
+	for (i = 0; i < N_INTPEND_BITS; i++) {
+		vecblk->vectors[i].iv_func = intr_stray;
+		vecblk->vectors[i].iv_prefunc = NULL;
+		vecblk->vectors[i].iv_arg = (void *)(__psint_t)(ip * N_INTPEND_BITS + i);
+
+		vecblk->info[i].ii_owner_dev = 0;
+		strcpy(vecblk->info[i].ii_name, "Unused");
+		vecblk->info[i].ii_flags = 0;	/* No flags */
+		vecblk->vectors[i].iv_mustruncpu = -1; /* No CPU yet. */
+
+	    }
+
+	spinlock_init(&vecblk->vector_lock, "ivecb");
+
+	vecblk->vector_count = 0;    
+	for (i = 0; i < CPUS_PER_SUBNODE; i++)
+		vecblk->cpu_count[i] = 0;
+
+	vecblk->vector_state = VECTOR_UNINITED;
+
+    } while (++ip < 2);
+
+}
+
+
+/*
+ * do_intr_reserve_level(cpuid_t cpu, int bit, int resflags, int reserve, 
+ *					devfs_handle_t owner_dev, char *name)
+ *	Internal work routine to reserve or unreserve an interrupt level.
+ *		cpu is the CPU to which the interrupt will be sent.
+ *		bit is the level bit to reserve.  -1 means any level
+ *		resflags should include II_ERRORINT if this is an
+ *			error interrupt, II_THREADED if the interrupt handler
+ *			will be threaded, or 0 otherwise.
+ *		reserve should be set to II_RESERVE or II_UNRESERVE
+ *			to get or clear a reservation.
+ *		owner_dev is the device that "owns" this interrupt, if supplied
+ *		name is a human-readable name for this interrupt, if supplied
+ *	intr_reserve_level returns the bit reserved or -1 to indicate an error
+ */
+static int
+do_intr_reserve_level(cpuid_t cpu, int bit, int resflags, int reserve, 
+					devfs_handle_t owner_dev, char *name)
+{
+    intr_vecblk_t	*vecblk;
+    hub_intmasks_t 	*hub_intmasks;
+    int s;
+    int rv = 0;
+    int ip;
+    synergy_da_t	*sda;
+    int		which_synergy;
+    cnodeid_t	cnode;
+
+    ASSERT(bit < N_INTPEND_BITS * 2);
+
+    cnode = cpuid_to_cnodeid(cpu);
+    which_synergy = cpuid_to_synergy(cpu);
+    sda = Synergy_da_indr[(cnode * 2) + which_synergy];
+    hub_intmasks = &sda->s_intmasks;
+    // hub_intmasks = &pdaindr[cpu].pda->p_intmasks;
+
+    // if (pdaindr[cpu].pda == NULL) return -1;
+    if ((bit < N_INTPEND_BITS) && !(resflags & II_ERRORINT)) {
+	vecblk = hub_intmasks->dispatch0;
+	ip = 0;
+    } else {
+	ASSERT((bit >= N_INTPEND_BITS) || (bit == -1));
+	bit -= N_INTPEND_BITS;	/* Get position relative to INT_PEND1 reg. */
+	vecblk = hub_intmasks->dispatch1;
+	ip = 1;
+    }
+
+    INTR_LOCK(vecblk);
+
+    if (bit <= -1) {
+	// bit = 0;
+	bit = 7;  /* First available on SNIA */
+	ASSERT(reserve == II_RESERVE);
+	/* Choose any available level */
+	for (; bit < N_INTPEND_BITS; bit++) {
+	    if (!(vecblk->info[bit].ii_flags & II_RESERVE)) {
+		rv = bit;
+		break;
+	    }
+	}
+
+	/* Return -1 if all interrupt levels in this register are taken. */
+	if (bit == N_INTPEND_BITS)
+	    rv = -1;
+
+    } else {
+	/* Reserve a particular level if it's available. */
+	if ((vecblk->info[bit].ii_flags & II_RESERVE) == reserve) {
+	    /* Can't (un)reserve a level that's already (un)reserved. */
+	    rv = -1;
+	} else {
+	    rv = bit;
+	}
+    }
+
+    /* Reserve the level and bump the count. */
+    if (rv != -1) {
+	if (reserve) {
+	    int maxlen = sizeof(vecblk->info[bit].ii_name) - 1;
+	    int namelen;
+	    vecblk->info[bit].ii_flags |= (II_RESERVE | resflags);
+	    vecblk->info[bit].ii_owner_dev = owner_dev;
+	    /* Copy in the name. */
+	    namelen = name ? strlen(name) : 0;
+	    strncpy(vecblk->info[bit].ii_name, name, MIN(namelen, maxlen)); 
+	    vecblk->info[bit].ii_name[maxlen] = '\0';
+	    vecblk->vector_count++;
+	} else {
+	    vecblk->info[bit].ii_flags = 0;	/* Clear all the flags */
+	    vecblk->info[bit].ii_owner_dev = 0;
+	    /* Clear the name. */
+	    vecblk->info[bit].ii_name[0] = '\0';
+	    vecblk->vector_count--;
+	}
+    }
+
+    INTR_UNLOCK(vecblk);
+
+#if defined(DEBUG)
+    if (rv >= 0) {
+	    int namelen = name ? strlen(name) : 0;
+	    /* Gather this device - target cpu mapping information
+	     * in a table which can be used later by the idbg "intrmap"
+	     * command
+	     */
+	    s = mutex_spinlock(&intr_dev_targ_map_lock);
+	    if (intr_dev_targ_map_size < MAX_DEVICES) {
+		    intr_dev_targ_map_t	*p;
+
+		    p 		= &intr_dev_targ_map[intr_dev_targ_map_size];
+		    p->dev  	= owner_dev;
+		    p->cpuid 	= cpu; 
+		    p->cnodeid 	= cputocnode(cpu); 
+		    p->bit 	= ip * N_INTPEND_BITS + rv;
+		    strncpy(p->intr_name,
+			    name,
+			    MIN(MAX_NAME,namelen));
+		    intr_dev_targ_map_size++;
+	    }
+	    mutex_spinunlock(&intr_dev_targ_map_lock,s);
+    }
+#endif /* DEBUG */
+
+    return (((rv == -1) ? rv : (ip * N_INTPEND_BITS) + rv)) ;
+}
+
+
+/*
+ * WARNING:  This routine should only be called from within ml/SN.
+ *	Reserve an interrupt level.
+ */
+int
+intr_reserve_level(cpuid_t cpu, int bit, int resflags, devfs_handle_t owner_dev, char *name)
+{
+	return(do_intr_reserve_level(cpu, bit, resflags, II_RESERVE, owner_dev, name));
+}
+
+
+/*
+ * WARNING:  This routine should only be called from within ml/SN.
+ *	Unreserve an interrupt level.
+ */
+void
+intr_unreserve_level(cpuid_t cpu, int bit)
+{
+	(void)do_intr_reserve_level(cpu, bit, 0, II_UNRESERVE, 0, NULL);
+}
+
+/*
+ * Get values that vary depending on which CPU and bit we're operating on
+ */
+static hub_intmasks_t *
+intr_get_ptrs(cpuid_t cpu, int bit,
+	      int *new_bit,		/* Bit relative to the register */
+	      hubreg_t **intpend_masks, /* Masks for this register */
+	      intr_vecblk_t **vecblk,	/* Vecblock for this interrupt */
+	      int *ip)			/* Which intpend register */
+{
+	hub_intmasks_t *hub_intmasks;
+	synergy_da_t	*sda;
+	int		which_synergy;
+	cnodeid_t	cnode;
+
+	ASSERT(bit < N_INTPEND_BITS * 2);
+
+	cnode = cpuid_to_cnodeid(cpu);
+	which_synergy = cpuid_to_synergy(cpu);
+	sda = Synergy_da_indr[(cnode * 2) + which_synergy];
+	hub_intmasks = &sda->s_intmasks;
+
+	// hub_intmasks = &pdaindr[cpu].pda->p_intmasks;
+
+	if (bit < N_INTPEND_BITS) {
+		*intpend_masks = hub_intmasks->intpend0_masks;
+		*vecblk = hub_intmasks->dispatch0;
+		*ip = 0;
+		*new_bit = bit;
+	} else {
+		*intpend_masks = hub_intmasks->intpend1_masks;
+		*vecblk = hub_intmasks->dispatch1;
+		*ip = 1;
+		*new_bit = bit - N_INTPEND_BITS;
+	}
+
+	return hub_intmasks;
+}
+
+
+/*
+ * intr_connect_level(cpuid_t cpu, int bit, ilvl_t intr_swlevel, 
+ *		intr_func_t intr_func, void *intr_arg);
+ *	This is the lowest-level interface to the interrupt code.  It shouldn't
+ *	be called from outside the ml/SN directory.
+ *	intr_connect_level hooks up an interrupt to a particular bit in
+ *	the INT_PEND0/1 masks.  Returns 0 on success.
+ *		cpu is the CPU to which the interrupt will be sent.
+ *		bit is the level bit to connect to
+ *		intr_swlevel tells which software level to use
+ *		intr_func is the interrupt handler
+ *		intr_arg is an arbitrary argument interpreted by the handler
+ *		intr_prefunc is a prologue function, to be called
+ *			with interrupts disabled, to disable
+ *			the interrupt at source.  It is called
+ *			with the same argument.  Should be NULL for
+ *			typical interrupts, which can be masked
+ *			by the infrastructure at the level bit.
+ *	intr_connect_level returns 0 on success or nonzero on an error
+ */
+/* ARGSUSED */
+int
+intr_connect_level(cpuid_t cpu, int bit, ilvl_t intr_swlevel, 
+		intr_func_t intr_func, void *intr_arg,
+		intr_func_t intr_prefunc)
+{
+    intr_vecblk_t	*vecblk;
+    hubreg_t		*intpend_masks;
+    int s;
+    int rv = 0;
+    int ip;
+
+    ASSERT(bit < N_INTPEND_BITS * 2);
+
+    (void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks,
+				 &vecblk, &ip);
+
+    INTR_LOCK(vecblk);
+
+    if ((vecblk->info[bit].ii_flags & II_INUSE) ||
+	(!(vecblk->info[bit].ii_flags & II_RESERVE))) {
+	/* Can't assign to a level that's in use or isn't reserved. */
+	rv = -1;
+    } else {
+	/* Stuff parameters into vector and info */
+	vecblk->vectors[bit].iv_func = intr_func;
+	vecblk->vectors[bit].iv_prefunc = intr_prefunc;
+	vecblk->vectors[bit].iv_arg = intr_arg;
+	vecblk->info[bit].ii_flags |= II_INUSE;
+    }
+
+    /* Now stuff the masks if everything's okay. */
+    if (!rv) {
+	int lslice;
+	volatile hubreg_t *mask_reg;
+	// nasid_t nasid = COMPACT_TO_NASID_NODEID(cputocnode(cpu));
+	nasid_t nasid = cpuid_to_nasid(cpu);
+	int	subnode = cpuid_to_subnode(cpu);
+
+	/* Make sure it's not already pending when we connect it. */
+	REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit + ip * N_INTPEND_BITS);
+
+	intpend_masks[0] |= (1ULL << (uint64_t)bit);
+
+	lslice = cputolocalslice(cpu);
+	vecblk->cpu_count[lslice]++;
+#if SN1
+	/*
+	 * On SN1, there are 8 interrupt mask registers per node:
+	 * 	PI_0 MASK_0 A
+	 * 	PI_0 MASK_1 A
+	 * 	PI_0 MASK_0 B
+	 * 	PI_0 MASK_1 B
+	 * 	PI_1 MASK_0 A
+	 * 	PI_1 MASK_1 A
+	 * 	PI_1 MASK_0 B
+	 * 	PI_1 MASK_1 B
+	 */
+#endif
+	if (ip == 0) {
+		mask_reg = REMOTE_HUB_PI_ADDR(nasid, subnode, 
+		        PI_INT_MASK0_A + PI_INT_MASK_OFFSET * lslice);
+	} else {
+		mask_reg = REMOTE_HUB_PI_ADDR(nasid, subnode,
+			PI_INT_MASK1_A + PI_INT_MASK_OFFSET * lslice);
+	}
+
+	HUB_S(mask_reg, intpend_masks[0]);
+    }
+
+    INTR_UNLOCK(vecblk);
+
+    return rv;
+}
+
+
+/*
+ * intr_disconnect_level(cpuid_t cpu, int bit)
+ *
+ *	This is the lowest-level interface to the interrupt code.  It should
+ *	not be called from outside the ml/SN directory.
+ *	intr_disconnect_level removes a particular bit from an interrupt in
+ * 	the INT_PEND0/1 masks.  Returns 0 on success or nonzero on failure.
+ */
+int
+intr_disconnect_level(cpuid_t cpu, int bit)
+{
+    intr_vecblk_t	*vecblk;
+    hubreg_t		*intpend_masks;
+    int s;
+    int rv = 0;
+    int ip;
+
+    (void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks,
+				 &vecblk, &ip);
+
+    INTR_LOCK(vecblk);
+
+    if ((vecblk->info[bit].ii_flags & (II_RESERVE | II_INUSE)) !=
+	((II_RESERVE | II_INUSE))) {
+	/* Can't remove a level that's not in use or isn't reserved. */
+	rv = -1;
+    } else {
+	/* Stuff parameters into vector and info */
+	vecblk->vectors[bit].iv_func = (intr_func_t)NULL;
+	vecblk->vectors[bit].iv_prefunc = (intr_func_t)NULL;
+	vecblk->vectors[bit].iv_arg = 0;
+	vecblk->info[bit].ii_flags &= ~II_INUSE;
+#ifdef BASE_ITHRTEAD
+	vecblk->vectors[bit].iv_mustruncpu = -1; /* No mustrun CPU any more. */
+#endif
+    }
+
+    /* Now clear the masks if everything's okay. */
+    if (!rv) {
+	int lslice;
+	volatile hubreg_t *mask_reg;
+
+	intpend_masks[0] &= ~(1ULL << (uint64_t)bit);
+	lslice = cputolocalslice(cpu);
+	vecblk->cpu_count[lslice]--;
+	mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cputocnode(cpu)), 
+				   cpuid_to_subnode(cpu),
+				   ip == 0 ? PI_INT_MASK0_A : PI_INT_MASK1_A);
+	mask_reg = (volatile hubreg_t *)((__psunsigned_t)mask_reg +
+					(PI_INT_MASK_OFFSET * lslice));
+	*mask_reg = intpend_masks[0];
+    }
+
+    INTR_UNLOCK(vecblk);
+
+    return rv;
+}
+
+/*
+ * Actually block or unblock an interrupt
+ */
+void
+do_intr_block_bit(cpuid_t cpu, int bit, int block)
+{
+	intr_vecblk_t *vecblk;
+	int s;
+	int ip;
+	hubreg_t *intpend_masks;
+	volatile hubreg_t mask_value;
+	volatile hubreg_t *mask_reg;
+
+	intr_get_ptrs(cpu, bit, &bit, &intpend_masks, &vecblk, &ip);
+
+	INTR_LOCK(vecblk);
+
+	if (block)
+		/* Block */
+		intpend_masks[0] &= ~(1ULL << (uint64_t)bit);
+	else
+		/* Unblock */
+		intpend_masks[0] |= (1ULL << (uint64_t)bit);
+
+	if (ip == 0) {
+		mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cputocnode(cpu)), 
+		        cpuid_to_subnode(cpu), PI_INT_MASK0_A);
+	} else {
+		mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cputocnode(cpu)), 
+			cpuid_to_subnode(cpu), PI_INT_MASK1_A);
+	}
+
+	HUB_S(mask_reg, intpend_masks[0]);
+
+	/*
+	 * Wait for it to take effect.  (One read should suffice.)
+	 * This is only necessary when blocking an interrupt
+	 */
+	if (block)
+		while ((mask_value = HUB_L(mask_reg)) != intpend_masks[0])
+			;
+
+	INTR_UNLOCK(vecblk);
+}
+
+
+/*
+ * Block a particular interrupt (cpu/bit pair).
+ */
+/* ARGSUSED */
+void
+intr_block_bit(cpuid_t cpu, int bit)
+{
+	do_intr_block_bit(cpu, bit, 1);
+}
+
+
+/*
+ * Unblock a particular interrupt (cpu/bit pair).
+ */
+/* ARGSUSED */
+void
+intr_unblock_bit(cpuid_t cpu, int bit)
+{
+	do_intr_block_bit(cpu, bit, 0);
+}
+
+
+/* verifies that the specified CPUID is on the specified SUBNODE (if any) */
+#define cpu_on_subnode(cpuid, which_subnode) \
+	   (((which_subnode) == SUBNODE_ANY) || (cpuid_to_subnode(cpuid) == (which_subnode)))
+
+
+/*
+ * Choose one of the CPUs on a specified node or subnode to receive
+ * interrupts. Don't pick a cpu which has been specified as a NOINTR cpu.
+ *
+ * Among all acceptable CPUs, the CPU that has the fewest total number
+ * of interrupts targetted towards it is chosen.  Note that we never
+ * consider how frequent each of these interrupts might occur, so a rare
+ * hardware error interrupt is weighted equally with a disk interrupt.
+ */
+static cpuid_t
+do_intr_cpu_choose(cnodeid_t cnode, int which_subnode)
+{
+	cpuid_t 	cpu, best_cpu = CPU_NONE;
+	int		slice, min_count=1000;
+
+	min_count = 1000;
+	for (slice=0; slice < CPUS_PER_NODE; slice++) {
+		intr_vecblk_t 	*vecblk0, *vecblk1;
+		int total_intrs_to_slice;
+		subnode_pda_t *snpda;
+		int local_cpu_num;
+
+		cpu = cnode_slice_to_cpuid(cnode, slice);
+		cpu = cpu_logical_id(cpu);
+		if (cpu == CPU_NONE)
+			continue;
+
+		/* If this cpu isn't enabled for interrupts, skip it */
+		if (!cpu_enabled(cpu) || !cpu_allows_intr(cpu))
+			continue;
+
+		/* If this isn't the right subnode, skip it */
+		if (!cpu_on_subnode(cpu, which_subnode))
+			continue;
+
+		/* OK, this one's a potential CPU for interrupts */
+		snpda = SUBNODEPDA(cnode,SUBNODE(slice));
+		vecblk0 = &snpda->intr_dispatch0;
+		vecblk1 = &snpda->intr_dispatch1;
+		local_cpu_num = LOCALCPU(slice);
+		total_intrs_to_slice = vecblk0->cpu_count[local_cpu_num] +
+		              vecblk1->cpu_count[local_cpu_num];
+
+		if (min_count > total_intrs_to_slice) {
+			min_count = total_intrs_to_slice;
+			best_cpu = cpu;
+		}
+	}
+	return best_cpu;
+}
+
+/*
+ * Choose an appropriate interrupt target CPU on a specified node.
+ * If which_subnode is SUBNODE_ANY, then subnode is not considered.
+ * Otherwise, the chosen CPU must be on the specified subnode.
+ */
+static cpuid_t
+intr_cpu_choose_from_node(cnodeid_t cnode, int which_subnode)
+{
+	return(do_intr_cpu_choose(cnode, which_subnode));
+}
+
+
+#ifndef CONFIG_IA64_SGI_IO
+/*
+ * Convert a subnode vertex into a (cnodeid, which_subnode) pair.
+ * Return 0 on success, non-zero on failure.
+ */
+static int
+subnodevertex_to_subnode(devfs_handle_t vhdl, cnodeid_t *cnodeidp, int *which_subnodep)
+{
+	arbitrary_info_t which_subnode;
+	cnodeid_t cnodeid;
+
+	/* Try to grab subnode information */
+	if (hwgraph_info_get_LBL(vhdl, INFO_LBL_CPUBUS, &which_subnode) != GRAPH_SUCCESS)
+		return(-1);
+
+	/* On which node? */
+	cnodeid = master_node_get(vhdl);
+	if (cnodeid == CNODEID_NONE)
+		return(-1);
+
+	*which_subnodep = (int)which_subnode;
+	*cnodeidp = cnodeid;
+	return(0); /* success */
+}
+
+#endif /* CONFIG_IA64_SGI_IO */
+
+/* Make it easy to identify subnode vertices in the hwgraph */
+void
+mark_subnodevertex_as_subnode(devfs_handle_t vhdl, int which_subnode)
+{
+	graph_error_t rv;
+
+	ASSERT(0 <= which_subnode);
+	ASSERT(which_subnode < NUM_SUBNODES);
+
+	rv = hwgraph_info_add_LBL(vhdl, INFO_LBL_CPUBUS, (arbitrary_info_t)which_subnode);
+	ASSERT_ALWAYS(rv == GRAPH_SUCCESS);
+
+	rv = hwgraph_info_export_LBL(vhdl, INFO_LBL_CPUBUS, sizeof(arbitrary_info_t));
+	ASSERT_ALWAYS(rv == GRAPH_SUCCESS);
+}
+
+
+#ifndef CONFIG_IA64_SGI_IO
+/*
+ * Given a device descriptor, extract interrupt target information and
+ * choose an appropriate CPU.  Return CPU_NONE if we can't make sense
+ * out of the target information.
+ * TBD: Should this be considered platform-independent code?
+ */
+static cpuid_t
+intr_target_from_desc(device_desc_t dev_desc, int favor_subnode)
+{
+	cpuid_t cpuid = CPU_NONE;
+	cnodeid_t cnodeid;
+	int which_subnode;
+	devfs_handle_t intr_target_dev;
+
+	if ((intr_target_dev = device_desc_intr_target_get(dev_desc)) != GRAPH_VERTEX_NONE) {
+		/* 
+		 * A valid device was specified.  If it's a particular
+		 * CPU, then use that CPU as target.  
+		 */
+		cpuid = cpuvertex_to_cpuid(intr_target_dev);
+		if (cpuid != CPU_NONE)
+			goto cpuchosen;
+
+		/* If a subnode vertex was specified, pick a CPU on that subnode. */
+		if (subnodevertex_to_subnode(intr_target_dev, &cnodeid, &which_subnode) == 0) {
+			cpuid = intr_cpu_choose_from_node(cnodeid, which_subnode);
+			goto cpuchosen;
+		}
+
+		/*
+		 * Otherwise, pick a CPU on the node that owns the 
+		 * specified target.  Favor "favor_subnode", if specified.
+		 */
+		cnodeid = master_node_get(intr_target_dev);
+		if (cnodeid != CNODEID_NONE) {
+			cpuid = intr_cpu_choose_from_node(cnodeid, favor_subnode);
+			goto cpuchosen;
+		}
+	}
+
+cpuchosen:
+	return(cpuid);
+}
+#endif /* CONFIG_IA64_SGI_IO */
+
+
+#ifndef CONFIG_IA64_SGI_IO
+/*
+ * Check if we had already visited this candidate cnode
+ */
+static void *
+intr_cnode_seen(cnodeid_t 	candidate,
+		void 		*arg1,
+		void 		*arg2)
+{
+	int		i;
+	cnodeid_t	*visited_cnodes = (cnodeid_t *)arg1;
+	int		*num_visited_cnodes = (int *)arg2;
+
+	ASSERT(visited_cnodes);
+	ASSERT(*num_visited_cnodes <= numnodes);
+	for(i = 0 ; i < *num_visited_cnodes; i++) {
+		if (candidate == visited_cnodes[i])
+			return(NULL);
+	}
+	return(visited_cnodes);
+}
+
+#endif /* CONFIG_IA64_SGI_IO */
+
+
+
+/*
+ * intr_bit_reserve_test(cpuid,which_subnode,cnode,req_bit,intr_resflags,
+ *		owner_dev,intr_name,*resp_bit)
+ *	Either cpuid is not CPU_NONE or cnodeid not CNODEID_NONE but
+ * 	not both.
+ * 1. 	If cpuid is specified, this routine tests if this cpu can be a valid
+ * 	interrupt target candidate.
+ * 2. 	If cnodeid is specified, this routine tests if there is a cpu on 
+ *	this node which can be a valid interrupt target candidate.
+ * 3.	If a valid interrupt target cpu candidate is found then an attempt at 
+ * 	reserving an interrupt bit on the corresponding cnode is made.
+ *
+ * If steps 1 & 2 both fail or step 3 fails then we are not able to get a valid
+ * interrupt target cpu then routine returns CPU_NONE (failure)
+ * Otherwise routine returns cpuid of interrupt target (success)
+ */
+static cpuid_t
+intr_bit_reserve_test(cpuid_t 		cpuid,
+		      int		favor_subnode,
+		      cnodeid_t 	cnodeid,
+		      int		req_bit,
+		      int 		intr_resflags,
+		      devfs_handle_t 	owner_dev,
+		      char		*intr_name,
+		      int		*resp_bit)
+{
+
+	ASSERT((cpuid==CPU_NONE) || (cnodeid==CNODEID_NONE));
+
+	if (cnodeid != CNODEID_NONE) {
+		/* Try to choose an interrupt cpu candidate */
+		cpuid = intr_cpu_choose_from_node(cnodeid, favor_subnode);
+	}
+
+	if (cpuid != CPU_NONE) {
+		/* Try to reserve an interrupt bit on the hub 
+		 * corresponding to the candidate cnode. If we
+		 * are successful then we got a cpu which can
+		 * act as an interrupt target for the io device.
+		 * Otherwise we need to continue the search
+		 * further.
+		 */
+		*resp_bit = do_intr_reserve_level(cpuid, 
+						  req_bit,
+						  intr_resflags,
+						  II_RESERVE,
+						  owner_dev, 
+						  intr_name);
+
+		if (*resp_bit >= 0)
+			/* The interrupt target specified was fine */
+			return(cpuid);
+	}
+	return(CPU_NONE);
+}
+/*
+ * intr_heuristic(dev_t dev,device_desc_t dev_desc,
+ *		  int req_bit,int intr_resflags,dev_t owner_dev,
+ *		  char *intr_name,int *resp_bit)
+ *
+ * Choose an interrupt destination for an interrupt.
+ *	dev is the device for which the interrupt is being set up
+ *	dev_desc is a description of hardware and policy that could
+ *		help determine where this interrupt should go
+ *	req_bit is the interrupt bit requested 
+ *		(can be INTRCONNECT_ANY_BIT in which case the first available
+ * 		 interrupt bit is used)
+ *	intr_resflags indicates whether we want to (un)reserve bit
+ *	owner_dev is the owner device
+ *	intr_name is the readable interrupt name	
+ * 	resp_bit indicates whether we succeeded in getting the required
+ *		 action  { (un)reservation} done	
+ *		 negative value indicates failure
+ *
+ */
+/* ARGSUSED */
+cpuid_t
+intr_heuristic(devfs_handle_t 		dev,
+	       device_desc_t 	dev_desc,
+	       int		req_bit,
+	       int 		intr_resflags,
+	       devfs_handle_t 		owner_dev,
+	       char		*intr_name,
+	       int		*resp_bit)
+{
+	cpuid_t		cpuid;				/* possible intr targ*/
+	cnodeid_t 	candidate;			/* possible candidate */
+#ifndef BRINGUP
+	cnodeid_t	visited_cnodes[MAX_NASIDS], 	/* nodes seen so far */
+		        center,				/* node we are on */
+		        candidate;			/* possible candidate */
+	int		num_visited_cnodes = 0;		/* # nodes seen */
+
+	int		radius = 1,			/* start looking at the
+							 * current node
+							 */
+		        maxradius = physmem_maxradius();
+	void		*rv;
+#endif /* BRINGUP */
+	int		which_subnode = SUBNODE_ANY;
+
+#if CONFIG_IA64_SGI_IO /* SN1 + pcibr Addressing Limitation */
+	{
+	devfs_handle_t pconn_vhdl;
+	pcibr_soft_t pcibr_soft;
+
+	/*
+	 * This combination of SN1 and Bridge hardware has an odd "limitation".
+	 * Due to the choice of addresses for PI0 and PI1 registers on SN1
+	 * and historical limitations in Bridge, Bridge is unable to
+	 * send interrupts to both PI0 CPUs and PI1 CPUs -- we have
+	 * to choose one set or the other.  That choice is implicitly
+	 * made when Bridge first attaches its error interrupt.  After
+	 * that point, all subsequent interrupts are restricted to the
+	 * same PI number (though it's possible to send interrupts to
+	 * the same PI number on a different node).
+	 *
+	 * Since neither SN1 nor Bridge designers are willing to admit a
+	 * bug, we can't really call this a "workaround".  It's a permanent
+	 * solution for an SN1-specific and Bridge-specific hardware
+	 * limitation that won't ever be lifted.
+	 */
+        if ((hwgraph_edge_get(dev, EDGE_LBL_PCI, &pconn_vhdl) == GRAPH_SUCCESS) &&
+	   ((pcibr_soft = pcibr_soft_get(pconn_vhdl)) != NULL)) {
+		/*
+		 * We "know" that the error interrupt is the first
+		 * interrupt set up by pcibr_attach.  Send all interrupts
+		 * on this bridge to the same subnode number.
+		 */
+		if (pcibr_soft->bsi_err_intr) {
+			which_subnode = cpuid_to_subnode(((hub_intr_t) pcibr_soft->bsi_err_intr)->i_cpuid);
+		}
+	}
+	}
+#endif /* CONFIG_IA64_SGI_IO */
+
+#ifndef CONFIG_IA64_SGI_IO
+	/* 
+	 * If an interrupt target was specified for this
+	 * interrupt allocation, try to use it.
+	 */
+	if (dev_desc) {
+
+		/* Try to see if the interrupt target specified in the
+		 * device descriptor is a legal candidate.
+		 */
+		cpuid = intr_bit_reserve_test(intr_target_from_desc(dev_desc, which_subnode),
+					      which_subnode,
+					      CNODEID_NONE,
+					      req_bit,
+					      intr_resflags,
+					      owner_dev,
+					      intr_name,
+					      resp_bit);
+
+		if (cpuid != CPU_NONE) {
+			if (cpu_on_subnode(cpuid, which_subnode))
+				return(cpuid);	/* got a valid interrupt target */
+
+			printk("Override explicit interrupt targetting: %v (0x%x)\n",
+				owner_dev, owner_dev);
+
+			intr_unreserve_level(cpuid, *resp_bit);
+		}
+
+		/* Fall through on to the next step in the search for
+		 * the interrupt candidate.
+		 */
+
+	}
+#endif  /* CONFIG_IA64_SGI_IO */
+	
+	/* Check if we can find a valid interrupt target candidate on
+	 * the master node for the device.
+	 */
+	cpuid = intr_bit_reserve_test(CPU_NONE,
+				      which_subnode,	
+				      master_node_get(dev),
+				      req_bit,
+				      intr_resflags,
+				      owner_dev,
+				      intr_name,
+				      resp_bit);
+
+	if (cpuid != CPU_NONE) {
+		if (cpu_on_subnode(cpuid, which_subnode))
+			return(cpuid);	/* got a valid interrupt target */
+		else
+			intr_unreserve_level(cpuid, *resp_bit);
+	}
+
+	printk("Cannot target interrupts to closest node(%d): %ld (0x%lx)\n",
+		master_node_get(dev),(long) owner_dev, (unsigned long)owner_dev);
+
+	/* Fall through into the default algorithm
+	 * (exhaustive-search-for-the-nearest-possible-interrupt-target)
+	 * for finding the interrupt target
+	 */
+
+#ifndef BRINGUP
+	// Use of this algorithm is deferred until the supporting
+	// code has been implemented.
+	/* 
+	 * No valid interrupt specification exists.
+	 * Try to find a node which is closest to the current node
+	 * which can process interrupts from a device
+	 */
+
+	center = cpuid_to_cnodeid(smp_processor_id());
+	while (radius <= maxradius) {
+
+		/* Try to find a node at the given radius and which
+		 * we haven't seen already.
+		 */
+		rv = physmem_select_neighbor_node(center,radius,&candidate,
+						  intr_cnode_seen,
+						  (void *)visited_cnodes,
+						  (void *)&num_visited_cnodes);
+		if (!rv) {
+			/* We have seen all the nodes at this particular radius
+			 * Go on to the next radius level.
+			 */
+			radius++;
+			continue;
+		}			      
+		/* We are seeing this candidate  cnode for the first time
+		 */
+		visited_cnodes[num_visited_cnodes++] = candidate;
+
+		cpuid = intr_bit_reserve_test(CPU_NONE,
+					      which_subnode,
+					      candidate,
+					      req_bit,
+					      intr_resflags,
+					      owner_dev,
+					      intr_name,
+					      resp_bit);
+
+		if (cpuid != CPU_NONE) {
+			if (cpu_on_subnode(cpuid, which_subnode))
+				return(cpuid);	/* got a valid interrupt target */
+			else
+				intr_unreserve_level(cpuid, *resp_bit);
+		}
+	}
+#else  /* BRINGUP */
+	{
+	// Do a stupid round-robin assignment of the node.
+		static cnodeid_t last_node = 0;
+
+		if (last_node > numnodes) last_node = 0;
+		for (candidate = last_node; candidate <= numnodes; candidate++) {
+			cpuid = intr_bit_reserve_test(CPU_NONE,
+					      which_subnode,
+					      candidate,
+					      req_bit,
+					      intr_resflags,
+					      owner_dev,
+					      intr_name,
+					      resp_bit);
+
+			if (cpuid != CPU_NONE) {
+				if (cpu_on_subnode(cpuid, which_subnode)) {
+					last_node++;
+					return(cpuid);	/* got a valid interrupt target */
+				}
+				else
+					intr_unreserve_level(cpuid, *resp_bit);
+			}
+			last_node++;
+		}
+	}
+#endif
+
+	printk("Cannot target interrupts to any close node: %ld (0x%lx)\n",
+		(long)owner_dev, (unsigned long)owner_dev);
+
+	/* In the worst case try to allocate interrupt bits on the
+	 * master processor's node. We may get here during error interrupt
+	 * allocation phase when the topology matrix is not yet setup
+	 * and hence cannot do an exhaustive search.
+	 */
+	ASSERT(cpu_allows_intr(master_procid));
+	cpuid = intr_bit_reserve_test(master_procid,
+				      which_subnode,
+				      CNODEID_NONE,
+				      req_bit,
+				      intr_resflags,
+				      owner_dev,
+				      intr_name,
+				      resp_bit);
+
+	if (cpuid != CPU_NONE) {
+		if (cpu_on_subnode(cpuid, which_subnode))
+			return(cpuid);
+		else
+			intr_unreserve_level(cpuid, *resp_bit);
+	}
+
+	printk("Cannot target interrupts: %ld (0x%lx)\n",
+		(long)owner_dev, (unsigned long)owner_dev);
+
+	return(CPU_NONE);	/* Should never get here */
+}
+
+
+
+
+#ifndef BRINGUP
+/*
+ * Should never receive an exception while running on the idle 
+ * stack.  It IS possible to handle *interrupts* while on the
+ * idle stack, but a non-interrupt *exception* is a problem.
+ */
+void
+idle_err(inst_t *epc, uint cause, void *fep, void *sp)
+{
+	eframe_t *ep = (eframe_t *)fep;
+
+    if ((cause & CAUSE_EXCMASK) == EXC_IBE ||
+	(cause & CAUSE_EXCMASK) == EXC_DBE) {
+	(void)dobuserre((eframe_t *)ep, epc, 0);
+    }
+
+    /* XXX - This will have to change to deal with various SN errors. */
+    panic( "exception on IDLE stack "
+	    "ep:0x%x epc:0x%x cause:0x%w32x sp:0x%x badvaddr:0x%x",
+	    ep, epc, cause, sp, getbadvaddr());
+    /* NOTREACHED */
+}
+
+
+/*
+ * earlynofault - handle very early global faults - usually just while
+ *      sizing memory
+ * Returns: 1 if should do nofault
+ *          0 if not
+ */
+/* ARGSUSED */
+int
+earlynofault(eframe_t *ep, uint code)
+{
+	switch(code) {
+	case EXC_DBE:
+		return(1);
+	default:
+		return(0);
+	}
+}
+
+
+
+/* ARGSUSED */
+static void
+cpuintr(void *arg1, void *arg2)
+{
+#if RTE
+	static int rte_intrdebug = 1;
+#endif
+	/*
+	 * Frame Scheduler
+	 */
+	LOG_TSTAMP_EVENT(RTMON_INTR, TSTAMP_EV_CPUINTR, NULL, NULL,
+			 NULL, NULL);
+
+	/*
+	 * Hardware clears the IO interrupts, but we need to clear software-
+	 * generated interrupts.
+	 */
+	LOCAL_HUB_CLR_INTR(CPU_ACTION_A + cputolocalslice(cpuid()));
+
+#if 0
+	/* XXX - Handle error interrupts. */
+	if (error_intr_reason)
+		error_intr();
+#endif /* 0 */
+
+	/*
+	 * If we're headed for panicspin and it is due to a NMI, save the
+	 * eframe in the NMI area
+	 */
+	if (private.p_va_panicspin && nmied) {
+		caddr_t	nmi_save_area;
+
+		nmi_save_area = (caddr_t) (TO_UNCAC(TO_NODE(
+			cputonasid(cpuid()), IP27_NMI_EFRAME_OFFSET)) + 
+			cputoslice(cpuid()) * IP27_NMI_EFRAME_SIZE);
+		bcopy((caddr_t) arg2, nmi_save_area, sizeof(eframe_t));
+	}
+
+	doacvec();
+#if RTE
+	if (private.p_flags & PDAF_ISOLATED && !rte_intrdebug)
+		goto end_cpuintr;
+#endif
+	doactions();
+#if RTE
+end_cpuintr:
+#endif
+	LOG_TSTAMP_EVENT(RTMON_INTR, TSTAMP_EV_INTREXIT, TSTAMP_EV_CPUINTR, NULL, NULL, NULL);
+}
+
+void
+install_cpuintr(cpuid_t cpu)
+{
+	int		intr_bit = CPU_ACTION_A + cputolocalslice(cpu);
+
+	if (intr_connect_level(cpu, intr_bit, INTPEND0_MAXMASK,
+				(intr_func_t) cpuintr, NULL, NULL))
+		panic("install_cpuintr: Can't connect interrupt.");
+}
+#endif /* BRINGUP */
+
+#ifdef DEBUG_INTR_TSTAMP
+/* We allocate an array, but only use element number 64.  This guarantees that
+ * the entry is in a cacheline by itself.
+ */
+#define DINTR_CNTIDX	32
+#define DINTR_TSTAMP1	48
+#define	DINTR_TSTAMP2	64
+volatile long long dintr_tstamp_cnt[128];
+int dintr_debug_output=0;
+extern void idbg_tstamp_debug(void);
+#ifdef SPLDEBUG
+extern void idbg_splx_log(int);
+#endif
+#if DEBUG_INTR_TSTAMP_DEBUG
+int dintr_enter_symmon=1000;	/* 1000 microseconds is 1 millisecond */
+#endif
+
+#ifndef BRINGUP
+/* ARGSUSED */
+static void
+cpulatintr(void *arg)
+{
+	/*
+	 * Hardware only clears IO interrupts so we have to clear our level
+	 * here.
+	 */
+	LOCAL_HUB_CLR_INTR(CPU_INTRLAT_A + cputolocalslice(cpuid()));
+
+#if DEBUG_INTR_TSTAMP_DEBUG
+	dintr_tstamp_cnt[DINTR_TSTAMP2] =  GET_LOCAL_RTC;
+	if ((dintr_tstamp_cnt[DINTR_TSTAMP2] - dintr_tstamp_cnt[DINTR_TSTAMP1])
+	    > dintr_enter_symmon) {
+#ifdef SPLDEBUG
+		extern int spldebug_log_off;
+
+		spldebug_log_off = 1;
+#endif /* SPLDEBUG */
+		debug("ring");
+#ifdef SPLDEBUG
+		spldebug_log_off = 0;
+#endif /* SPLDEBUG */
+	}
+#endif
+	dintr_tstamp_cnt[DINTR_CNTIDX]++;
+
+	return;
+}
+
+static int install_cpulat_first=0;
+
+void
+install_cpulatintr(cpuid_t cpu)
+{
+	int		intr_bit;
+	devfs_handle_t	cpuv = cpuid_to_vertex(cpu);
+
+	intr_bit = CPU_INTRLAT_A + cputolocalslice(cpu);
+	if (intr_bit != intr_reserve_level(cpu, intr_bit, II_THREADED,
+					   cpuv, "intrlat"))
+		panic( "install_cpulatintr: Can't reserve interrupt.");
+
+	if (intr_connect_level(cpu, intr_bit, INTPEND0_MAXMASK,
+				cpulatintr, NULL, NULL))
+		panic( "install_cpulatintr: Can't connect interrupt.");
+
+	if (!install_cpulat_first) {
+		install_cpulat_first++;
+		idbg_addfunc("tstamp_debug", (void (*)())idbg_tstamp_debug);
+#if defined(SPLDEBUG) || defined(SPLDEBUG_CPU_EVENTS)
+		idbg_addfunc("splx_log", (void (*)())idbg_splx_log);
+#endif /* SPLDEBUG || SPLDEBUG_CPU_EVENTS */
+	}
+}
+#endif /* BRINGUP */
+
+#endif /* DEBUG_INTR_TSTAMP */
+
+#ifndef BRINGUP
+/* ARGSUSED */
+static void
+dbgintr(void *arg)
+{
+	/*
+	 * Hardware only clears IO interrupts so we have to clear our level
+	 * here.
+	 */
+	LOCAL_HUB_CLR_INTR(N_INTPEND_BITS + DEBUG_INTR_A + cputolocalslice(cpuid()));
+
+	debug("zing");
+	return;
+}
+
+
+void
+install_dbgintr(cpuid_t cpu)
+{
+	int		intr_bit;
+	devfs_handle_t	cpuv = cpuid_to_vertex(cpu);
+
+	intr_bit = N_INTPEND_BITS + DEBUG_INTR_A + cputolocalslice(cpu);
+	if (intr_bit != intr_reserve_level(cpu, intr_bit, 1, cpuv, "DEBUG"))
+		panic("install_dbgintr: Can't reserve interrupt. "
+			" intr_bit %d" ,intr_bit);
+
+	if (intr_connect_level(cpu, intr_bit, INTPEND1_MAXMASK,
+				dbgintr, NULL, NULL))
+		panic("install_dbgintr: Can't connect interrupt.");
+
+#ifdef DEBUG_INTR_TSTAMP
+	/* Set up my interrupt latency test interrupt */
+	install_cpulatintr(cpu);
+#endif
+}
+
+/* ARGSUSED */
+static void
+tlbintr(void *arg)
+{
+	extern void tlbflush_rand(void);
+
+	/*
+	 * Hardware only clears IO interrupts so we have to clear our level
+	 * here.
+	 */
+	LOCAL_HUB_CLR_INTR(N_INTPEND_BITS + TLB_INTR_A + cputolocalslice(cpuid()));
+
+	tlbflush_rand();
+	return;
+}
+
+
+void
+install_tlbintr(cpuid_t cpu)
+{
+	int		intr_bit;
+	devfs_handle_t	cpuv = cpuid_to_vertex(cpu);
+
+	intr_bit = N_INTPEND_BITS + TLB_INTR_A + cputolocalslice(cpu);
+	if (intr_bit != intr_reserve_level(cpu, intr_bit, 1, cpuv, "DEBUG"))
+		panic("install_tlbintr: Can't reserve interrupt. "
+			" intr_bit %d" ,intr_bit);
+
+	if (intr_connect_level(cpu, intr_bit, INTPEND1_MAXMASK,
+				tlbintr, NULL, NULL))
+		panic("install_tlbintr: Can't connect interrupt.");
+
+}
+
+
+/*
+ * Send an interrupt to all nodes.  Don't panic if we get an error.
+ * Returns 1 if any exceptions occurred.
+ */
+int
+protected_broadcast(hubreg_t intrbit)
+{
+	nodepda_t *npdap = private.p_nodepda;
+	int byte, bit, sn;
+	int error = 0;
+
+	extern int _wbadaddr_val(volatile void *, int, volatile int *);
+
+	/* Send rather than clear an interrupt. */
+	intrbit |= 0x100;
+	
+	for (byte = 0; byte < NASID_MASK_BYTES; byte++) {
+		for (bit = 0; bit < 8; bit++) {
+			if (npdap->nasid_mask[byte] & (1 << bit)) {
+				nasid_t nasid = byte * 8 + bit;
+				for (sn=0; sn<NUM_SUBNODES; sn++) {
+					error += _wbadaddr_val(REMOTE_HUB_PI_ADDR(nasid,
+					      sn, PI_INT_PEND_MOD),
+					      sizeof(hubreg_t),
+					      (volatile int *)&intrbit);
+				}
+			}
+		}
+	}
+
+	return error;
+}
+
+
+/*
+ * Poll the interrupt register to see if another cpu has asked us
+ * to drop into the debugger (without lowering spl).
+ */
+void
+chkdebug(void)
+{
+	if (LOCAL_HUB_L(PI_INT_PEND1) & (1L << (DEBUG_INTR_A + cputolocalslice(cpuid()))))
+		dbgintr((void *)NULL);
+}
+
+
+/*
+ * Install special graphics interrupt.
+ */
+void
+install_gfxintr(cpuid_t cpu, ilvl_t swlevel, intr_func_t intr_func, void *intr_arg)
+{
+	int intr_bit = GFX_INTR_A + cputolocalslice(cpu);
+
+	if (intr_connect_level(cpu, intr_bit, swlevel,
+				intr_func, intr_arg, NULL))
+		panic("install_gfxintr: Can't connect interrupt.");
+}
+
+
+/*
+ * Install page migration interrupt handler.
+ */
+void
+hub_migrintr_init(cnodeid_t cnode)
+{
+	cpuid_t cpu = cnodetocpu(cnode);
+	int intr_bit = INT_PEND0_BASELVL + PG_MIG_INTR;
+
+	if (numnodes == 1){
+		/* 
+		 * No migration with just one node..
+		 */
+		return;
+	}
+	
+	if (cpu != -1) {
+		if (intr_connect_level(cpu, intr_bit, 0,
+			       (intr_func_t) migr_intr_handler, 0, (intr_func_t) migr_intr_prologue_handler))
+			panic( "hub_migrintr_init: Can't connect interrupt.");
+	}
+}
+
+
+/*
+ * Cause all CPUs to stop by sending them each a DEBUG interrupt.
+ * Parameter is actually a (cpumask_t *).
+ */
+void
+debug_stop_all_cpus(void *stoplist)
+{
+	int cpu;
+	ulong level;
+
+	for (cpu=0; cpu<maxcpus; cpu++) {
+		if (cpu == cpuid())
+			continue;
+		if (!cpu_enabled(cpu))
+		        continue;
+		/* "-1" is the old style parameter OR could be the new style
+		 * if no-one is currently stopped.  We only stop the
+		 * requested cpus, the others are already stopped (probably
+		 * at a breakpoint).
+		 */
+
+		if (((cpumask_t *)stoplist != (cpumask_t *)-1LL) &&
+		    (!CPUMASK_TSTB(*(cpumask_t*)stoplist, cpu)))
+			continue;
+
+		/*
+		 * CPU lslice A gets level DEBUG_INTR_A
+		 * CPU lslice B gets level DEBUG_INTR_B
+		 */
+		level = DEBUG_INTR_A + LOCALCPU(get_cpu_slice(cpu));
+		/*
+		 * Convert the compact hub number to the NASID to get the
+		 * correct part of the address space.  Then set the interrupt
+		 * bit associated with the CPU we want to send the interrupt
+		 * to.
+		 */
+		REMOTE_CPU_SEND_INTR(cpu, N_INTPEND_BITS + level);
+
+	}
+}
+
+
+struct hardwired_intr_s {
+	signed char level;
+	int flags;
+	char *name;
+} const hardwired_intr[] = {
+	{ INT_PEND0_BASELVL + RESERVED_INTR,	0,	"Reserved" },
+	{ INT_PEND0_BASELVL + GFX_INTR_A,	0, 	"Gfx A" },
+	{ INT_PEND0_BASELVL + GFX_INTR_B,	0, 	"Gfx B" },
+	{ INT_PEND0_BASELVL + PG_MIG_INTR,	II_THREADED, "Migration" },
+#if defined(SN1) && !defined(DIRECT_L1_CONSOLE)
+	{ INT_PEND0_BASELVL + UART_INTR,	II_THREADED, "Bedrock/L1" },
+#else
+	{ INT_PEND0_BASELVL + UART_INTR,	0,	"Hub I2C" },
+#endif
+	{ INT_PEND0_BASELVL + CC_PEND_A,	0,	"Crosscall A" },
+	{ INT_PEND0_BASELVL + CC_PEND_B,	0,	"Crosscall B" },
+	{ INT_PEND0_BASELVL + MSC_MESG_INTR,	II_THREADED, "MSC Message" },
+	{ INT_PEND0_BASELVL + CPU_ACTION_A,	0,	"CPU Action A" },
+	{ INT_PEND0_BASELVL + CPU_ACTION_B,	0,	"CPU Action B" },
+	{ INT_PEND1_BASELVL + IO_ERROR_INTR,	II_ERRORINT, "IO Error" },
+	{ INT_PEND1_BASELVL + CLK_ERR_INTR,	II_ERRORINT, "Clock Error" },
+	{ INT_PEND1_BASELVL + COR_ERR_INTR_A,	II_ERRORINT, "Correctable Error A" },
+	{ INT_PEND1_BASELVL + COR_ERR_INTR_B,	II_ERRORINT, "Correctable Error B" },
+	{ INT_PEND1_BASELVL + MD_COR_ERR_INTR,	II_ERRORINT, "MD Correct. Error" },
+	{ INT_PEND1_BASELVL + NI_ERROR_INTR,	II_ERRORINT, "NI Error" },
+	{ INT_PEND1_BASELVL + NI_BRDCAST_ERR_A,	II_ERRORINT, "Remote NI Error"},
+	{ INT_PEND1_BASELVL + NI_BRDCAST_ERR_B,	II_ERRORINT, "Remote NI Error"},
+	{ INT_PEND1_BASELVL + MSC_PANIC_INTR,	II_ERRORINT, "MSC Panic" },
+	{ INT_PEND1_BASELVL + LLP_PFAIL_INTR_A,	II_ERRORINT, "LLP Pfail WAR" },
+	{ INT_PEND1_BASELVL + LLP_PFAIL_INTR_B,	II_ERRORINT, "LLP Pfail WAR" },
+#ifdef SN1
+	{ INT_PEND1_BASELVL + NACK_INT_A,	0, "CPU A Nack count == NACK_CMP" },
+	{ INT_PEND1_BASELVL + NACK_INT_B,	0, "CPU B Nack count == NACK_CMP" },
+	{ INT_PEND1_BASELVL + LB_ERROR,		0, "Local Block Error" },
+	{ INT_PEND1_BASELVL + XB_ERROR,		0, "Local XBar Error" },
+#endif /* SN1 */	
+	{ -1, 0, (char *)NULL}
+};
+
+/*
+ * Reserve all of the hardwired interrupt levels so they're not used as
+ * general purpose bits later.
+ */
+void
+intr_reserve_hardwired(cnodeid_t cnode)
+{
+	cpuid_t cpu;
+	int level;
+	int i;
+	char subnode_done[NUM_SUBNODES];
+
+	cpu = cnodetocpu(cnode);
+	if (cpu == CPU_NONE) {
+		printk("Node %d has no CPUs", cnode);
+		return;
+	}
+
+	for (i=0; i<NUM_SUBNODES; i++)
+		subnode_done[i] = 0;
+
+	for (; cpu<maxcpus && cpu_enabled(cpu) && cputocnode(cpu) == cnode; cpu++) {
+		int which_subnode = cpuid_to_subnode(cpu);
+		if (subnode_done[which_subnode])
+			continue;
+		subnode_done[which_subnode] = 1;
+
+		for (i = 0; hardwired_intr[i].level != -1; i++) {
+			level = hardwired_intr[i].level;
+
+			if (level != intr_reserve_level(cpu, level,
+						hardwired_intr[i].flags,
+						(devfs_handle_t) NULL,
+						hardwired_intr[i].name))
+				panic("intr_reserve_hardwired: Can't reserve level %d.", level);
+		}
+	}
+}
+
+#endif /* BRINGUP */
+
+/*
+ * Check and clear interrupts.
+ */
+/*ARGSUSED*/
+static void
+intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend, int base_level,
+		char *name)
+{
+	volatile hubreg_t bits;
+	int i;
+
+	/* Check pending interrupts */
+	if ((bits = HUB_L(pend)) != 0) {
+		for (i = 0; i < N_INTPEND_BITS; i++) {
+			if (bits & (1 << i)) {
+#ifdef INTRDEBUG
+				printk( "Nasid %d interrupt bit %d set in %s",
+					nasid, i, name);
+#endif
+				LOCAL_HUB_CLR_INTR(base_level + i);
+			}
+		}
+	}
+}
+
+/*
+ * Clear out our interrupt registers.
+ */
+void
+intr_clear_all(nasid_t nasid)
+{
+	int	sn;
+
+	for(sn=0; sn<NUM_SUBNODES; sn++) {
+		REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK0_A, 0);
+		REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK0_B, 0);
+		REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK1_A, 0);
+		REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK1_B, 0);
+	
+		intr_clear_bits(nasid, REMOTE_HUB_PI_ADDR(nasid, sn, PI_INT_PEND0),
+				INT_PEND0_BASELVL, "INT_PEND0");
+		intr_clear_bits(nasid, REMOTE_HUB_PI_ADDR(nasid, sn, PI_INT_PEND1),
+				INT_PEND1_BASELVL, "INT_PEND1");
+	}
+}
+
+/* 
+ * Dump information about a particular interrupt vector.
+ */
+static void
+dump_vector(intr_info_t *info, intr_vector_t *vector, int bit, hubreg_t ip,
+		hubreg_t ima, hubreg_t imb, void (*pf)(char *, ...))
+{
+	hubreg_t value = 1LL << bit;
+
+	pf("  Bit %02d: %s: func 0x%x arg 0x%x prefunc 0x%x\n",
+		bit, info->ii_name,
+		vector->iv_func, vector->iv_arg, vector->iv_prefunc);
+	pf("   vertex 0x%x %s%s",
+		info->ii_owner_dev,
+		((info->ii_flags) & II_RESERVE) ? "R" : "U",
+		((info->ii_flags) & II_INUSE) ? "C" : "-");
+	pf("%s%s%s%s",
+		ip & value ? "P" : "-",
+		ima & value ? "A" : "-",
+		imb & value ? "B" : "-",
+		((info->ii_flags) & II_ERRORINT) ? "E" : "-");
+	pf("\n");
+}
+
+
+/*
+ * Dump information about interrupt vector assignment.
+ */
+void
+intr_dumpvec(cnodeid_t cnode, void (*pf)(char *, ...))
+{
+	nodepda_t *npda;
+	int ip, sn, bit;
+	intr_vecblk_t *dispatch;
+	hubreg_t ipr, ima, imb;
+	nasid_t nasid;
+
+	if ((cnode < 0) || (cnode >= numnodes)) {
+		pf("intr_dumpvec: cnodeid out of range: %d\n", cnode);
+		return ;
+	}
+
+	nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+	if (nasid == INVALID_NASID) {
+		pf("intr_dumpvec: Bad cnodeid: %d\n", cnode);
+		return ;
+	}
+		
+
+	npda = NODEPDA(cnode);
+
+	for (sn = 0; sn < NUM_SUBNODES; sn++) {
+		for (ip = 0; ip < 2; ip++) {
+			dispatch = ip ? &(SNPDA(npda,sn)->intr_dispatch1) : &(SNPDA(npda,sn)->intr_dispatch0);
+			ipr = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_PEND1 : PI_INT_PEND0);
+			ima = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_MASK1_A : PI_INT_MASK0_A);
+			imb = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_MASK1_B : PI_INT_MASK0_B);
+	
+			pf("Node %d INT_PEND%d:\n", cnode, ip);
+	
+			if (dispatch->ithreads_enabled)
+				pf(" Ithreads enabled\n");
+			else
+				pf(" Ithreads disabled\n");
+			pf(" vector_count = %d, vector_state = %d\n",
+				dispatch->vector_count,
+				dispatch->vector_state);
+			pf(" CPU A count %d, CPU B count %d\n",
+ 		   	dispatch->cpu_count[0],
+ 		   	dispatch->cpu_count[1]);
+			pf(" &vector_lock = 0x%x\n",
+				&(dispatch->vector_lock));
+			for (bit = 0; bit < N_INTPEND_BITS; bit++) {
+				if ((dispatch->info[bit].ii_flags & II_RESERVE) ||
+			    	(ipr & (1L << bit))) {
+					dump_vector(&(dispatch->info[bit]),
+					    	&(dispatch->vectors[bit]),
+					    	bit, ipr, ima, imb, pf);
+				}
+			}
+			pf("\n");
+		}
+	}
+}
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/ml_iograph.c linux/arch/ia64/sn/io/ml_iograph.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/ml_iograph.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/ml_iograph.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1583 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/eeprom.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/xtalk/xswitch.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/xtalk/xtalk_private.h>
+#include <asm/sn/xtalk/xtalkaddrs.h>
+
+extern int maxnodes;
+
+/* #define PROBE_TEST */
+
+/* At most 2 hubs can be connected to an xswitch */
+#define NUM_XSWITCH_VOLUNTEER 2
+
+/*
+ * Track which hubs have volunteered to manage devices hanging off of
+ * a Crosstalk Switch (e.g. xbow).  This structure is allocated,
+ * initialized, and hung off the xswitch vertex early on when the
+ * xswitch vertex is created.
+ */
+typedef struct xswitch_vol_s {
+	struct semaphore xswitch_volunteer_mutex;
+	int		xswitch_volunteer_count;
+	devfs_handle_t	xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
+} *xswitch_vol_t;
+
+void
+xswitch_vertex_init(devfs_handle_t xswitch)
+{
+	xswitch_vol_t xvolinfo;
+	int rc;
+
+	xvolinfo = kmalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL);
+	init_MUTEX(&xvolinfo->xswitch_volunteer_mutex);
+	xvolinfo->xswitch_volunteer_count = 0;
+	rc = hwgraph_info_add_LBL(xswitch, 
+			INFO_LBL_XSWITCH_VOL,
+			(arbitrary_info_t)xvolinfo);
+	ASSERT(rc == GRAPH_SUCCESS); rc = rc;
+}
+
+
+/*
+ * When assignment of hubs to widgets is complete, we no longer need the
+ * xswitch volunteer structure hanging around.  Destroy it.
+ */
+static void
+xswitch_volunteer_delete(devfs_handle_t xswitch)
+{
+	xswitch_vol_t xvolinfo;
+	int rc;
+
+	rc = hwgraph_info_remove_LBL(xswitch, 
+				INFO_LBL_XSWITCH_VOL,
+				(arbitrary_info_t *)&xvolinfo);
+#ifndef CONFIG_IA64_SGI_IO
+	ASSERT(rc == GRAPH_SUCCESS); rc = rc;
+#endif
+
+	kfree(xvolinfo);
+}
+/*
+ * A Crosstalk master volunteers to manage xwidgets on the specified xswitch.
+ */
+/* ARGSUSED */
+static void
+volunteer_for_widgets(devfs_handle_t xswitch, devfs_handle_t master)
+{
+	xswitch_vol_t xvolinfo = NULL;
+
+	(void)hwgraph_info_get_LBL(xswitch, 
+				INFO_LBL_XSWITCH_VOL, 
+				(arbitrary_info_t *)&xvolinfo);
+	if (xvolinfo == NULL) {
+#ifndef CONFIG_IA64_SGI_IO
+	    if (!is_headless_node_vertex(master))
+		cmn_err(CE_WARN, 
+			"volunteer for widgets: vertex %v has no info label",
+			xswitch);
+#endif
+	    return;
+	}
+
+#ifndef CONFIG_IA64_SGI_IO
+	mutex_lock(&xvolinfo->xswitch_volunteer_mutex, PZERO);
+#endif
+	ASSERT(xvolinfo->xswitch_volunteer_count < NUM_XSWITCH_VOLUNTEER);
+	xvolinfo->xswitch_volunteer[xvolinfo->xswitch_volunteer_count] = master;
+	xvolinfo->xswitch_volunteer_count++;
+#ifndef CONFIG_IA64_SGI_IO
+	mutex_unlock(&xvolinfo->xswitch_volunteer_mutex);
+#endif
+}
+
+#ifndef	BRINGUP
+/* 
+ * The "ideal fixed assignment" of 12 IO slots to 4 node slots.
+ * At index N is the node slot number of the node board that should
+ * ideally control the widget in IO slot N.  Note that if there is
+ * only one node board on a given xbow, it will control all of the
+ * devices on that xbow regardless of these defaults.
+ *
+ * 	N1 controls IO slots IO1, IO3, IO5	(upper left)
+ * 	N3 controls IO slots IO2, IO4, IO6	(upper right)
+ * 	N2 controls IO slots IO7, IO9, IO11	(lower left)
+ * 	N4 controls IO slots IO8, IO10, IO12	(lower right)
+ *
+ * This makes assignments predictable and easily controllable.
+ * TBD: Allow administrator to override these defaults.
+ */
+static slotid_t ideal_assignment[] = {
+	-1,	/* IO0 -->non-existent */
+	1,	/* IO1 -->N1 */
+	3,	/* IO2 -->N3 */
+	1,	/* IO3 -->N1 */
+	3,	/* IO4 -->N3 */
+	1,	/* IO5 -->N1 */
+	3,	/* IO6 -->N3 */
+	2,	/* IO7 -->N2 */
+	4,	/* IO8 -->N4 */
+	2,	/* IO9 -->N2 */
+	4,	/* IO10-->N4 */
+	2,	/* IO11-->N2 */
+	4	/* IO12-->N4 */
+};
+
+static int
+is_ideal_assignment(slotid_t hubslot, slotid_t ioslot)
+{
+	return(ideal_assignment[ioslot] == hubslot);
+}
+#endif /* ifndef BRINGUP */
+
+extern int xbow_port_io_enabled(nasid_t nasid, int widgetnum);
+
+/*
+ * Assign all the xwidgets hanging off the specified xswitch to the
+ * Crosstalk masters that have volunteered for xswitch duty.
+ */
+/* ARGSUSED */
+static void
+assign_widgets_to_volunteers(devfs_handle_t xswitch, devfs_handle_t hubv)
+{
+	xswitch_info_t xswitch_info;
+	xswitch_vol_t xvolinfo = NULL;
+	xwidgetnum_t widgetnum;
+	int curr_volunteer, num_volunteer;
+	nasid_t nasid;
+	hubinfo_t hubinfo;
+#ifndef BRINGUP
+	int xbownum;
+#endif
+
+	hubinfo_get(hubv, &hubinfo);
+	nasid = hubinfo->h_nasid;
+	
+	xswitch_info = xswitch_info_get(xswitch);
+	ASSERT(xswitch_info != NULL);
+
+	(void)hwgraph_info_get_LBL(xswitch, 
+				INFO_LBL_XSWITCH_VOL, 
+				(arbitrary_info_t *)&xvolinfo);
+	if (xvolinfo == NULL) {
+#ifndef CONFIG_IA64_SGI_IO
+	    if (!is_headless_node_vertex(hubv))
+		cmn_err(CE_WARN, 
+			"assign_widgets_to_volunteers:vertex %v has "
+			" no info label",
+			xswitch);
+#endif
+	    return;
+	}
+
+	num_volunteer = xvolinfo->xswitch_volunteer_count;
+	ASSERT(num_volunteer > 0);
+	curr_volunteer = 0;
+
+	/* Assign master hub for xswitch itself.  */
+	if (HUB_WIDGET_ID_MIN > 0) {
+		hubv = xvolinfo->xswitch_volunteer[0];
+		xswitch_info_master_assignment_set(xswitch_info, (xwidgetnum_t)0, hubv);
+	}
+
+#ifndef	BRINGUP
+	xbownum = get_node_crossbow(nasid);
+#endif /* ifndef BRINGUP */
+
+	/*
+	 * TBD: Use administrative information to alter assignment of
+	 * widgets to hubs.
+	 */
+	for (widgetnum=HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
+
+#ifndef BRINGUP
+		int i;
+#endif
+		/*
+		 * Ignore disabled/empty ports.
+		 */
+		if (!xbow_port_io_enabled(nasid, widgetnum)) 
+		    continue;
+
+		/*
+		 * If this is the master IO board, assign it to the same 
+		 * hub that owned it in the prom.
+		 */
+		if (is_master_nasid_widget(nasid, widgetnum)) {
+			int i;
+
+			for (i=0; i<num_volunteer; i++) {
+				hubv = xvolinfo->xswitch_volunteer[i];
+				hubinfo_get(hubv, &hubinfo);
+				nasid = hubinfo->h_nasid;
+				if (nasid == get_console_nasid())
+					goto do_assignment;
+			}
+#ifndef CONFIG_IA64_SGI_IO
+			cmn_err(CE_PANIC,
+				"Nasid == %d, console nasid == %d",
+				nasid, get_console_nasid());
+#endif
+		}
+
+#ifndef	BRINGUP
+		/*
+		 * Try to do the "ideal" assignment of IO slots to nodes.
+		 */
+		for (i=0; i<num_volunteer; i++) {
+			hubv = xvolinfo->xswitch_volunteer[i];
+			hubinfo_get(hubv, &hubinfo);
+			nasid = hubinfo->h_nasid;
+			if (is_ideal_assignment(SLOTNUM_GETSLOT(get_node_slotid(nasid)),
+						SLOTNUM_GETSLOT(get_widget_slotnum(xbownum, widgetnum)))) {
+
+				goto do_assignment;
+				
+			}
+		}
+#endif /* ifndef BRINGUP */
+
+		/*
+		 * Do a round-robin assignment among the volunteer nodes.
+		 */
+		hubv = xvolinfo->xswitch_volunteer[curr_volunteer];
+		curr_volunteer = (curr_volunteer + 1) % num_volunteer;
+		/* fall through */
+
+do_assignment:
+		/*
+		 * At this point, we want to make hubv the master of widgetnum.
+		 */
+		xswitch_info_master_assignment_set(xswitch_info, widgetnum, hubv);
+	}
+
+	xswitch_volunteer_delete(xswitch);
+}
+
+/*
+ * Early iograph initialization.  Called by master CPU in mlreset().
+ * Useful for including iograph.o in kernel.o.
+ */
+void
+iograph_early_init(void)
+{
+/*
+ * Need new way to get this information ..
+ */
+	cnodeid_t cnode;
+	nasid_t nasid;
+	lboard_t *board;
+
+	/*
+	 * Init. the board-to-hwgraph link early, so FRU analyzer
+	 * doesn't trip on leftover values if we panic early on.
+	 */
+	for(cnode = 0; cnode < numnodes; cnode++) {
+		nasid = COMPACT_TO_NASID_NODEID(cnode);
+		board = (lboard_t *)KL_CONFIG_INFO(nasid);
+		printk("iograph_early_init: Found board 0x%p\n", board);
+
+		/* Check out all the board info stored on a node */
+		while(board) {
+			board->brd_graph_link = GRAPH_VERTEX_NONE;
+			board = KLCF_NEXT(board);
+			printk("iograph_early_init: Found board 0x%p\n", board);
+
+
+		}
+	}
+
+	hubio_init();
+}
+
+#ifndef CONFIG_IA64_SGI_IO
+/* There is an identical definition of this in os/scheduler/runq.c */
+#define INIT_COOKIE(cookie) cookie.must_run = 0; cookie.cpu = PDA_RUNANYWHERE
+/*
+ * These functions absolutely don't belong here.  They're here, though,
+ * until the scheduler provides a platform-independent version
+ * that works the way it should.  The interface will definitely change, 
+ * too.  Currently used only in this file and by io/cdl.c in order to
+ * bind various I/O threads to a CPU on the proper node.
+ */
+cpu_cookie_t
+setnoderun(cnodeid_t cnodeid)
+{
+	int i;
+	cpuid_t cpunum;
+	cpu_cookie_t cookie;
+
+	INIT_COOKIE(cookie);
+	if (cnodeid == CNODEID_NONE)
+		return(cookie);
+
+	/*
+	 * Do a setmustrun to one of the CPUs on the specified
+	 * node.
+	 */
+	if ((cpunum = CNODE_TO_CPU_BASE(cnodeid)) == CPU_NONE) {
+		return(cookie);
+	}
+
+	cpunum += CNODE_NUM_CPUS(cnodeid) - 1;
+
+	for (i = 0; i < CNODE_NUM_CPUS(cnodeid); i++, cpunum--) {
+
+		if (cpu_enabled(cpunum)) {
+			cookie = setmustrun(cpunum);
+			break;
+		}
+	}
+
+	return(cookie);
+}
+
+void
+restorenoderun(cpu_cookie_t cookie)
+{
+	restoremustrun(cookie);
+}
+static sema_t io_init_sema;
+
+#endif	/* !CONFIG_IA64_SGI_IO */
+
+struct semaphore io_init_sema;
+
+
+/*
+ * Let boot processor know that we're done initializing our node's IO
+ * and then exit.
+ */
+/* ARGSUSED */
+static void
+io_init_done(cnodeid_t cnodeid,cpu_cookie_t c)
+{
+#ifndef CONFIG_IA64_SGI_IO
+	/* Let boot processor know that we're done. */
+	up(&io_init_sema);
+	/* This is for the setnoderun done when the io_init thread
+	 * started 
+	 */
+	restorenoderun(c);
+	sthread_exit();
+#endif
+}
+
+/* 
+ * Probe to see if this hub's xtalk link is active.  If so,
+ * return the Crosstalk Identification of the widget that we talk to.  
+ * This is called before any of the Crosstalk infrastructure for 
+ * this hub is set up.  It's usually called on the node that we're
+ * probing, but not always.
+ *
+ * TBD: Prom code should actually do this work, and pass through 
+ * hwid for our use.
+ */
+static void
+early_probe_for_widget(devfs_handle_t hubv, xwidget_hwid_t hwid)
+{
+	hubreg_t llp_csr_reg;
+	nasid_t nasid;
+	hubinfo_t hubinfo;
+
+	hubinfo_get(hubv, &hubinfo);
+	nasid = hubinfo->h_nasid;
+
+	llp_csr_reg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
+	/* 
+	 * If link is up, read the widget's part number.
+	 * A direct connect widget must respond to widgetnum=0.
+	 */
+	if (llp_csr_reg & IIO_LLP_CSR_IS_UP) {
+		/* TBD: Put hub into "indirect" mode */
+		/*
+		 * We're able to read from a widget because our hub's 
+		 * WIDGET_ID was set up earlier.
+		 */
+#ifdef	BRINGUP
+		widgetreg_t widget_id = *(volatile widgetreg_t *)
+			(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
+
+		printk("early_probe_for_widget: Hub Vertex 0x%p is UP widget_id = 0x%x Register 0x%p\n", hubv, widget_id,
+		(volatile widgetreg_t *)(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID) );
+
+#else	/* !BRINGUP */
+		widgetreg_t widget_id = XWIDGET_ID_READ(nasid, 0);
+#endif	/* BRINGUP */
+
+		hwid->part_num = XWIDGET_PART_NUM(widget_id);
+		hwid->rev_num = XWIDGET_REV_NUM(widget_id);
+		hwid->mfg_num = XWIDGET_MFG_NUM(widget_id);
+
+		/* TBD: link reset */
+	} else {
+
+		panic("\n\n**** early_probe_for_widget: Hub Vertex 0x%p is DOWN llp_csr_reg 0x%x ****\n\n", hubv, llp_csr_reg);
+
+		hwid->part_num = XWIDGET_PART_NUM_NONE;
+		hwid->rev_num = XWIDGET_REV_NUM_NONE;
+		hwid->mfg_num = XWIDGET_MFG_NUM_NONE;
+	}
+
+}
+
+/* Add inventory information to the widget vertex 
+ * Right now (module,slot,revision) is being
+ * added as inventory information.
+ */
+static void
+xwidget_inventory_add(devfs_handle_t 		widgetv,
+		      lboard_t 			*board,
+		      struct xwidget_hwid_s 	hwid)
+{
+	if (!board)
+		return;
+	/* Do not add inventory information for the baseio
+	 * on a speedo with an xbox. It has already been
+	 * taken care of in SN00_vmc.
+	 * Speedo with xbox's baseio comes in at slot io1 (widget 9)
+	 */
+	device_inventory_add(widgetv,INV_IOBD,board->brd_type,
+			     board->brd_module,
+			     SLOTNUM_GETSLOT(board->brd_slot),
+			     hwid.rev_num);
+}
+
+/*
+ * io_xswitch_widget_init
+ *	
+ */
+
+/* defined in include/linux/ctype.h  */
+/* #define toupper(c)	(islower(c) ? (c) - 'a' + 'A' : (c)) */
+
+void
+io_xswitch_widget_init(devfs_handle_t  	xswitchv,
+		       devfs_handle_t	hubv,
+		       xwidgetnum_t	widgetnum,
+		       async_attach_t	aa)
+{
+	xswitch_info_t		xswitch_info;
+	xwidgetnum_t		hub_widgetid;
+	devfs_handle_t		widgetv;
+	cnodeid_t		cnode;
+	widgetreg_t		widget_id;
+	nasid_t			nasid, peer_nasid;
+	struct xwidget_hwid_s 	hwid;
+	hubinfo_t		hubinfo;
+	/*REFERENCED*/
+	int			rc;
+	char			slotname[SLOTNUM_MAXLENGTH];
+	char 			pathname[128];
+	char			new_name[64];
+	moduleid_t		module;
+	slotid_t		slot;
+	lboard_t		*board = NULL;
+	
+	printk("\nio_xswitch_widget_init: hubv 0x%p, xswitchv 0x%p, widgetnum 0x%x\n", hubv, xswitchv, widgetnum);
+	/*
+	 * Verify that xswitchv is indeed an attached xswitch.
+	 */
+	xswitch_info = xswitch_info_get(xswitchv);
+	ASSERT(xswitch_info != NULL);
+
+	hubinfo_get(hubv, &hubinfo);
+	nasid = hubinfo->h_nasid;
+	cnode = NASID_TO_COMPACT_NODEID(nasid);
+	hub_widgetid = hubinfo->h_widgetid;
+
+
+	/* Who's the other guy on our crossbow (if anyone) */
+	peer_nasid = NODEPDA(cnode)->xbow_peer;
+	if (peer_nasid == INVALID_NASID)
+		/* If I don't have a peer, use myself. */
+		peer_nasid = nasid;
+
+
+	/* Check my xbow structure and my peer's */
+	if (!xbow_port_io_enabled(nasid, widgetnum) &&
+	    !xbow_port_io_enabled(peer_nasid, widgetnum)) {
+		return;
+	}
+
+	if (xswitch_info_link_ok(xswitch_info, widgetnum)) {
+		char			name[4];
+		/*
+		 * If the current hub is not supposed to be the master 
+		 * for this widgetnum, then skip this widget.
+		 */
+		if (xswitch_info_master_assignment_get(xswitch_info,
+						       widgetnum) != hubv) {
+			return;
+		}
+
+		module  = NODEPDA(cnode)->module_id;
+#ifdef XBRIDGE_REGS_SIM
+		/* hardwire for now...could do this with something like:
+		 * xbow_soft_t soft = hwgraph_fastinfo_get(vhdl);
+		 * xbow_t xbow = soft->base;
+		 * xbowreg_t xwidget_id = xbow->xb_wid_id;
+		 * but I don't feel like figuring out vhdl right now..
+		 * and I know for a fact the answer is 0x2d000049 
+		 */
+		printk("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: reading xwidget id: hardwired to xbridge (0x2d000049).\n");
+		printk("XWIDGET_PART_NUM(0x2d000049)= 0x%x\n", XWIDGET_PART_NUM(0x2d000049));
+		if (XWIDGET_PART_NUM(0x2d000049)==XXBOW_WIDGET_PART_NUM) {
+#else
+		if (nasid_has_xbridge(nasid)) {
+#endif /* XBRIDGE_REGS_SIM */
+			board = find_lboard_module_class(
+				(lboard_t *)KL_CONFIG_INFO(nasid),
+				module,
+				KLTYPE_IOBRICK);
+
+			if (board)
+				printk("io_xswitch_widget_init: Found KLTYPE_IOBRICK Board 0x%p brd_type 0x%x\n", board, board->brd_type);
+
+			/*
+			 * BRINGUP
+	 		 * Make sure we really want to say xbrick, pbrick,
+			 * etc. rather than XIO, graphics, etc.
+	 		 */
+
+#ifdef SUPPORT_PRINTING_M_FORMAT
+			sprintf(pathname, EDGE_LBL_MODULE "/%M/"
+#else
+			sprintf(pathname, EDGE_LBL_MODULE "/%x/"
+#endif
+				"%cbrick" "/%s/%d",
+				NODEPDA(cnode)->module_id,
+#ifdef BRINGUP
+
+				(board->brd_type == KLTYPE_IBRICK) ? 'I' :
+				(board->brd_type == KLTYPE_PBRICK) ? 'P' :
+				(board->brd_type == KLTYPE_XBRICK) ? 'X' : '?',
+#else
+				toupper(MODULE_GET_BTCHAR(NODEPDA(cnode)->module_id)),
+#endif /* BRINGUP */
+				EDGE_LBL_XTALK, widgetnum);
+		} 
+		
+		printk("io_xswitch_widget_init: path= %s\n", pathname);
+		rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);
+		
+		ASSERT(rc == GRAPH_SUCCESS);
+
+		/* This is needed to let the user programs to map the
+		 * module,slot numbers to the corresponding widget numbers
+		 * on the crossbow.
+		 */
+		rc = device_master_set(hwgraph_connectpt_get(widgetv), hubv);
+
+		/* If we are looking at the global master io6
+		 * then add information about the version of
+		 * the io6prom as a part of "detailed inventory"
+		 * information.
+		 */
+		if (is_master_baseio(nasid,
+				     NODEPDA(cnode)->module_id,
+#ifdef BRINGUP
+ 				     get_widget_slotnum(0,widgetnum))) {
+#else
+	<<< BOMB! >>> Need a new way to get slot numbers on IP35/IP37
+#endif
+			extern void klhwg_baseio_inventory_add(devfs_handle_t,
+							       cnodeid_t);
+			module 	= NODEPDA(cnode)->module_id;
+
+#ifdef XBRIDGE_REGS_SIM
+			printk("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: reading xwidget id: hardwired to xbridge (0x2d000049).\n");
+			if (XWIDGET_PART_NUM(0x2d000049)==XXBOW_WIDGET_PART_NUM) {
+#else
+			if (nasid_has_xbridge(nasid)) {
+#endif /* XBRIDGE_REGS_SIM */
+				board = find_lboard_module(
+					(lboard_t *)KL_CONFIG_INFO(nasid),
+					module);
+				/*
+			 	 * BRINGUP
+				 * Change iobrick to correct i/o brick
+				 */
+#ifdef SUPPORT_PRINTING_M_FORMAT
+				sprintf(pathname, EDGE_LBL_MODULE "/%M/"
+#else
+				sprintf(pathname, EDGE_LBL_MODULE "/%x/"
+#endif
+					"iobrick" "/%s/%d",
+					NODEPDA(cnode)->module_id,
+					EDGE_LBL_XTALK, widgetnum);
+			} else {
+#ifdef BRINGUP
+				slot = get_widget_slotnum(0, widgetnum);
+#else
+	<<< BOMB! Need a new way to get slot numbers on IP35/IP37
+#endif
+				board = get_board_name(nasid, module, slot,
+								new_name);
+				/*
+			 	 * Create the vertex for the widget, 
+				 * using the decimal 
+			 	 * widgetnum as the name of the primary edge.
+			 	 */
+#ifdef SUPPORT_PRINTING_M_FORMAT
+				sprintf(pathname, EDGE_LBL_MODULE "/%M/"
+#else
+				sprintf(pathname, EDGE_LBL_MODULE "/%x/"
+#endif
+					  	EDGE_LBL_SLOT "/%s/%s",
+					NODEPDA(cnode)->module_id,
+					slotname, new_name);
+			}
+
+			rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);
+			printk("io_xswitch_widget_init: (2) path= %s\n", pathname);
+		        /*
+		         * This is a weird ass code needed for error injection
+		         * purposes.
+		         */
+		        rc = device_master_set(hwgraph_connectpt_get(widgetv), hubv);
+			
+			klhwg_baseio_inventory_add(widgetv,cnode);
+		}
+		sprintf(name, "%d", widgetnum);
+		printk("io_xswitch_widget_init: FIXME hwgraph_edge_add %s xswitchv 0x%p, widgetv 0x%p\n", name, xswitchv, widgetv);
+		rc = hwgraph_edge_add(xswitchv, widgetv, name);
+		
+		/*
+		 * crosstalk switch code tracks which
+		 * widget is attached to each link.
+		 */
+		xswitch_info_vhdl_set(xswitch_info, widgetnum, widgetv);
+		
+		/*
+		 * Peek at the widget to get its crosstalk part and
+		 * mfgr numbers, then present it to the generic xtalk
+		 * bus provider to have its driver attach routine
+		 * called (or not).
+		 */
+#ifdef XBRIDGE_REGS_SIM
+		widget_id = 0x2d000049;
+		printk("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: id hardwired to widget_id\n");
+#else
+		widget_id = XWIDGET_ID_READ(nasid, widgetnum);
+#endif /* XBRIDGE_REGS_SIM */
+		hwid.part_num = XWIDGET_PART_NUM(widget_id);
+		hwid.rev_num = XWIDGET_REV_NUM(widget_id);
+		hwid.mfg_num = XWIDGET_MFG_NUM(widget_id);
+		/* Store some inventory information about
+		 * the xwidget in the hardware graph.
+		 */
+		xwidget_inventory_add(widgetv,board,hwid);
+		
+		(void)xwidget_register(&hwid, widgetv, widgetnum,
+				       hubv, hub_widgetid,
+				       aa);
+
+#ifdef	SN0_USE_BTE
+		bte_bpush_war(cnode, (void *)board);
+#endif
+	}
+
+}
+
+
+/*
+ * Walk every possible widget number on this node's crosstalk switch
+ * and initialize whatever widget (if any) sits behind it.  Widget
+ * attaches may run in parallel; we wait for all of them to finish
+ * before returning.
+ */
+static void
+io_init_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnode)
+{
+	xwidgetnum_t		widgetnum;
+	async_attach_t          aa;
+
+	aa = async_attach_new();
+	
+	printk("io_init_xswitch_widgets: xswitchv 0x%p for cnode %d\n", xswitchv, cnode);
+
+	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; 
+	     widgetnum++) {
+#ifdef BRINGUP
+		/* NOTE(review): widget 0xe is skipped during bringup --
+		 * presumably a known-problematic slot; confirm before
+		 * removing this special case. */
+		if (widgetnum != 0xe) 
+			io_xswitch_widget_init(xswitchv,
+				       cnodeid_to_vertex(cnode),
+				       widgetnum, aa);
+
+#else
+		io_xswitch_widget_init(xswitchv,
+				       cnodeid_to_vertex(cnode),
+				       widgetnum, aa);
+#endif /* BRINGUP */
+	}
+	/* 
+	 * Wait for parallel attach threads, if any, to complete.
+	 */
+	async_attach_waitall(aa);
+	async_attach_free(aa);
+}
+
+/*
+ * For each PCI bridge connected to the xswitch, add a link from the
+ * board's klconfig info to the bridge's hwgraph vertex.  This lets
+ * the FRU analyzer find the bridge without traversing the hardware
+ * graph and risking hangs.
+ */
+static void
+io_link_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnodeid)
+{
+	xwidgetnum_t		widgetnum;
+	char 			pathname[128];
+	devfs_handle_t		vhdl;
+	nasid_t			nasid, peer_nasid;
+	lboard_t		*board;
+
+
+
+	/* And its connected hub's nasids */
+	nasid = COMPACT_TO_NASID_NODEID(cnodeid);
+	peer_nasid = NODEPDA(cnodeid)->xbow_peer;
+
+	/* 
+	 * Look for paths matching "<widgetnum>/pci" under xswitchv.
+	 * For every widget, init. its lboard's hwgraph link.  If the
+	 * board has a PCI bridge, point the link to it.
+	 */
+	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
+		 widgetnum++) {
+		/* Skip widget numbers that have no vertex in the hwgraph. */
+		sprintf(pathname, "%d", widgetnum);
+		if (hwgraph_traverse(xswitchv, pathname, &vhdl) !=
+		    GRAPH_SUCCESS)
+			continue;
+
+#if defined (CONFIG_SGI_IP35) || defined (CONFIG_IA64_SGI_SN1) || defined (CONFIG_IA64_GENERIC)
+		board = find_lboard_module((lboard_t *)KL_CONFIG_INFO(nasid),
+				NODEPDA(cnodeid)->module_id);
+#else
+		{
+		slotid_t	slot;
+		slot = get_widget_slotnum(xbow_num, widgetnum);
+		board = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid),
+				    NODEPDA(cnodeid)->module_id, slot);
+		}
+#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+		if (board == NULL && peer_nasid != INVALID_NASID) {
+			/*
+			 * Try to find the board on our peer
+			 */
+#if defined (CONFIG_SGI_IP35) || defined (CONFIG_IA64_SGI_SN1) || defined (CONFIG_IA64_GENERIC)
+			board = find_lboard_module(
+				(lboard_t *)KL_CONFIG_INFO(peer_nasid),
+				NODEPDA(cnodeid)->module_id);
+
+#else
+			/* NOTE(review): `slot` is out of scope here -- it is
+			 * declared in the inner block above, so this branch
+			 * would not compile in non-IP35/SN1 configurations. */
+			board = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(peer_nasid),
+						    NODEPDA(cnodeid)->module_id, slot);
+
+#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+		}
+		if (board == NULL) {
+#ifndef CONFIG_IA64_SGI_IO
+			cmn_err(CE_WARN,
+				"Could not find PROM info for vertex %v, "
+				"FRU analyzer may fail",
+				vhdl);
+#endif
+			/* NOTE(review): returning here abandons the link-up
+			 * of all remaining widgets; `continue` may be the
+			 * intended behavior -- confirm. */
+			return;
+		}
+
+		/* Point the board's graph link at the widget's PCI vertex,
+		 * or mark it absent if there is no PCI bridge there. */
+		sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
+		if (hwgraph_traverse(xswitchv, pathname, &vhdl) == 
+		    GRAPH_SUCCESS)
+			board->brd_graph_link = vhdl;
+		else
+			board->brd_graph_link = GRAPH_VERTEX_NONE;
+	}
+}
+
+/*
+ * Initialize all I/O on the specified node.
+ */
+static void
+io_init_node(cnodeid_t cnodeid)
+{
+	/*REFERENCED*/
+	devfs_handle_t hubv, switchv, widgetv;
+	struct xwidget_hwid_s hwid;
+	hubinfo_t hubinfo;
+	int is_xswitch;
+	nodepda_t	*npdap;
+#ifndef CONFIG_IA64_SGI_IO
+	sema_t 		*peer_sema = 0;
+#else
+	struct semaphore *peer_sema = 0;
+#endif
+	uint32_t	widget_partnum;
+	nodepda_router_info_t *npda_rip;
+	cpu_cookie_t	c = 0;
+
+#ifndef CONFIG_IA64_SGI_IO
+	/* Try to execute on the node that we're initializing. */
+	c = setnoderun(cnodeid);
+#endif
+	npdap = NODEPDA(cnodeid);
+
+	/*
+	 * Get the "top" vertex for this node's hardware
+	 * graph; it will carry the per-hub hub-specific
+	 * data, and act as the crosstalk provider master.
+	 * Its canonical path is probably something of the
+	 * form /hw/module/%M/slot/%d/node
+	 */
+	hubv = cnodeid_to_vertex(cnodeid);
+	printk("io_init_node: Initialize IO for cnode %d hubv(node) 0x%p npdap 0x%p\n", cnodeid, hubv, npdap);
+
+	ASSERT(hubv != GRAPH_VERTEX_NONE);
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+	hubdev_docallouts(hubv);
+#endif
+
+	/*
+	 * Set up the dependent routers if we have any.
+	 */
+	npda_rip = npdap->npda_rip_first;
+
+	while(npda_rip) {
+		/* If the router info has not been initialized
+		 * then we need to do the router initialization
+		 */
+		if (!npda_rip->router_infop) {
+			router_init(cnodeid,0,npda_rip);
+		}
+		npda_rip = npda_rip->router_next;
+	}
+
+	/*
+	 * Read mfg info on this hub
+	 */
+#ifndef CONFIG_IA64_SGI_IO
+	printk("io_init_node: FIXME need to implement HUB_VERTEX_MFG_INFO\n");
+	HUB_VERTEX_MFG_INFO(hubv);
+#endif /* CONFIG_IA64_SGI_IO */
+
+	/* 
+	 * If nothing connected to this hub's xtalk port, we're done.
+	 */
+	early_probe_for_widget(hubv, &hwid);
+	if (hwid.part_num == XWIDGET_PART_NUM_NONE) {
+#ifdef PROBE_TEST
+		if ((cnodeid == 1) || (cnodeid == 2)) {
+			int index;
+
+			for (index = 0; index < 600; index++)
+				printk("Interfering with device probing!!!\n");
+		}
+#endif
+		/* io_init_done takes cpu cookie as 2nd argument 
+		 * to do a restorenoderun for the setnoderun done 
+		 * at the start of this thread 
+		 */
+		
+		printk("**** io_init_node: Node's 0x%p hub widget has XWIDGET_PART_NUM_NONE ****\n", hubv);
+		io_init_done(cnodeid,c);
+		/* NOTREACHED */
+	}
+
+	/* 
+	 * attach our hub_provider information to hubv,
+	 * so we can use it as a crosstalk provider "master"
+	 * vertex.
+	 */
+	xtalk_provider_register(hubv, &hub_provider);
+	xtalk_provider_startup(hubv);
+
+	/*
+	 * Create a vertex to represent the crosstalk bus
+	 * attached to this hub, and a vertex to be used
+	 * as the connect point for whatever is out there
+	 * on the other side of our crosstalk connection.
+	 *
+	 * Crosstalk Switch drivers "climb up" from their
+	 * connection point to try and take over the switch
+	 * point.
+	 *
+	 * Of course, the edges and vertices may already
+	 * exist, in which case our net effect is just to
+	 * associate the "xtalk_" driver with the connection
+	 * point for the device.
+	 */
+
+	(void)hwgraph_path_add(hubv, EDGE_LBL_XTALK, &switchv);
+
+	printk("io_init_node: Created 'xtalk' entry to '../node/' xtalk vertex 0x%p\n", switchv);
+
+	ASSERT(switchv != GRAPH_VERTEX_NONE);
+
+	(void)hwgraph_edge_add(hubv, switchv, EDGE_LBL_IO);
+
+	printk("io_init_node: Created symlink 'io' from ../node/io to ../node/xtalk \n");
+
+	/*
+	 * We need to find the widget id and update the basew_id field
+	 * accordingly. In particular, SN00 has direct connected bridge,
+	 * and hence widget id is Not 0.
+	 */
+
+	widget_partnum = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + WIDGET_ID))) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
+
+	if (widget_partnum == BRIDGE_WIDGET_PART_NUM ||
+				widget_partnum == XBRIDGE_WIDGET_PART_NUM){
+		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
+
+		printk("io_init_node: Found XBRIDGE widget_partnum= 0x%x\n", widget_partnum);
+
+	} else if (widget_partnum == XBOW_WIDGET_PART_NUM ||
+				widget_partnum == XXBOW_WIDGET_PART_NUM) {
+		/* 
+		 * Xbow control register does not have the widget ID field.
+		 * So, hard code the widget ID to be zero.
+		 */
+		printk("io_init_node: Found XBOW widget_partnum= 0x%x\n", widget_partnum);
+		npdap->basew_id = 0;
+
+#if defined(BRINGUP)
+	} else if (widget_partnum == XG_WIDGET_PART_NUM) {
+		/* 
+		 * OK, WTF do we do here if we have an XG direct connected to a HUB/Bedrock???
+		 * So, hard code the widget ID to be zero?
+		 */
+		/* NOTE(review): the zero assignment below is immediately
+		 * overwritten by the register read -- one of the two lines
+		 * is dead; confirm which is intended. */
+		npdap->basew_id = 0;
+		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
+#endif
+	} else { 
+		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
+
+		panic(" ****io_init_node: Unknown Widget Part Number 0x%x Widgt ID 0x%x attached to Hubv 0x%p ****\n", widget_partnum, npdap->basew_id, hubv);
+
+		/*NOTREACHED*/
+	}
+	{
+		/* Create a vertex under the xtalk bus named after the
+		 * (hex) widget id of whatever is directly attached. */
+		char widname[10];
+		sprintf(widname, "%x", npdap->basew_id);
+		(void)hwgraph_path_add(switchv, widname, &widgetv);
+		printk("io_init_node: Created '%s' to '..node/xtalk/' vertex 0x%p\n", widname, widgetv);
+		ASSERT(widgetv != GRAPH_VERTEX_NONE);
+	}
+	
+	nodepda->basew_xc = widgetv;
+
+	is_xswitch = xwidget_hwid_is_xswitch(&hwid);
+
+	/* 
+	 * Try to become the master of the widget.  If this is an xswitch
+	 * with multiple hubs connected, only one will succeed.  Mastership
+	 * of an xswitch is used only when touching registers on that xswitch.
+	 * The slave xwidgets connected to the xswitch can be owned by various
+	 * masters.
+	 */
+	if (device_master_set(widgetv, hubv) == 0) {
+
+		/* Only one hub (thread) per Crosstalk device or switch makes
+		 * it to here.
+		 */
+
+		/* 
+		 * Initialize whatever xwidget is hanging off our hub.
+		 * Whatever it is, it's accessible through widgetnum 0.
+		 */
+		hubinfo_get(hubv, &hubinfo);
+
+		(void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid, NULL);
+
+		if (!is_xswitch) {
+			/* io_init_done takes cpu cookie as 2nd argument 
+			 * to do a restorenoderun for the setnoderun done 
+			 * at the start of this thread 
+			 */
+			io_init_done(cnodeid,c);
+			/* NOTREACHED */
+		}
+
+		/* 
+		 * Special handling for Crosstalk Switches (e.g. xbow).
+		 * We need to do things in roughly the following order:
+		 *	1) Initialize xswitch hardware (done above)
+		 *	2) Determine which hubs are available to be widget masters
+		 *	3) Discover which links are active from the xswitch
+		 *	4) Assign xwidgets hanging off the xswitch to hubs
+		 *	5) Initialize all xwidgets on the xswitch
+		 */
+
+		volunteer_for_widgets(switchv, hubv);
+
+		/* If there's someone else on this crossbow, recognize him */
+		if (npdap->xbow_peer != INVALID_NASID) {
+			nodepda_t *peer_npdap = NODEPDA(NASID_TO_COMPACT_NODEID(npdap->xbow_peer));
+			peer_sema = &peer_npdap->xbow_sema;
+			volunteer_for_widgets(switchv, peer_npdap->node_vertex);
+		}
+
+		assign_widgets_to_volunteers(switchv, hubv);
+
+		/* Signal that we're done */
+		if (peer_sema) {
+			up(peer_sema);
+		}
+		
+	}
+	else {
+	    /* Wait 'til master is done assigning widgets. */
+	    down(&npdap->xbow_sema);
+	}
+
+#ifdef PROBE_TEST
+	if ((cnodeid == 1) || (cnodeid == 2)) {
+		int index;
+
+		for (index = 0; index < 500; index++)
+			printk("Interfering with device probing!!!\n");
+	}
+#endif
+	/* Now both nodes can safely initialize widgets */
+	io_init_xswitch_widgets(switchv, cnodeid);
+	io_link_xswitch_widgets(switchv, cnodeid);
+
+	/* io_init_done takes cpu cookie as 2nd argument 
+	 * to do a restorenoderun for the setnoderun done 
+	 * at the start of this thread 
+	 */
+	io_init_done(cnodeid,c);
+
+	printk("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
+}
+
+
+#define IOINIT_STKSZ	(16 * 1024)
+
+#ifndef CONFIG_IA64_SGI_IO
+#include <sys/sn/iograph.h>
+#endif
+/* Path fragments used to build hwgraph device names below.
+ * NOTE(review): identifiers beginning with a double underscore are
+ * reserved for the implementation. */
+#define __DEVSTR1 	"/../.master/"
+#define __DEVSTR2 	"/target/"
+#define __DEVSTR3 	"/lun/0/disk/partition/"
+#define	__DEVSTR4	"/../ef"
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+/*
+ * Currently, we need to allow for 5 IBrick slots with 1 FC each
+ * plus an internal 1394.
+ *
+ * ioconfig starts numbering SCSI's at NUM_BASE_IO_SCSI_CTLR.
+ */
+#define NUM_BASE_IO_SCSI_CTLR 6
+/* NOTE(review): NUM_BASE_IO_SCSI_CTLR is only defined under this #if
+ * but is used unconditionally below (array size, loops); other configs
+ * would fail to compile -- confirm whether a fallback is needed. */
+#endif
+/*
+ * This tells ioconfig where it can start numbering scsi controllers.
+ * Below this base number, platform-specific handles the numbering.
+ * XXX Irix legacy..controller numbering should be part of devfsd's job
+ */
+int num_base_io_scsi_ctlr = 2; /* used by syssgi */
+devfs_handle_t		base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
+static devfs_handle_t	baseio_enet_vhdl,baseio_console_vhdl;
+
+/*
+ * Put the logical controller number information in the 
+ * scsi controller vertices for each scsi controller that
+ * is in a "fixed position".
+ */
+static void
+scsi_ctlr_nums_add(devfs_handle_t pci_vhdl)
+{
+	{
+		int i;
+
+		num_base_io_scsi_ctlr = NUM_BASE_IO_SCSI_CTLR;
+
+		/* Initialize base_io_scsi_ctlr_vhdl array */
+		for (i=0; i<NUM_BASE_IO_SCSI_CTLR; i++)
+			base_io_scsi_ctlr_vhdl[i] = GRAPH_VERTEX_NONE;
+	}
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+	{
+	/*
+	 * May want to consider changing the SN0 code, above, to work more like
+	 * the way this works.
+	 */
+	devfs_handle_t base_ibrick_xbridge_vhdl;
+	devfs_handle_t base_ibrick_xtalk_widget_vhdl;
+	devfs_handle_t scsi_ctlr_vhdl;
+	int i;
+	graph_error_t rv;
+
+	/*
+	 * This is a table of "well-known" SCSI controllers and their well-known
+	 * controller numbers.  The names in the table start from the base IBrick's
+	 * Xbridge vertex, so the first component is the xtalk widget number.
+	 */
+	static struct {
+		char	*base_ibrick_scsi_path;
+		int	controller_number;
+	} hardwired_scsi_controllers[] = {
+		{"15/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 0},
+		{"15/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 1},
+		{"15/" EDGE_LBL_PCI "/3/" EDGE_LBL_SCSI_CTLR "/0", 2},
+		{"14/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 3},
+		{"14/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 4},
+		{NULL, -1} /* must be last */
+	};
+
+	/* Climb from the PCI vertex up to the IBrick's Xbridge vertex,
+	 * which is the root the table paths are relative to. */
+	base_ibrick_xtalk_widget_vhdl = hwgraph_connectpt_get(pci_vhdl);
+	ASSERT_ALWAYS(base_ibrick_xtalk_widget_vhdl != GRAPH_VERTEX_NONE);
+
+	base_ibrick_xbridge_vhdl = hwgraph_connectpt_get(base_ibrick_xtalk_widget_vhdl);
+	ASSERT_ALWAYS(base_ibrick_xbridge_vhdl != GRAPH_VERTEX_NONE);
+	hwgraph_vertex_unref(base_ibrick_xtalk_widget_vhdl);
+
+	/*
+	 * Iterate through the list of well-known SCSI controllers.
+	 * For each controller found, set its controller number according
+	 * to the table.
+	 */
+	for (i=0; hardwired_scsi_controllers[i].base_ibrick_scsi_path != NULL; i++) {
+		rv = hwgraph_path_lookup(base_ibrick_xbridge_vhdl,
+			hardwired_scsi_controllers[i].base_ibrick_scsi_path, &scsi_ctlr_vhdl, NULL);
+
+		if (rv != GRAPH_SUCCESS) /* No SCSI at this path */
+			continue;
+
+		ASSERT(hardwired_scsi_controllers[i].controller_number < NUM_BASE_IO_SCSI_CTLR);
+		base_io_scsi_ctlr_vhdl[hardwired_scsi_controllers[i].controller_number] = scsi_ctlr_vhdl;
+		device_controller_num_set(scsi_ctlr_vhdl, hardwired_scsi_controllers[i].controller_number);
+		hwgraph_vertex_unref(scsi_ctlr_vhdl); /* (even though we're actually keeping a reference) */
+	}
+
+	hwgraph_vertex_unref(base_ibrick_xbridge_vhdl);
+	}
+#else
+/* NOTE(review): "#pragma error" is not standard C; it is intended as a
+ * build bomb for unsupported configurations but many compilers will
+ * silently ignore an unknown pragma. */
+#pragma error Bomb!
+#endif
+}
+
+
+#ifndef CONFIG_IA64_SGI_IO
+#include <sys/asm/sn/ioerror_handling.h>
+#else
+#include <asm/sn/ioerror_handling.h>
+#endif
+extern devfs_handle_t 	ioc3_console_vhdl_get(void);
+devfs_handle_t		sys_critical_graph_root = GRAPH_VERTEX_NONE;
+
+/* Define the system critical vertices and connect them through
+ * a canonical parent-child relationships for easy traversal
+ * during io error handling.
+ */
+static void
+sys_critical_graph_init(void)
+{
+	devfs_handle_t		bridge_vhdl,master_node_vhdl;
+	devfs_handle_t  		xbow_vhdl = GRAPH_VERTEX_NONE;
+	extern devfs_handle_t	hwgraph_root;
+	devfs_handle_t		pci_slot_conn;
+	int			slot;
+	devfs_handle_t		baseio_console_conn;
+
+	printk("sys_critical_graph_init: FIXME.\n");
+	baseio_console_conn = hwgraph_connectpt_get(baseio_console_vhdl);
+
+	/* NOTE(review): this checks against NULL while other handle
+	 * checks in this file use GRAPH_VERTEX_NONE -- confirm the two
+	 * are equivalent for devfs_handle_t. */
+	if (baseio_console_conn == NULL) {
+		return;
+	}
+
+	/* Get the vertex handle for the baseio bridge */
+	bridge_vhdl = device_master_get(baseio_console_conn);
+
+	/* Get the master node of the baseio card */
+	master_node_vhdl = cnodeid_to_vertex(
+				master_node_get(baseio_console_vhdl));
+	
+	/* Add the "root->node" part of the system critical graph */
+
+	sys_critical_graph_vertex_add(hwgraph_root,master_node_vhdl);
+
+	/* Check if we have a crossbow */
+	if (hwgraph_traverse(master_node_vhdl,
+			     EDGE_LBL_XTALK"/0",
+			     &xbow_vhdl) == GRAPH_SUCCESS) {
+		/* We have a crossbow. Add "node->xbow" part of the system 
+		 * critical graph.
+		 */
+		sys_critical_graph_vertex_add(master_node_vhdl,xbow_vhdl);
+		
+		/* Add "xbow->baseio bridge" of the system critical graph */
+		sys_critical_graph_vertex_add(xbow_vhdl,bridge_vhdl);
+
+		hwgraph_vertex_unref(xbow_vhdl);
+	} else 
+		/* We do not have a crossbow. Add "node->baseio_bridge"
+		 * part of the system critical graph.
+		 */
+		sys_critical_graph_vertex_add(master_node_vhdl,bridge_vhdl);
+
+	/* Add all the populated PCI slot vertices to the system critical
+	 * graph with the bridge vertex as the parent.
+	 */
+	for (slot = 0 ; slot < 8; slot++) {
+		char	slot_edge[10];
+
+		sprintf(slot_edge,"%d",slot);
+		if (hwgraph_traverse(bridge_vhdl,slot_edge, &pci_slot_conn)
+		    != GRAPH_SUCCESS)
+			continue;
+		sys_critical_graph_vertex_add(bridge_vhdl,pci_slot_conn);
+		hwgraph_vertex_unref(pci_slot_conn);
+	}
+
+	hwgraph_vertex_unref(bridge_vhdl);
+
+	/* Add the "ioc3 pci connection point  -> console ioc3" part 
+	 * of the system critical graph
+	 */
+
+	if (hwgraph_traverse(baseio_console_vhdl,"..",&pci_slot_conn) ==
+	    GRAPH_SUCCESS) {
+		sys_critical_graph_vertex_add(pci_slot_conn, 
+					      baseio_console_vhdl);
+		hwgraph_vertex_unref(pci_slot_conn);
+	}
+
+	/* Add the "ethernet pci connection point  -> base ethernet" part of 
+	 * the system  critical graph
+	 */
+	if (hwgraph_traverse(baseio_enet_vhdl,"..",&pci_slot_conn) ==
+	    GRAPH_SUCCESS) {
+		sys_critical_graph_vertex_add(pci_slot_conn, 
+					      baseio_enet_vhdl);
+		hwgraph_vertex_unref(pci_slot_conn);
+	}
+
+	/* Add the "scsi controller pci connection point  -> base scsi 
+	 * controller" part of the system critical graph
+	 */
+	if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[0],
+			     "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
+		sys_critical_graph_vertex_add(pci_slot_conn, 
+					      base_io_scsi_ctlr_vhdl[0]);
+		hwgraph_vertex_unref(pci_slot_conn);
+	}
+	if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[1],
+			     "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
+		sys_critical_graph_vertex_add(pci_slot_conn, 
+					      base_io_scsi_ctlr_vhdl[1]);
+		hwgraph_vertex_unref(pci_slot_conn);
+	}
+	hwgraph_vertex_unref(baseio_console_conn);
+
+}
+
+/*
+ * Locate the base I/O console and ethernet devices via the console
+ * IOC3 vertex, record their hwgraph handles for the system critical
+ * graph, and assign their well-known controller numbers.  Does
+ * nothing if no console vertex has been registered.
+ */
+static void
+baseio_ctlr_num_set(void)
+{
+	char 			name[MAXDEVNAME];
+	devfs_handle_t		console_vhdl, pci_vhdl, enet_vhdl;
+
+
+	printk("baseio_ctlr_num_set; FIXME\n");
+	console_vhdl = ioc3_console_vhdl_get();
+	if (console_vhdl == GRAPH_VERTEX_NONE)
+		return;
+	/* Useful for setting up the system critical graph */
+	baseio_console_vhdl = console_vhdl;
+
+	vertex_to_name(console_vhdl,name,MAXDEVNAME);
+
+	/* The baseio PCI bus lives at <console>/../.master/ */
+	strcat(name,__DEVSTR1);
+	pci_vhdl =  hwgraph_path_to_vertex(name);
+	scsi_ctlr_nums_add(pci_vhdl);
+	/* Unref the pci_vhdl due to the reference by hwgraph_path_to_vertex
+	 */
+	hwgraph_vertex_unref(pci_vhdl);
+
+	vertex_to_name(console_vhdl, name, MAXDEVNAME);
+	/* The base ethernet lives at <console>/../ef */
+	strcat(name, __DEVSTR4);
+	enet_vhdl = hwgraph_path_to_vertex(name);
+
+	/* Useful for setting up the system critical graph */
+	baseio_enet_vhdl = enet_vhdl;
+
+	/* The base ethernet is always controller 0. */
+	device_controller_num_set(enet_vhdl, 0);
+	/* Unref the enet_vhdl due to the reference by hwgraph_path_to_vertex
+	 */
+	hwgraph_vertex_unref(enet_vhdl);
+}
+/* #endif */
+
+void
+sn00_rrb_alloc(devfs_handle_t vhdl, int *vendor_list)
+{
+	/* REFERENCED */
+	int rtn_val;
+
+	/* 
+	** sn00 population:		errb	orrb
+	**	0- ql			3+?
+	**	1- ql			        2
+	**	2- ioc3 ethernet	2+?
+	**	3- ioc3 secondary	        1
+	**	4-                      0
+	** 	5- PCI slot
+	** 	6- PCI slot
+	** 	7- PCI slot
+	*/	
+	
+	/* The following code implements this heuristic for getting 
+	 * maximum usage out of the rrbs
+	 *
+	 * constraints:
+	 *  8 bit ql1 needs 1+1
+	 *  ql0 or ql5,6,7 wants 1+2
+	 *  ethernet wants 2 or more
+	 *
+	 * rules for even rrbs:
+	 *  if nothing in slot 6 
+	 *   4 rrbs to 0 and 2  (0xc8889999)
+	 *  else 
+         *   3 2 3 to slots 0 2 6  (0xc8899bbb)
+	 *
+         * rules for odd rrbs
+	 *  if nothing in slot 5 or 7  (0xc8889999)
+	 *   4 rrbs to 1 and 3
+	 *  else if 1 thing in 5 or 7  (0xc8899aaa) or (0xc8899bbb)
+         *   3 2 3 to slots 1 3 5|7
+         *  else
+         *   2 1 3 2 to slots 1 3 5 7 (note: if there's a ql card in 7 this
+	 *           (0xc89aaabb)      may short what it wants therefore the
+	 *			       rule should be to plug pci slots in order)
+	 */
+
+
+	/* Even rrbs: split between slots 0, 2 and (if populated) 6. */
+	if (vendor_list[6] != PCIIO_VENDOR_ID_NONE) {
+		/* something in slot 6 */
+		rtn_val = pcibr_alloc_all_rrbs(vhdl, 0, 3,1, 2,0, 0,0, 3,0);
+	}
+	else {
+		rtn_val = pcibr_alloc_all_rrbs(vhdl, 0, 4,1, 4,0, 0,0, 0,0);
+	}
+#ifndef CONFIG_IA64_SGI_IO
+	if (rtn_val)
+		cmn_err(CE_WARN, "sn00_rrb_alloc: pcibr_alloc_all_rrbs failed");
+#endif
+
+	/* Odd rrbs: split between slots 1, 3 and whichever of 5/7 is populated. */
+	if ((vendor_list[5] != PCIIO_VENDOR_ID_NONE) && 
+	    (vendor_list[7] != PCIIO_VENDOR_ID_NONE)) {
+		/* something in slot 5 and 7 */
+		rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 2,1, 1,0, 3,0, 2,0);
+	}
+	else if (vendor_list[5] != PCIIO_VENDOR_ID_NONE) {
+		/* something in slot 5 but not 7 */
+		rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 3,1, 2,0, 3,0, 0,0);
+	}
+	else if (vendor_list[7] != PCIIO_VENDOR_ID_NONE) {
+		/* something in slot 7 but not 5 */
+		rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 3,1, 2,0, 0,0, 3,0);
+	}
+	else {
+		/* nothing in slot 5 or 7 */
+		rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 4,1, 4,0, 0,0, 0,0);
+	}
+#ifndef CONFIG_IA64_SGI_IO
+	if (rtn_val)
+		cmn_err(CE_WARN, "sn00_rrb_alloc: pcibr_alloc_all_rrbs failed");
+#endif
+}
+
+
+/*
+ * Initialize all I/O devices.  Starting closest to nodes, probe and
+ * initialize outward.
+ */
+void
+init_all_devices(void)
+{
+	/* Governor on init threads..bump up when safe 
+	 * (beware many devfs races) 
+	 */
+#ifndef CONFIG_IA64_SGI_IO
+	int io_init_node_threads = 2;	
+#endif
+	cnodeid_t cnodeid, active;
+
+	init_MUTEX(&io_init_sema);
+
+
+	active = 0;
+	for (cnodeid = 0; cnodeid < maxnodes; cnodeid++) {
+#ifndef CONFIG_IA64_SGI_IO
+		char thread_name[16];
+		extern int io_init_pri;
+
+		/*
+		 * Spawn a service thread for each node to initialize all
+		 * I/O on that node.  Each thread attempts to bind itself 
+		 * to the node whose I/O it's initializing.
+		 */
+		sprintf(thread_name, "IO_init[%d]", cnodeid);
+
+		(void)sthread_create(thread_name, 0, IOINIT_STKSZ, 0,
+			io_init_pri, KT_PS, (st_func_t *)io_init_node,
+			(void *)(long)cnodeid, 0, 0, 0);
+#else
+		/* On this configuration nodes are initialized serially. */
+                printk("init_all_devices: Calling io_init_node() for cnode %d\n", cnodeid);
+                io_init_node(cnodeid);
+
+		printk("init_all_devices: Done io_init_node() for cnode %d\n", cnodeid);
+
+#endif /* !CONFIG_IA64_SGI_IO */
+
+
+		/* Limit how many nodes go at once, to not overload hwgraph */
+		/* TBD: Should timeout */
+#ifdef AA_DEBUG
+		printk("started thread for cnode %d\n", cnodeid);
+#endif
+#ifdef LINUX_KERNEL_THREADS
+		active++;
+		if (io_init_node_threads && 
+			active >= io_init_node_threads) {
+			down(&io_init_sema);
+			active--;
+		}
+#endif /* LINUX_KERNEL_THREADS */
+	}
+
+#ifdef LINUX_KERNEL_THREADS
+	/* Wait until all IO_init threads are done */
+
+	while (active > 0) {
+#ifdef AA_DEBUG
+	    printk("waiting, %d still active\n", active);
+#endif
+	    /* NOTE(review): `sema(...)` looks wrong here -- the matching
+	     * primitive for the up()/down() pairing used above would be
+	     * down(&io_init_sema); confirm (only compiled under
+	     * LINUX_KERNEL_THREADS). */
+	    sema(&io_init_sema);
+	    active--;
+	}
+
+#endif /* LINUX_KERNEL_THREADS */
+
+	for (cnodeid = 0; cnodeid < maxnodes; cnodeid++)
+		/*
+	 	 * Update information generated by IO init.
+		 */
+		update_node_information(cnodeid);
+
+	baseio_ctlr_num_set();
+	/* Setup the system critical graph (which is a subgraph of the
+	 * main hwgraph). This information is useful during io error
+	 * handling.
+	 */
+	sys_critical_graph_init();
+
+#if HWG_PRINT
+	hwgraph_print();
+#endif
+
+}
+
+/* Convert a single ASCII digit character to its integer value. */
+#define toint(x) ((int)(x) - (int)('0'))
+
+/*
+ * Translate an ARCS-style disk name of the form "dksXdXsX" (as found
+ * in the prom "root" variable) into the full hardware graph path of
+ * the corresponding block device, in place in devnm.  Leaves devnm
+ * untouched unless it matches the expected pattern.
+ */
+void
+devnamefromarcs(char *devnm)
+{
+	int 			val;
+	char 			tmpnm[MAXDEVNAME];
+	char 			*tmp1, *tmp2;
+	
+	/* Only names beginning "dks<digit>" are translated. */
+	val = strncmp(devnm, "dks", 3);
+	if (val != 0) 
+		return;
+	tmp1 = devnm + 3;
+	if (!isdigit(*tmp1))
+		return;
+
+	/* Parse the controller number following "dks". */
+	val = 0;
+	while (isdigit(*tmp1)) {
+		val = 10*val+toint(*tmp1);
+		tmp1++;
+	}
+
+	if(*tmp1 != 'd')
+		return;
+	else
+		tmp1++;
+
+	/* NOTE(review): val is accumulated from digits only, so the
+	 * `val < 0` arm can fire only on overflow of very long input. */
+	if ((val < 0) || (val >= NUM_BASE_IO_SCSI_CTLR)) {
+		int i;
+		int viable_found = 0;
+
+		printk("Only controller numbers 0..%d  are supported for\n", NUM_BASE_IO_SCSI_CTLR-1);
+		printk("prom \"root\" variables of the form dksXdXsX.\n");
+		printk("To use another disk you must use the full hardware graph path\n\n");
+		printk("Possible controller numbers for use in 'dksXdXsX' on this system: ");
+		for (i=0; i<NUM_BASE_IO_SCSI_CTLR; i++) {
+			if (base_io_scsi_ctlr_vhdl[i] != GRAPH_VERTEX_NONE) {
+				printk("%d ", i);
+				viable_found=1;
+			}
+		}
+		if (viable_found)
+			printk("\n");
+		else
+			printk("none found!\n");
+
+#ifndef CONFIG_IA64_SGI_IO
+		if (kdebug)
+			debug("ring");
+#endif
+		DELAY(15000000);
+		//prom_reboot();
+		panic("FIXME: devnamefromarcs: should call prom_reboot here.\n");
+		/* NOTREACHED */
+	}
+		
+	/* Build: <ctlr path>/target/<tgt>/lun/0/disk/partition/<part>/block */
+	ASSERT(base_io_scsi_ctlr_vhdl[val] != GRAPH_VERTEX_NONE);
+	vertex_to_name(base_io_scsi_ctlr_vhdl[val],
+		       tmpnm,
+		       MAXDEVNAME);
+	tmp2 = 	tmpnm + strlen(tmpnm);
+	strcpy(tmp2, __DEVSTR2);
+	tmp2 += strlen(__DEVSTR2);
+	/* Copy the target number (between 'd' and 's'). */
+	while (*tmp1 != 's') {
+		if((*tmp2++ = *tmp1++) == '\0')
+			return;
+	}	
+	tmp1++;
+	strcpy(tmp2, __DEVSTR3);
+	tmp2 += strlen(__DEVSTR3);
+	/* Copy the partition number, then append the "block" edge. */
+	while ( (*tmp2++ = *tmp1++) )
+		;
+	tmp2--;
+	*tmp2++ = '/';
+	strcpy(tmp2, EDGE_LBL_BLOCK);
+	strcpy(devnm,tmpnm);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/module.c linux/arch/ia64/sn/io/module.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/module.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/module.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,312 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/cmn_err.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn1/hubdev.h>
+#include <asm/sn/module.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/xtalk/xswitch.h>
+#include <asm/sn/nodepda.h>
+
+
+#define LDEBUG	1	
+
+#define DPRINTF		if (LDEBUG) printk
+#define printf		printk
+
+module_t	       *modules[MODULE_MAX];
+int			nummodules;
+
+#define SN00_SERIAL_FUDGE	0x3b1af409d513c2
+#define SN0_SERIAL_FUDGE	0x6e
+
+void
+encode_int_serial(uint64_t src,uint64_t *dest)
+{
+    uint64_t val;
+    int i;
+
+    val = src + SN00_SERIAL_FUDGE;
+
+
+    for (i = 0; i < sizeof(long long); i++) {
+	((char*)dest)[i] =
+	    ((char*)&val)[sizeof(long long)/2 +
+			 ((i%2) ? ((i/2 * -1) - 1) : (i/2))];
+    }
+}
+
+
+void
+decode_int_serial(uint64_t src, uint64_t *dest)
+{
+    uint64_t val;
+    int i;
+
+    for (i = 0; i < sizeof(long long); i++) {
+	((char*)&val)[sizeof(long long)/2 +
+		     ((i%2) ? ((i/2 * -1) - 1) : (i/2))] =
+	    ((char*)&src)[i];
+    }
+
+    *dest = val - SN00_SERIAL_FUDGE;
+}
+
+
+void
+encode_str_serial(const char *src, char *dest)
+{
+    int i;
+
+    for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
+
+	dest[i] = src[MAX_SERIAL_NUM_SIZE/2 +
+		     ((i%2) ? ((i/2 * -1) - 1) : (i/2))] +
+	    SN0_SERIAL_FUDGE;
+    }
+}
+
+void
+decode_str_serial(const char *src, char *dest)
+{
+    int i;
+
+    for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
+	dest[MAX_SERIAL_NUM_SIZE/2 +
+	    ((i%2) ? ((i/2 * -1) - 1) : (i/2))] = src[i] -
+	    SN0_SERIAL_FUDGE;
+    }
+}
+
+
+module_t *module_lookup(moduleid_t id)
+{
+    int			i;
+
+    DPRINTF("module_lookup: id=%d\n", id);
+
+    for (i = 0; i < nummodules; i++)
+	if (modules[i]->id == id) {
+	    DPRINTF("module_lookup: found m=0x%p\n", modules[i]);
+	    return modules[i];
+	}
+
+    return NULL;
+}
+
+/*
+ * module_add_node
+ *
+ *   The first time a new module number is seen, a module structure is
+ *   inserted into the module list in order sorted by module number
+ *   and the structure is initialized.
+ *
+ *   The node number is added to the list of nodes in the module.
+ */
+
+module_t *module_add_node(moduleid_t id, cnodeid_t n)
+{
+    module_t	       *m;
+    int			i;
+
+    DPRINTF("module_add_node: id=%x node=%d\n", id, n);
+
+    if ((m = module_lookup(id)) == 0) {
+#ifndef CONFIG_IA64_SGI_IO
+	m = kmem_zalloc_node(sizeof (module_t), KM_NOSLEEP, n);
+#else
+	m = kmalloc(sizeof (module_t), GFP_KERNEL);
+	memset(m, 0 , sizeof(module_t));
+	printk("Module nodecnt = %d\n", m->nodecnt); 
+#endif
+	ASSERT_ALWAYS(m);
+
+	DPRINTF("module_add_node: m=0x%p\n", m);
+
+	m->id = id;
+	spin_lock_init(&m->lock);
+
+	init_MUTEX_LOCKED(&m->thdcnt);
+
+printk("Set elsc to 0x%p on node %d\n", &m->elsc, get_nasid());
+
+set_elsc(&m->elsc);
+	elsc_init(&m->elsc, COMPACT_TO_NASID_NODEID(n));
+	spin_lock_init(&m->elsclock);
+
+	/* Insert in sorted order by module number */
+
+	for (i = nummodules; i > 0 && modules[i - 1]->id > id; i--)
+	    modules[i] = modules[i - 1];
+
+	modules[i] = m;
+	nummodules++;
+    }
+
+    m->nodes[m->nodecnt++] = n;
+
+printk("module_add_node: module %x now has %d nodes\n", id, m->nodecnt);
+    DPRINTF("module_add_node: module %x now has %d nodes\n", id, m->nodecnt);
+
+    return m;
+}
+
+int module_probe_snum(module_t *m, nasid_t nasid)
+{
+    lboard_t	       *board;
+    klmod_serial_num_t *comp;
+
+    board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid),
+			KLTYPE_MIDPLANE8);
+
+    if (! board || KL_CONFIG_DUPLICATE_BOARD(board))
+	return 0;
+
+    comp = GET_SNUM_COMP(board);
+
+    if (comp) {
+#if LDEBUG
+	    int i;
+
+	    printf("********found module with id %x and string", m->id);
+
+	    for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++)
+		printf(" %x ", comp->snum.snum_str[i]);
+
+	    printf("\n");	/* Fudged string is not ASCII */
+#endif
+
+	    if (comp->snum.snum_str[0] != '\0') {
+		bcopy(comp->snum.snum_str,
+		      m->snum.snum_str,
+		      MAX_SERIAL_NUM_SIZE);
+		m->snum_valid = 1;
+	    }
+    }
+
+    if (m->snum_valid)
+	return 1;
+    else {
+#ifndef CONFIG_IA64_SGI_IO
+	cmn_err(CE_WARN | CE_MAINTENANCE,
+		"Invalid serial number for module %d, "
+		"possible missing or invalid NIC.", m->id);
+#else
+	printk("Invalid serial number for module %d, "
+		"possible missing or invalid NIC.", m->id);
+#endif
+	return 0;
+    }
+}
+
+void
+io_module_init(void)
+{
+    cnodeid_t		node;
+    lboard_t	       *board;
+    nasid_t		nasid;
+    int			nserial;
+    module_t	       *m;
+
+    DPRINTF("*******module_init\n");
+
+    nserial = 0;
+
+    for (node = 0; node < numnodes; node++) {
+	nasid = COMPACT_TO_NASID_NODEID(node);
+
+	board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid),
+			    KLTYPE_IP27);
+	ASSERT(board);
+
+	m = module_add_node(board->brd_module, node);
+
+	if (! m->snum_valid && module_probe_snum(m, nasid))
+	    nserial++;
+    }
+
+    DPRINTF("********found total of %d serial numbers in the system\n",
+	    nserial);
+
+    if (nserial == 0)
+	cmn_err(CE_WARN, "No serial number found.");
+}
+
+#ifdef BRINGUP
+elsc_t *Elsc[100];
+
+void
+set_elsc(elsc_t *p)
+{
+      Elsc[get_nasid()] = p;
+}
+#endif
+
+elsc_t *get_elsc(void)
+{
+#ifdef BRINGUP
+return(Elsc[get_nasid()]);
+#else
+	if ( NODEPDA(get_nasid())->module == (module_t *)0 ) {
+		printf("get_elsc() for nasd %d fails\n", get_nasid());
+//		return((elsc_t *)0);
+	}
+	return &NODEPDA(get_nasid())->module->elsc;
+
+//	return &NODEPDA(NASID_TO_COMPACT_NODEID(0))->module->elsc;
+#endif
+}
+
+int
+get_kmod_info(cmoduleid_t cmod, module_info_t *mod_info)
+{
+    int i;
+
+    if (cmod < 0 || cmod >= nummodules)
+	return EINVAL;
+
+    if (! modules[cmod]->snum_valid)
+	return ENXIO;
+
+    mod_info->mod_num = modules[cmod]->id;
+    {
+	char temp[MAX_SERIAL_NUM_SIZE];
+
+	decode_str_serial(modules[cmod]->snum.snum_str, temp);
+
+	/* if this is an invalid serial number return an error */
+	if (temp[0] != 'K')
+	    return ENXIO;
+
+	mod_info->serial_num = 0;
+
+	for (i = 0; i < MAX_SERIAL_NUM_SIZE && temp[i] != '\0'; i++) {
+	    mod_info->serial_num <<= 4;
+	    mod_info->serial_num |= (temp[i] & 0xf);
+
+	    mod_info->serial_str[i] = temp[i];
+	}
+
+	mod_info->serial_str[i] = '\0';
+    }
+
+    return 0;
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/pci.c linux/arch/ia64/sn/io/pci.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/pci.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/pci.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,306 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SNI64 specific PCI support for SNI IO.
+ *
+ * Copyright (C) 1997, 1998, 2000 Colin Ngam
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/cmn_err.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/iograph.h>
+#include <asm/param.h>
+#include <asm/sn/pio.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/pci/bridge.h>
+
+#ifdef DEBUG_CONFIG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+
+
+#ifdef CONFIG_PCI
+
+extern devfs_handle_t pci_bus_to_vertex(unsigned char);
+extern devfs_handle_t devfn_to_vertex(unsigned char bus, unsigned char devfn);
+
+/*
+ * snia64_read_config_byte - Read a byte from the config area of the device.
+ */
+static int snia64_read_config_byte (struct pci_dev *dev,
+                                   int where, unsigned char *val)
+{
+	unsigned long res = 0;
+	unsigned size = 1;
+	devfs_handle_t device_vertex;
+
+	if ( (dev == (struct pci_dev *)0) || (val == (unsigned char *)0) ) {
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+	device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
+	if (!device_vertex) {
+		DBG("%s : nonexistent device: bus= 0x%x  slot= 0x%x  func= 0x%x\n", 
+		__FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+		return(-1);
+	}
+	res = pciio_config_get(device_vertex, (unsigned) where, size);
+	*val = (unsigned char) res;
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * snia64_read_config_word - Read 2 bytes from the config area of the device.
+ */
+static int snia64_read_config_word (struct pci_dev *dev,
+                                   int where, unsigned short *val)
+{
+	unsigned long res = 0;
+	unsigned size = 2; /* 2 bytes */
+	devfs_handle_t device_vertex;
+
+	if ( (dev == (struct pci_dev *)0) || (val == (unsigned short *)0) ) {
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+	device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
+	if (!device_vertex) {
+		DBG("%s : nonexistent device: bus= 0x%x  slot= 0x%x  func= 0x%x\n", 
+		__FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+		return(-1);
+	}
+	res = pciio_config_get(device_vertex, (unsigned) where, size);
+	*val = (unsigned short) res;
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * snia64_read_config_dword - Read 4 bytes from the config area of the device.
+ */
+static int snia64_read_config_dword (struct pci_dev *dev,
+                                    int where, unsigned int *val)
+{
+	unsigned long res = 0;
+	unsigned size = 4; /* 4 bytes */
+	devfs_handle_t device_vertex;
+
+	if (where & 3) {
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+	if ( (dev == (struct pci_dev *)0) || (val == (unsigned int *)0) ) {
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
+	if (!device_vertex) {
+		DBG("%s : nonexistent device: bus= 0x%x  slot= 0x%x  func= 0x%x\n", 
+		__FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+		return(-1);
+	}
+	res = pciio_config_get(device_vertex, (unsigned) where, size);
+	*val = (unsigned int) res;
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * snia64_write_config_byte - Writes 1 byte to the config area of the device.
+ */
+static int snia64_write_config_byte (struct pci_dev *dev,
+                                    int where, unsigned char val)
+{
+	devfs_handle_t device_vertex;
+
+	if ( dev == (struct pci_dev *)0 ) {
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+	/* 
+	 * if it's an IOC3 then we bail out, we special
+	 * case them with pci_fixup_ioc3
+	 */
+	if (dev->vendor == PCI_VENDOR_ID_SGI && 
+	    dev->device == PCI_DEVICE_ID_SGI_IOC3 )
+		return PCIBIOS_SUCCESSFUL;
+
+	device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
+	if (!device_vertex) {
+		DBG("%s : nonexistent device: bus= 0x%x  slot= 0x%x  func= 0x%x\n", 
+		__FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+		return(-1);
+	}
+	pciio_config_set( device_vertex, (unsigned)where, 1, (uint64_t) val);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * snia64_write_config_word - Writes 2 bytes to the config area of the device.
+ */
+static int snia64_write_config_word (struct pci_dev *dev,
+                                    int where, unsigned short val)
+{
+	devfs_handle_t device_vertex = NULL;
+
+	if (where & 1) {
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+	if ( dev == (struct pci_dev *)0 ) {
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+	/* 
+	 * if it's an IOC3 then we bail out, we special
+	 * case them with pci_fixup_ioc3
+	 */
+	if (dev->vendor == PCI_VENDOR_ID_SGI && 
+	    dev->device == PCI_DEVICE_ID_SGI_IOC3)
+		return PCIBIOS_SUCCESSFUL;
+
+	device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
+	if (!device_vertex) {
+		DBG("%s : nonexistent device: bus= 0x%x  slot= 0x%x  func= 0x%x\n", 
+		__FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+		return(-1);
+	}
+	pciio_config_set( device_vertex, (unsigned)where, 2, (uint64_t) val);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * snia64_write_config_dword - Writes 4 bytes to the config area of the device.
+ */
+static int snia64_write_config_dword (struct pci_dev *dev,
+                                     int where, unsigned int val)
+{
+	devfs_handle_t device_vertex;
+
+	if (where & 3) {
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+	if ( dev == (struct pci_dev *)0 ) {
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+	/* 
+	 * if it's an IOC3 then we bail out, we special
+	 * case them with pci_fixup_ioc3
+	 */
+	if (dev->vendor == PCI_VENDOR_ID_SGI && 
+	    dev->device == PCI_DEVICE_ID_SGI_IOC3)
+		return PCIBIOS_SUCCESSFUL;
+
+	device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
+	if (!device_vertex) {
+		DBG("%s : nonexistent device: bus= 0x%x  slot= 0x%x  func= 0x%x\n", 
+		__FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+		return(-1);
+	}
+	pciio_config_set( device_vertex, (unsigned)where, 4, (uint64_t) val);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops snia64_pci_ops = {
+	snia64_read_config_byte,
+	snia64_read_config_word,
+	snia64_read_config_dword,
+	snia64_write_config_byte,
+	snia64_write_config_word,
+	snia64_write_config_dword
+};
+
+/*
+ * snia64_pci_find_bios - SNIA64 pci_find_bios() platform specific code.
+ */
+void __init
+sn1_pci_find_bios(void)
+{
+	extern struct pci_ops pci_conf;
+	/*
+	 * Go initialize our IO Infrastructure ..
+	 */
+	extern void sgi_master_io_infr_init(void);
+
+	sgi_master_io_infr_init();
+
+#ifdef BRINGUP
+	if ( IS_RUNNING_ON_SIMULATOR() )
+		return;
+#endif
+	/* sn1_io_infrastructure_init(); */
+	pci_conf = snia64_pci_ops;
+}
+
+void
+pci_fixup_ioc3(struct pci_dev *d)
+{
+        int 		i;
+	int 		slot;
+	unsigned long 	res = 0;
+	unsigned int 	val, size;
+	int 		ret;
+	u_short 	command;
+
+	devfs_handle_t 	device_vertex;
+	devfs_handle_t	bridge_vhdl = pci_bus_to_vertex(d->bus->number);
+	pcibr_soft_t 	pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
+	devfs_handle_t  xconn_vhdl = pcibr_soft->bs_conn;
+	bridge_t 	*bridge = pcibr_soft->bs_base;
+	bridgereg_t 	devreg;
+
+        /* IOC3 only decodes 0x20 bytes of the config space, reading
+	 * beyond that is relatively benign but writing beyond that
+	 * (especially the base address registers) will shut down the
+	 * pci bus...so avoid doing so.
+	 * NOTE: this means we can't program the intr_pin into the device,
+	 *       currently we hack this with special code in 
+	 *	 sgi_pci_intr_support()
+	 */
+        printk("pci_fixup_ioc3: Fixing base addresses for ioc3 device %s\n", d->slot_name);
+
+	/* I happen to know from the spec that the ioc3 needs only 0xfffff 
+	 * The standard pci trick of writing ~0 to the baddr and seeing
+	 * what comes back doesn't work with the ioc3
+	 */
+	size = 0xfffff;
+	d->resource[0].end = (unsigned long) d->resource[0].start + (unsigned long) size;
+
+	/*
+	 * Zero out the resource structure .. because we did not go through 
+	 * the normal PCI Infrastructure Init, garbage is left in these 
+	 * fields.
+	 */
+        for (i = 1; i <= PCI_ROM_RESOURCE; i++) {
+                d->resource[i].start = 0UL;
+                d->resource[i].end = 0UL;
+                d->resource[i].flags = 0UL;
+        }
+
+	/*
+	 * Hardcode Device 4 register(IOC3 is in Slot 4) to set the 
+	 * DEV_DIRECT bit.  This will not work if IOC3 is not on Slot 
+	 * 4.
+	 */
+	*(volatile u32 *)0xc0000a000f000220 |= 0x90000;
+
+        d->subsystem_vendor = 0;
+        d->subsystem_device = 0;
+
+}
+
+#endif /* CONFIG_PCI */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/pci_bus_cvlink.c linux/arch/ia64/sn/io/pci_bus_cvlink.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/pci_bus_cvlink.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/pci_bus_cvlink.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,595 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <asm/sn/types.h>
+#include <asm/sn/hack.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/cmn_err.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/iograph.h>
+#include <asm/param.h>
+#include <asm/sn/pio.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/xtalk/xtalkaddrs.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pci/pci_bus_cvlink.h>
+
+#include <asm/sn/pci/pciio.h>
+// #include <sys/ql.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+extern int bridge_rev_b_data_check_disable;
+#include <asm/sn/pci/pciio.h>
+
+#define MAX_PCI_XWIDGET 256
+devfs_handle_t busnum_to_xwidget[MAX_PCI_XWIDGET];
+nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
+unsigned char num_bridges;
+static int done_probing = 0;
+
+static int pci_bus_map_create(devfs_handle_t xtalk);
+devfs_handle_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
+
+/*
+ * pci_bus_cvlink_init() - To be called once during initialization before 
+ *	SGI IO Infrastructure init is called.
+ */
+void
+pci_bus_cvlink_init(void)
+{
+
+	memset(busnum_to_xwidget, 0x0, sizeof(devfs_handle_t) * MAX_PCI_XWIDGET);
+	memset(busnum_to_nid, 0x0, sizeof(nasid_t) * MAX_PCI_XWIDGET);
+	num_bridges = 0;
+}
+
+/*
+ * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated 
+ *	pci bus vertex from the SGI IO Infrastructure.
+ */
+devfs_handle_t
+pci_bus_to_vertex(unsigned char busnum)
+{
+
+	devfs_handle_t	xwidget;
+	devfs_handle_t	pci_bus = NULL;
+
+
+	/*
+	 * First get the xwidget vertex.
+	 */
+	xwidget = busnum_to_xwidget[busnum];
+	if (!xwidget)
+		return (NULL);
+
+	/*
+	 * Use devfs to get the pci vertex from xwidget.
+	 */
+	if (hwgraph_traverse(xwidget, EDGE_LBL_PCI, &pci_bus) != GRAPH_SUCCESS) {
+		if (!pci_bus) {
+			printk("pci_bus_to_vertex: Cannot find pci bus for given bus number %d\n", busnum);
+			return (NULL);
+		}
+	}
+
+	return(pci_bus);
+}
+
+/*
+ * devfn_to_vertex() - returns the vertex of the device given the bus, slot, 
+ *	and function numbers.
+ */
+devfs_handle_t
+devfn_to_vertex(unsigned char busnum, unsigned int devfn)
+{
+
+	int slot = 0;
+	int func = 0;
+	char	name[16];
+	devfs_handle_t  pci_bus = NULL;
+	devfs_handle_t	device_vertex = NULL;
+
+	/*
+	 * Go get the pci bus vertex.
+	 */
+	pci_bus = pci_bus_to_vertex(busnum);
+	if (!pci_bus) {
+		/*
+		 * During probing, the Linux pci code invents nonexistent
+		 * bus numbers and pci_dev structures and tries to access
+		 * them to determine existence. Don't complain during probing.
+		 */
+		if (done_probing)
+			printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
+		return(NULL);
+	}
+
+
+	/*
+	 * Go get the slot&function vertex.
+	 * Should call pciio_slot_func_to_name() when ready.
+	 */
+	slot = PCI_SLOT(devfn);
+	func = PCI_FUNC(devfn);
+
+	if (func == 0)
+        	sprintf(name, "%d", slot);
+	else
+		sprintf(name, "%d%c", slot, 'a'+func);
+
+	if (hwgraph_traverse(pci_bus, name, &device_vertex) != GRAPH_SUCCESS) {
+		if (!device_vertex) {
+			printk("devfn_to_vertex: Unable to get slot&func %s from pci vertex 0x%p\n", name, pci_bus);
+			return(NULL);
+		}
+	}
+
+	return(device_vertex);
+}
+
+/*
+ * Most drivers currently do not properly tell the arch specific pci dma
+ * interfaces whether they can handle A64. Here is where we privately
+ * keep track of this.
+ */
+static void __init
+set_sn1_pci64(struct pci_dev *dev)
+{
+	unsigned short vendor = dev->vendor;
+	unsigned short device = dev->device;
+
+	if (vendor == PCI_VENDOR_ID_QLOGIC) {
+		if ((device == PCI_DEVICE_ID_QLOGIC_ISP2100) ||
+				(device == PCI_DEVICE_ID_QLOGIC_ISP2200)) {
+			SET_PCIA64(dev);
+			return;
+		}
+	}
+
+	if (vendor == PCI_VENDOR_ID_SGI) {
+		if (device == PCI_DEVICE_ID_SGI_IOC3) {
+			SET_PCIA64(dev);
+			return;
+		}
+	}
+
+}
+
+/*
+ * sn1_pci_fixup() - This routine is called when platform_pci_fixup() is 
+ *	invoked at the end of pcibios_init() to link the Linux pci 
+ *	infrastructure to SGI IO Infrastructure - ia64/kernel/pci.c
+ *
+ *	Other platform specific fixup can also be done here.
+ */
+void
+sn1_pci_fixup(int arg)
+{
+	struct list_head *ln;
+	struct pci_bus *pci_bus = NULL;
+	struct pci_dev *device_dev = NULL;
+	struct sn1_widget_sysdata *widget_sysdata;
+	struct sn1_device_sysdata *device_sysdata;
+	extern void sn1_pci_find_bios(void);
+
+
+unsigned long   res;
+
+	if (arg == 0) {
+		sn1_pci_find_bios();
+		return;
+	}
+
+#if 0
+{
+        devfs_handle_t  bridge_vhdl = pci_bus_to_vertex(0);
+        pcibr_soft_t    pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
+	bridge_t        *bridge = pcibr_soft->bs_base;
+printk("Before Changing PIO Map Address:\n");
+        printk("pci_fixup_ioc3: Before devreg fixup\n");
+        printk("pci_fixup_ioc3: Devreg 0 0x%x\n", bridge->b_device[0].reg);
+        printk("pci_fixup_ioc3: Devreg 1 0x%x\n", bridge->b_device[1].reg);
+        printk("pci_fixup_ioc3: Devreg 2 0x%x\n", bridge->b_device[2].reg);
+        printk("pci_fixup_ioc3: Devreg 3 0x%x\n", bridge->b_device[3].reg);
+        printk("pci_fixup_ioc3: Devreg 4 0x%x\n", bridge->b_device[4].reg);
+        printk("pci_fixup_ioc3: Devreg 5 0x%x\n", bridge->b_device[5].reg);
+        printk("pci_fixup_ioc3: Devreg 6 0x%x\n", bridge->b_device[6].reg);
+        printk("pci_fixup_ioc3: Devreg 7 0x%x\n", bridge->b_device[7].reg);
+}
+#endif
+	done_probing = 1;
+
+	if ( IS_RUNNING_ON_SIMULATOR() ) {
+		printk("sn1_pci_fixup not supported on simulator.\n");
+		return;
+	}
+
+#ifdef REAL_HARDWARE
+
+	/*
+	 * Initialize the pci bus vertex in the pci_bus struct.
+	 */
+	for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
+		pci_bus = pci_bus_b(ln);
+		widget_sysdata = kmalloc(sizeof(struct sn1_widget_sysdata), 
+					GFP_KERNEL);
+		widget_sysdata->vhdl = pci_bus_to_vertex(pci_bus->number);
+		pci_bus->sysdata = (void *)widget_sysdata;
+	}
+
+	/*
+ 	 * set the root start and end so that drivers calling check_region()
+	 * won't see a conflict
+	 */
+	ioport_resource.start |= IO_SWIZ_BASE;
+	ioport_resource.end |= (HSPEC_SWIZ_BASE-1);
+	/*
+	 * Initialize the device vertex in the pci_dev struct.
+	 */
+	pci_for_each_dev(device_dev) {
+		unsigned int irq;
+		int idx;
+		u16 cmd;
+		devfs_handle_t vhdl;
+		unsigned long size;
+
+		if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
+				device_dev->device == PCI_DEVICE_ID_SGI_IOC3) {
+			extern void pci_fixup_ioc3(struct pci_dev *d);
+			pci_fixup_ioc3(device_dev);
+		}
+
+		/* Set the device vertex */
+
+		device_sysdata = kmalloc(sizeof(struct sn1_device_sysdata),
+					GFP_KERNEL);
+		device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
+		device_sysdata->isa64 = 0;
+		device_dev->sysdata = (void *) device_sysdata;
+		set_sn1_pci64(device_dev);
+		pci_read_config_word(device_dev, PCI_COMMAND, &cmd);
+
+		/*
+		 * Set the resources address correctly.  The assumption here 
+		 * is that the addresses in the resource structure has been
+		 * read from the card and it was set in the card by our
+		 * Infrastructure ..
+		 */
+		vhdl = device_sysdata->vhdl;
+		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+			size = 0;
+			size = device_dev->resource[idx].end -
+				device_dev->resource[idx].start;
+			if (size) {
+res = 0;
+res = pciio_config_get(vhdl, (unsigned) PCI_BASE_ADDRESS_0 + idx, 4);
+printk("Before pciio_pio_addr Base address %d = 0x%lx\n", idx, res);
+
+				printk(" Changing device %d:%d resource start address from 0x%lx", 
+				PCI_SLOT(device_dev->devfn),PCI_FUNC(device_dev->devfn),
+				device_dev->resource[idx].start);
+				device_dev->resource[idx].start = 
+				(unsigned long)pciio_pio_addr(vhdl, 0, 
+					PCIIO_SPACE_WIN(idx), 0, size, 0, PCIIO_BYTE_STREAM);
+			}
+			else
+				continue;
+
+			device_dev->resource[idx].end = 
+				device_dev->resource[idx].start + size;
+
+			/*
+			 * Adjust the addresses to go to the SWIZZLE ..
+			 */
+			device_dev->resource[idx].start = 
+				device_dev->resource[idx].start & 0xfffff7ffffffffff;
+			device_dev->resource[idx].end = 
+				device_dev->resource[idx].end & 0xfffff7ffffffffff;
+			printk(" to 0x%lx\n", device_dev->resource[idx].start);
+res = 0;
+res = pciio_config_get(vhdl, (unsigned) PCI_BASE_ADDRESS_0 + idx, 4);
+printk("After pciio_pio_addr Base address %d = 0x%lx\n", idx, res);
+
+			if (device_dev->resource[idx].flags & IORESOURCE_IO)
+				cmd |= PCI_COMMAND_IO;
+			else if (device_dev->resource[idx].flags & IORESOURCE_MEM)
+				cmd |= PCI_COMMAND_MEMORY;
+		}
+		/*
+		 * Now handle the ROM resource ..
+		 */
+		size = device_dev->resource[PCI_ROM_RESOURCE].end -
+			device_dev->resource[PCI_ROM_RESOURCE].start;
+		printk(" Changing device %d:%d ROM resource start address from 0x%lx", 
+			PCI_SLOT(device_dev->devfn),PCI_FUNC(device_dev->devfn),
+			device_dev->resource[PCI_ROM_RESOURCE].start);
+		device_dev->resource[PCI_ROM_RESOURCE].start =
+			(unsigned long) pciio_pio_addr(vhdl, 0, PCIIO_SPACE_ROM, 0, 
+				size, 0, PCIIO_BYTE_STREAM);
+		device_dev->resource[PCI_ROM_RESOURCE].end =
+			device_dev->resource[PCI_ROM_RESOURCE].start + size;
+
+                /*
+                 * go through synergy swizzled space
+                 */
+		device_dev->resource[PCI_ROM_RESOURCE].start &= 0xfffff7ffffffffffUL;
+		device_dev->resource[PCI_ROM_RESOURCE].end   &= 0xfffff7ffffffffffUL;
+
+		/*
+		 * Update the Command Word on the Card.
+		 */
+		cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
+					   /* bit gets dropped .. no harm */
+		pci_write_config_word(device_dev, PCI_COMMAND, cmd);
+
+		printk("  to 0x%lx\n", device_dev->resource[PCI_ROM_RESOURCE].start);
+
+		/*
+		 * Set the irq correctly.
+		 * Bits 7:3 = slot
+		 * Bits 2:0 = function
+		 *
+		 * In the IRQ we will have:
+		 *	Bits 24:16 = bus number
+		 *	Bits 15:8 = slot|func number
+		 */
+		irq = 0;
+		irq = (irq | (device_dev->devfn << 8));
+		irq = (irq | ( (device_dev->bus->number & 0xff) << 16) );
+		device_dev->irq = irq;
+printk("sn1_pci_fixup: slot= %d  fn= %d  vendor= 0x%x  device= 0x%x  irq= 0x%x\n",
+PCI_SLOT(device_dev->devfn),PCI_FUNC(device_dev->devfn),device_dev->vendor,
+device_dev->device, device_dev->irq);
+
+	}
+#endif	/* REAL_HARDWARE */
+#if 0
+
+{
+        devfs_handle_t  bridge_vhdl = pci_bus_to_vertex(0);
+        pcibr_soft_t    pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
+        bridge_t        *bridge = pcibr_soft->bs_base;
+
+printk("After Changing PIO Map Address:\n");
+        printk("pci_fixup_ioc3: Before devreg fixup\n");
+        printk("pci_fixup_ioc3: Devreg 0 0x%x\n", bridge->b_device[0].reg);
+        printk("pci_fixup_ioc3: Devreg 1 0x%x\n", bridge->b_device[1].reg);
+        printk("pci_fixup_ioc3: Devreg 2 0x%x\n", bridge->b_device[2].reg);
+        printk("pci_fixup_ioc3: Devreg 3 0x%x\n", bridge->b_device[3].reg);
+        printk("pci_fixup_ioc3: Devreg 4 0x%x\n", bridge->b_device[4].reg);
+        printk("pci_fixup_ioc3: Devreg 5 0x%x\n", bridge->b_device[5].reg);
+        printk("pci_fixup_ioc3: Devreg 6 0x%x\n", bridge->b_device[6].reg);
+        printk("pci_fixup_ioc3: Devreg 7 0x%x\n", bridge->b_device[7].reg);
+}
+#endif
+
+}
+
+/*
+ * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
+ */
+static int 
+pci_bus_map_create(devfs_handle_t xtalk)
+{
+
+	devfs_handle_t master_node_vertex = NULL;
+	devfs_handle_t xwidget = NULL;
+	devfs_handle_t pci_bus = NULL;
+	hubinfo_t hubinfo = NULL;
+	xwidgetnum_t widgetnum;
+	char pathname[128];
+	graph_error_t rv;
+
+	/*
+	 * Loop through this vertex and get the Xwidgets ..
+	 */
+	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
+		sprintf(pathname, "%d", widgetnum);
+		xwidget = NULL;
+		
+		rv = hwgraph_traverse(xtalk, pathname, &xwidget);
+		if ( (rv != GRAPH_SUCCESS) ) {
+			if (!xwidget)
+				continue;
+		}
+
+		sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
+		pci_bus = NULL;
+		if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
+			if (!pci_bus)
+				continue;
+
+		/*
+		 * Assign the correct bus number and also the nasid of this 
+		 * pci Xwidget.
+		 * 
+		 * Should not be any race here ...
+		 */
+		num_bridges++;
+		busnum_to_xwidget[num_bridges - 1] = xwidget;
+
+		/*
+		 * Get the master node and from there get the NASID.
+		 */
+		master_node_vertex = device_master_get(xwidget);
+		if (!master_node_vertex) {
+			printk(" **** pci_bus_map_create: Unable to get .master for vertex 0x%p **** \n", xwidget);
+		}
+	
+		hubinfo_get(master_node_vertex, &hubinfo);
+		if (!hubinfo) {
+			printk(" **** pci_bus_map_create: Unable to get hubinfo for master node vertex 0x%p ****\n", master_node_vertex);
+			return(1);
+		} else {
+			busnum_to_nid[num_bridges - 1] = hubinfo->h_nasid;
+		}
+
+		printk("pci_bus_map_create: Found Hub nasid %d PCI Xwidget 0x%p  widgetnum= %d\n", hubinfo->h_nasid, xwidget, widgetnum);
+	}
+
+        return(0);
+}
+
+/*
+ * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure   
+ *      initialization has completed to set up the mappings between Xbridge
+ *      and logical pci bus numbers.  We also set up the NASID for each of these
+ *      xbridges.
+ *
+ *      Must be called before pci_init() is invoked.
+ */
+int
+pci_bus_to_hcl_cvlink(void) 
+{
+
+	devfs_handle_t devfs_hdl = NULL;
+	devfs_handle_t module_comp = NULL;
+	devfs_handle_t node = NULL;
+	devfs_handle_t xtalk = NULL;
+	graph_vertex_place_t placeptr = EDGE_PLACE_WANT_REAL_EDGES;
+	int rv = 0;
+	char name[256];
+
+	/*
+	 * Iterate through each xtalk link in the system ..
+	 * /hw/module/001c01/node/xtalk/ 8|9|10|11|12|13|14|15 
+	 *
+	 * /hw/module/001c01/node/xtalk/15 -> /hw/module/001c01/Ibrick/xtalk/15
+	 *
+	 * What if it is not pci?
+	 */
+	devfs_hdl = hwgraph_path_to_vertex("/dev/hw/module");
+
+	/*
+	 * Loop through this directory "/devfs/hw/module/" and get each 
+	 * of its entries.
+	 */
+	while (1) {
+	
+		/* Get vertex of component /dev/hw/<module_number> */
+		memset((char *)name, '0', 256);
+		module_comp = NULL;
+		rv = hwgraph_edge_get_next(devfs_hdl, (char *)name, &module_comp, (uint *)&placeptr);
+		if ((rv == 0) && (module_comp)) {
+			/* Found a valid entry */
+			node = NULL;
+			rv = hwgraph_edge_get(module_comp, "node", &node);
+
+		} else {
+			printk("pci_bus_to_hcl_cvlink: No more Module Component.\n");
+			return(0);
+		}
+
+		if ( (rv != 0) || (!node) ){
+			printk("pci_bus_to_hcl_cvlink: Module Component does not have node vertex.\n");
+			continue;
+		} else {
+			xtalk = NULL;
+			rv = hwgraph_edge_get(node, "xtalk", &xtalk);
+			if ( (rv != 0) || (xtalk == NULL) ){
+				printk("pci_bus_to_hcl_cvlink: Node has no xtalk vertex.\n");
+				continue;
+			}
+		}
+
+		printk("pci_bus_to_hcl_cvlink: Found Module %s node vertex = 0x%p xtalk vertex = 0x%p\n", name, node, xtalk);
+		/*
+		 * Call routine to get the existing PCI Xwidget and create
+		 * the convenience link from "/devfs/hw/pci_bus/.."
+		 */
+		pci_bus_map_create(xtalk);
+	}
+
+	return(0);
+}
+
+/*
+ * sgi_pci_intr_support -
+ */
+int
+sgi_pci_intr_support (unsigned int requested_irq, device_desc_t *dev_desc,
+	devfs_handle_t *bus_vertex, pciio_intr_line_t *lines,
+	devfs_handle_t *device_vertex)
+
+{
+
+	unsigned int bus;
+	unsigned int devfn;
+	struct pci_dev *pci_dev;
+	unsigned char intr_pin = 0;
+	struct sn1_widget_sysdata *widget_sysdata;
+	struct sn1_device_sysdata *device_sysdata;
+
+	printk("sgi_pci_intr_support: Called with requested_irq 0x%x\n", requested_irq);
+
+	if (!dev_desc || !bus_vertex || !device_vertex) {
+		printk("sgi_pci_intr_support: Invalid parameter dev_desc 0x%p, bus_vertex 0x%p, device_vertex 0x%p\n", dev_desc, bus_vertex, device_vertex);
+		return(-1);
+	}
+
+	devfn = (requested_irq >> 8) & 0xff;
+	bus = (requested_irq >> 16) & 0xffff;
+	pci_dev = pci_find_slot(bus, devfn);
+	widget_sysdata = (struct sn1_widget_sysdata *)pci_dev->bus->sysdata;
+	*bus_vertex = widget_sysdata->vhdl;
+	device_sysdata = (struct sn1_device_sysdata *)pci_dev->sysdata;
+	*device_vertex = device_sysdata->vhdl;
+#if 0
+	{
+		int pos;
+		char dname[256];
+		pos = devfs_generate_path(*device_vertex, dname, 256);
+		printk("%s : path= %s pos %d\n", __FUNCTION__, &dname[pos], pos);
+	}
+#endif /* BRINGUP */
+
+
+	/*
+	 * Get the Interrupt PIN.
+	 */
+	pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intr_pin);
+	*lines = (pciio_intr_line_t)intr_pin;
+
+#ifdef BRINGUP
+	/*
+	 * ioc3 can't decode the PCI_INTERRUPT_PIN field of its config
+	 * space so we have to set it here
+	 */
+	if (pci_dev->vendor == PCI_VENDOR_ID_SGI &&
+	    pci_dev->device == PCI_DEVICE_ID_SGI_IOC3 ) {
+		*lines = 1;
+		printk("%s : IOC3 HACK: lines= %d\n", __FUNCTION__, *lines);
+	}
+#endif /* BRINGUP */
+
+	/* Not supported currently */
+	*dev_desc = NULL;
+
+	printk("sgi_pci_intr_support: Device Descriptor 0x%p, Bus Vertex 0x%p, Interrupt Pins 0x%x, Device Vertex 0x%p\n", *dev_desc, *bus_vertex, *lines, *device_vertex);
+
+	return(0);
+
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/pci_dma.c linux/arch/ia64/sn/io/pci_dma.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/pci_dma.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/pci_dma.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,338 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Leo Dagum
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/devfs_fs_kernel.h>
+
+#ifndef LANGUAGE_C 
+#define LANGUAGE_C 99
+#endif
+#ifndef _LANGUAGE_C
+#define _LANGUAGE_C 99
+#endif
+#ifndef CONFIG_IA64_SGI_IO
+#define CONFIG_IA64_SGI_IO 99
+#endif
+
+#include <asm/io.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/pci/pci_bus_cvlink.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/alenlist.h>
+
+/*
+ * this is REALLY ugly, blame it on gcc's lame inlining that we
+ * have to put procedures in header files
+ */
+#if LANGUAGE_C == 99
+#undef LANGUAGE_C
+#endif
+#if _LANGUAGE_C == 99
+#undef _LANGUAGE_C
+#endif
+#if CONFIG_IA64_SGI_IO == 99
+#undef CONFIG_IA64_SGI_IO
+#endif
+
+/*
+ * sn1 platform specific pci_alloc_consistent()
+ *
+ * this interface is meant for "command" streams, i.e. called only
+ * once for initializing a device, so we don't want prefetching or
+ * write gathering turned on, hence the PCIIO_DMA_CMD flag
+ */
+void *
+sn1_pci_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
+{
+        void *ret;
+        int gfp = GFP_ATOMIC;
+	devfs_handle_t    vhdl;
+	struct sn1_device_sysdata *device_sysdata;
+	paddr_t temp_ptr;
+
+	*dma_handle = (dma_addr_t) NULL;
+
+	/*
+	 * get vertex for the device
+	 */
+	device_sysdata = (struct sn1_device_sysdata *) hwdev->sysdata;
+	vhdl = device_sysdata->vhdl;
+
+        if ( ret = (void *)__get_free_pages(gfp, get_order(size)) ) {
+		memset(ret, 0, size);
+	} else {
+		return(NULL);
+	}
+
+	temp_ptr = (paddr_t) __pa(ret);
+	if (IS_PCIA64(hwdev)) {
+
+		/*
+		 * This device supports 64bits DMA addresses.
+		 */
+		*dma_handle = pciio_dmatrans_addr(vhdl, NULL, temp_ptr, size,
+			PCIBR_BARRIER | PCIIO_BYTE_STREAM | PCIIO_DMA_CMD
+			| PCIIO_DMA_A64 );
+		return (ret);
+	}
+
+	/*
+	 * Devices that support 32-bit up to 63-bit DMA addressing get
+	 * 32-bit DMA addresses.
+	 *
+	 * First try to get 32 Bit Direct Map Support.
+	 */
+	if (IS_PCI32G(hwdev)) {
+		*dma_handle = pciio_dmatrans_addr(vhdl, NULL, temp_ptr, size,
+			PCIBR_BARRIER | PCIIO_BYTE_STREAM | PCIIO_DMA_CMD);
+		if (dma_handle) {
+			return (ret);
+		} else {
+			/*
+			 * We need to map this request by using ATEs.
+			 */
+			printk("sn1_pci_alloc_consistent: 32Bits DMA Page Map support not available yet!");
+			BUG();
+		}
+	}
+
+	if (IS_PCI32L(hwdev)) {
+		/*
+		 * SNIA64 cannot support DMA Addresses smaller than 32 bits.
+		 */
+		return (NULL);
+	}
+
+        return NULL;
+}
+
+void
+sn1_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long) vaddr, get_order(size));
+}
+
+/*
+ * On sn1 we use the alt_address entry of the scatterlist to store
+ * the physical address corresponding to the given virtual address
+ */
+int
+sn1_pci_map_sg (struct pci_dev *hwdev,
+                        struct scatterlist *sg, int nents, int direction)
+{
+
+	int i;
+	devfs_handle_t	vhdl;
+	dma_addr_t dma_addr;
+	paddr_t temp_ptr;
+	struct sn1_device_sysdata *device_sysdata;
+
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	/*
+	 * Handle 64 bit cards.
+	 */
+	device_sysdata = (struct sn1_device_sysdata *) hwdev->sysdata;
+	vhdl = device_sysdata->vhdl;
+	for (i = 0; i < nents; i++, sg++) {
+		sg->orig_address = sg->address;
+		dma_addr = 0;
+		temp_ptr = (paddr_t) __pa(sg->address);
+
+		/*
+		 * Handle the most common case 64Bit cards.
+		 */
+		if (IS_PCIA64(hwdev)) {
+			dma_addr = (dma_addr_t) pciio_dmatrans_addr(vhdl, NULL,
+				temp_ptr, sg->length,
+				PCIBR_BARRIER | PCIIO_BYTE_STREAM |
+				PCIIO_DMA_CMD | PCIIO_DMA_A64 );
+			sg->address = (char *)dma_addr;
+/* printk("pci_map_sg: 64Bits hwdev %p DMA Address 0x%p alt_address 0x%p orig_address 0x%p length 0x%x\n", hwdev, sg->address, sg->alt_address, sg->orig_address, sg->length); */
+			continue;
+		}
+
+		/*
+		 * Handle 32Bits and greater cards.
+		 */
+		if (IS_PCI32G(hwdev)) {
+			dma_addr = (dma_addr_t) pciio_dmatrans_addr(vhdl, NULL,
+				temp_ptr, sg->length,
+				PCIBR_BARRIER | PCIIO_BYTE_STREAM |
+				PCIIO_DMA_CMD);
+			if (dma_addr) {
+				sg->address = (char *)dma_addr;
+/* printk("pci_map_single: 32Bit direct pciio_dmatrans_addr pcidev %p returns dma_addr 0x%lx\n", hwdev, dma_addr); */
+				continue;
+			} else {
+				/*
+				 * We need to map this request by using ATEs.
+				 */
+				printk("pci_map_single: 32Bits DMA Page Map support not available yet!");
+				BUG();
+
+			}
+		}
+	}
+
+	return nents;
+
+}
+
+/*
+ * Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+void
+sn1_pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+{
+	int i;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+	for (i = 0; i < nelems; i++, sg++)
+		if (sg->orig_address != sg->address) {
+			/* phys_to_virt((dma_addr_t)sg->address | ~0x80000000); */
+			sg->address = sg->orig_address;
+			sg->orig_address = 0;
+		}
+}
+
+/*
+ * We map this to the one step pciio_dmamap_trans interface rather than
+ * the two step pciio_dmamap_alloc/pciio_dmamap_addr because we have
+ * no way of saving the dmamap handle from the alloc to later free
+ * (which is pretty much unacceptable).
+ *
+ * TODO: simplify our interface;
+ *       get rid of dev_desc and vhdl (seems redundant given a pci_dev);
+ *       figure out how to save dmamap handle so can use two step.
+ */
+dma_addr_t sn1_pci_map_single (struct pci_dev *hwdev,
+				void *ptr, size_t size, int direction)
+{
+	devfs_handle_t	vhdl;
+	dma_addr_t dma_addr;
+	paddr_t temp_ptr;
+	struct sn1_device_sysdata *device_sysdata;
+
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	if (IS_PCI32L(hwdev)) {
+		/*
+		 * SNIA64 cannot support DMA Addresses smaller than 32 bits.
+		 */
+		return ((dma_addr_t) NULL);
+	}
+
+	/*
+	 * find vertex for the device
+	 */
+	device_sysdata = (struct sn1_device_sysdata *)hwdev->sysdata;
+	vhdl = device_sysdata->vhdl;
+/* printk("pci_map_single: Called vhdl = 0x%p ptr = 0x%p size = %d\n", vhdl, ptr, size); */
+	/*
+	 * Call our dmamap interface
+	 */
+	dma_addr = 0;
+	temp_ptr = (paddr_t) __pa(ptr);
+
+	if (IS_PCIA64(hwdev)) {
+		/*
+		 * This device supports 64bits DMA addresses.
+		 */
+		dma_addr = (dma_addr_t) pciio_dmatrans_addr(vhdl, NULL,
+			temp_ptr, size,
+			PCIBR_BARRIER | PCIIO_BYTE_STREAM | PCIIO_DMA_CMD
+			| PCIIO_DMA_A64 );
+/* printk("pci_map_single: 64Bit pciio_dmatrans_addr pcidev %p returns dma_addr 0x%lx\n", hwdev, dma_addr); */
+		return (dma_addr);
+	}
+
+	/*
+	 * Devices that support 32-bit up to 63-bit DMA addressing get
+	 * 32-bit DMA addresses.
+	 *
+	 * First try to get 32 Bit Direct Map Support.
+	 */
+	if (IS_PCI32G(hwdev)) {
+		dma_addr = (dma_addr_t) pciio_dmatrans_addr(vhdl, NULL,
+			temp_ptr, size,
+			PCIBR_BARRIER | PCIIO_BYTE_STREAM | PCIIO_DMA_CMD);
+		if (dma_addr) {
+/* printk("pci_map_single: 32Bit direct pciio_dmatrans_addr pcidev %p returns dma_addr 0x%lx\n", hwdev, dma_addr); */
+			return (dma_addr);
+		} else {
+			/*
+			 * We need to map this request by using ATEs.
+			 */
+			printk("pci_map_single: 32Bits DMA Page Map support not available yet!");
+			BUG();
+		}
+	}
+
+	if (IS_PCI32L(hwdev)) {
+		/*
+		 * SNIA64 cannot support DMA Addresses smaller than 32 bits.
+		 */
+		return ((dma_addr_t) NULL);
+	}
+
+	return ((dma_addr_t) NULL);
+
+}
+
+void
+sn1_pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
+{
+        if (direction == PCI_DMA_NONE)
+                BUG();
+        /* Nothing to do */
+}
+
+void
+sn1_pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
+{
+        if (direction == PCI_DMA_NONE)
+                BUG();
+        /* Nothing to do */
+}
+
+void
+sn1_pci_dma_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+{
+        if (direction == PCI_DMA_NONE)
+                BUG();
+        /* Nothing to do */
+}
+
+unsigned long
+sn1_dma_address (struct scatterlist *sg)
+{
+	return (sg->address);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/pcibr.c linux/arch/ia64/sn/io/pcibr.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/pcibr.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/pcibr.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,9574 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifdef BRINGUP
+int NeedXbridgeSwap = 0;
+#endif
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/cmn_err.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/pci/pci_defs.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/prio.h>
+#include <asm/sn/ioerror_handling.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/ioc3.h>
+#include <asm/sn/eeprom.h>
+#include <asm/sn/sn1/bedrock.h>
+#include <asm/sn/sn_private.h>
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/hubio.h>
+#include <asm/sn/sn1/hubio_next.h>
+#endif
+
+#if defined(BRINGUP)
+#if 0
+#define DEBUG 1	 /* To avoid lots of bad printk() formats leave off */
+#endif
+#define PCI_DEBUG 1
+#define ATTACH_DEBUG 1
+#define PCIBR_SOFT_LIST 1
+#endif
+
+#ifndef	LOCAL
+#define	LOCAL		static
+#endif
+
+#define PCIBR_LLP_CONTROL_WAR
+#if defined (PCIBR_LLP_CONTROL_WAR)
+int                     pcibr_llp_control_war_cnt;
+#endif				/* PCIBR_LLP_CONTROL_WAR */
+
+#define	NEWAf(ptr,n,f)	(ptr = kmem_zalloc((n)*sizeof (*(ptr)), (f&PCIIO_NOSLEEP)?KM_NOSLEEP:KM_SLEEP))
+#define NEWA(ptr,n)	(ptr = kmem_zalloc((n)*sizeof (*(ptr)), KM_SLEEP))
+#define DELA(ptr,n)	(kfree(ptr))
+
+#define NEWf(ptr,f)	NEWAf(ptr,1,f)
+#define NEW(ptr)	NEWA(ptr,1)
+#define DEL(ptr)	DELA(ptr,1)
+
+int                     pcibr_devflag = D_MP;
+
+#define F(s,n)		{ 1l<<(s),-(s), n }
+
+struct reg_desc         bridge_int_status_desc[] =
+{
+    F(31, "MULTI_ERR"),
+    F(30, "PMU_ESIZE_EFAULT"),
+    F(29, "UNEXPECTED_RESP"),
+    F(28, "BAD_XRESP_PACKET"),
+    F(27, "BAD_XREQ_PACKET"),
+    F(26, "RESP_XTALK_ERROR"),
+    F(25, "REQ_XTALK_ERROR"),
+    F(24, "INVALID_ADDRESS"),
+    F(23, "UNSUPPORTED_XOP"),
+    F(22, "XREQ_FIFO_OFLOW"),
+    F(21, "LLP_REC_SNERROR"),
+    F(20, "LLP_REC_CBERROR"),
+    F(19, "LLP_RCTY"),
+    F(18, "LLP_TX_RETRY"),
+    F(17, "LLP_TCTY"),
+    F(16, "SSRAM_PERR"),
+    F(15, "PCI_ABORT"),
+    F(14, "PCI_PARITY"),
+    F(13, "PCI_SERR"),
+    F(12, "PCI_PERR"),
+    F(11, "PCI_MASTER_TOUT"),
+    F(10, "PCI_RETRY_CNT"),
+    F(9, "XREAD_REQ_TOUT"),
+    F(8, "GIO_BENABLE_ERR"),
+    F(7, "INT7"),
+    F(6, "INT6"),
+    F(5, "INT5"),
+    F(4, "INT4"),
+    F(3, "INT3"),
+    F(2, "INT2"),
+    F(1, "INT1"),
+    F(0, "INT0"),
+    {0}
+};
+
+struct reg_values       space_v[] =
+{
+    {PCIIO_SPACE_NONE, "none"},
+    {PCIIO_SPACE_ROM, "ROM"},
+    {PCIIO_SPACE_IO, "I/O"},
+    {PCIIO_SPACE_MEM, "MEM"},
+    {PCIIO_SPACE_MEM32, "MEM(32)"},
+    {PCIIO_SPACE_MEM64, "MEM(64)"},
+    {PCIIO_SPACE_CFG, "CFG"},
+    {PCIIO_SPACE_WIN(0), "WIN(0)"},
+    {PCIIO_SPACE_WIN(1), "WIN(1)"},
+    {PCIIO_SPACE_WIN(2), "WIN(2)"},
+    {PCIIO_SPACE_WIN(3), "WIN(3)"},
+    {PCIIO_SPACE_WIN(4), "WIN(4)"},
+    {PCIIO_SPACE_WIN(5), "WIN(5)"},
+    {PCIIO_SPACE_BAD, "BAD"},
+    {0}
+};
+
+struct reg_desc         space_desc[] =
+{
+    {0xFF, 0, "space", 0, space_v},
+    {0}
+};
+
+#if DEBUG
+#define	device_desc	device_bits
+LOCAL struct reg_desc   device_bits[] =
+{
+    {BRIDGE_DEV_ERR_LOCK_EN, 0, "ERR_LOCK_EN"},
+    {BRIDGE_DEV_PAGE_CHK_DIS, 0, "PAGE_CHK_DIS"},
+    {BRIDGE_DEV_FORCE_PCI_PAR, 0, "FORCE_PCI_PAR"},
+    {BRIDGE_DEV_VIRTUAL_EN, 0, "VIRTUAL_EN"},
+    {BRIDGE_DEV_PMU_WRGA_EN, 0, "PMU_WRGA_EN"},
+    {BRIDGE_DEV_DIR_WRGA_EN, 0, "DIR_WRGA_EN"},
+    {BRIDGE_DEV_DEV_SIZE, 0, "DEV_SIZE"},
+    {BRIDGE_DEV_RT, 0, "RT"},
+    {BRIDGE_DEV_SWAP_PMU, 0, "SWAP_PMU"},
+    {BRIDGE_DEV_SWAP_DIR, 0, "SWAP_DIR"},
+    {BRIDGE_DEV_PREF, 0, "PREF"},
+    {BRIDGE_DEV_PRECISE, 0, "PRECISE"},
+    {BRIDGE_DEV_COH, 0, "COH"},
+    {BRIDGE_DEV_BARRIER, 0, "BARRIER"},
+    {BRIDGE_DEV_GBR, 0, "GBR"},
+    {BRIDGE_DEV_DEV_SWAP, 0, "DEV_SWAP"},
+    {BRIDGE_DEV_DEV_IO_MEM, 0, "DEV_IO_MEM"},
+    {BRIDGE_DEV_OFF_MASK, BRIDGE_DEV_OFF_ADDR_SHFT, "DEV_OFF", "%x"},
+    {0}
+};
+#endif	/* DEBUG */
+
+#ifdef SUPPORT_PRINTING_R_FORMAT
+LOCAL struct reg_values xio_cmd_pactyp[] =
+{
+    {0x0, "RdReq"},
+    {0x1, "RdResp"},
+    {0x2, "WrReqWithResp"},
+    {0x3, "WrResp"},
+    {0x4, "WrReqNoResp"},
+    {0x5, "Reserved(5)"},
+    {0x6, "FetchAndOp"},
+    {0x7, "Reserved(7)"},
+    {0x8, "StoreAndOp"},
+    {0x9, "Reserved(9)"},
+    {0xa, "Reserved(a)"},
+    {0xb, "Reserved(b)"},
+    {0xc, "Reserved(c)"},
+    {0xd, "Reserved(d)"},
+    {0xe, "SpecialReq"},
+    {0xf, "SpecialResp"},
+    {0}
+};
+
+LOCAL struct reg_desc   xio_cmd_bits[] =
+{
+    {WIDGET_DIDN, -28, "DIDN", "%x"},
+    {WIDGET_SIDN, -24, "SIDN", "%x"},
+    {WIDGET_PACTYP, -20, "PACTYP", 0, xio_cmd_pactyp},
+    {WIDGET_TNUM, -15, "TNUM", "%x"},
+    {WIDGET_COHERENT, 0, "COHERENT"},
+    {WIDGET_DS, 0, "DS"},
+    {WIDGET_GBR, 0, "GBR"},
+    {WIDGET_VBPM, 0, "VBPM"},
+    {WIDGET_ERROR, 0, "ERROR"},
+    {WIDGET_BARRIER, 0, "BARRIER"},
+    {0}
+};
+#endif	/* SUPPORT_PRINTING_R_FORMAT */
+
+#if PCIBR_FREEZE_TIME || PCIBR_ATE_DEBUG
+LOCAL struct reg_desc   ate_bits[] =
+{
+    {0xFFFF000000000000ull, -48, "RMF", "%x"},
+    {~(IOPGSIZE - 1) &			/* may trim off some low bits */
+     0x0000FFFFFFFFF000ull, 0, "XIO", "%x"},
+    {0x0000000000000F00ull, -8, "port", "%x"},
+    {0x0000000000000010ull, 0, "Barrier"},
+    {0x0000000000000008ull, 0, "Prefetch"},
+    {0x0000000000000004ull, 0, "Precise"},
+    {0x0000000000000002ull, 0, "Coherent"},
+    {0x0000000000000001ull, 0, "Valid"},
+    {0}
+};
+#endif
+
+#if PCIBR_ATE_DEBUG
+LOCAL struct reg_values ssram_sizes[] =
+{
+    {BRIDGE_CTRL_SSRAM_512K, "512k"},
+    {BRIDGE_CTRL_SSRAM_128K, "128k"},
+    {BRIDGE_CTRL_SSRAM_64K, "64k"},
+    {BRIDGE_CTRL_SSRAM_1K, "1k"},
+    {0}
+};
+
+LOCAL struct reg_desc   control_bits[] =
+{
+    {BRIDGE_CTRL_FLASH_WR_EN, 0, "FLASH_WR_EN"},
+    {BRIDGE_CTRL_EN_CLK50, 0, "EN_CLK50"},
+    {BRIDGE_CTRL_EN_CLK40, 0, "EN_CLK40"},
+    {BRIDGE_CTRL_EN_CLK33, 0, "EN_CLK33"},
+    {BRIDGE_CTRL_RST_MASK, -24, "RST", "%x"},
+    {BRIDGE_CTRL_IO_SWAP, 0, "IO_SWAP"},
+    {BRIDGE_CTRL_MEM_SWAP, 0, "MEM_SWAP"},
+    {BRIDGE_CTRL_PAGE_SIZE, 0, "PAGE_SIZE"},
+    {BRIDGE_CTRL_SS_PAR_BAD, 0, "SS_PAR_BAD"},
+    {BRIDGE_CTRL_SS_PAR_EN, 0, "SS_PAR_EN"},
+    {BRIDGE_CTRL_SSRAM_SIZE_MASK, 0, "SSRAM_SIZE", 0, ssram_sizes},
+    {BRIDGE_CTRL_F_BAD_PKT, 0, "F_BAD_PKT"},
+    {BRIDGE_CTRL_LLP_XBAR_CRD_MASK, -12, "LLP_XBAR_CRD", "%d"},
+    {BRIDGE_CTRL_CLR_RLLP_CNT, 0, "CLR_RLLP_CNT"},
+    {BRIDGE_CTRL_CLR_TLLP_CNT, 0, "CLR_TLLP_CNT"},
+    {BRIDGE_CTRL_SYS_END, 0, "SYS_END"},
+    {BRIDGE_CTRL_MAX_TRANS_MASK, -4, "MAX_TRANS", "%d"},
+    {BRIDGE_CTRL_WIDGET_ID_MASK, 0, "WIDGET_ID", "%x"},
+    {0}
+};
+#endif
+
+/* kbrick widgetnum-to-bus layout */
+int p_busnum[MAX_PORT_NUM] = {                  /* widget#      */
+        0, 0, 0, 0, 0, 0, 0, 0,                 /* 0x0 - 0x7    */
+        2,                                      /* 0x8          */
+        1,                                      /* 0x9          */
+        0, 0,                                   /* 0xa - 0xb    */
+        5,                                      /* 0xc          */
+        6,                                      /* 0xd          */
+        4,                                      /* 0xe          */
+        3,                                      /* 0xf          */
+};
+
+/*
+ * Additional PIO spaces per slot are
+ * recorded in this structure.
+ */
+struct pciio_piospace_s {
+    pciio_piospace_t        next;	/* another space for this device */
+    char                    free;	/* 1 if free, 0 if in use               */
+    pciio_space_t           space;	/* Which space is in use                */
+    iopaddr_t               start;	/* Starting address of the PIO space    */
+    size_t                  count;	/* size of PIO space                    */
+};
+
+/* Use io spin locks. This ensures that all the PIO writes from a particular
+ * CPU to a particular IO device are synched before the start of the next
+ * set of PIO operations to the same device.
+ */
+#define pcibr_lock(pcibr_soft)		io_splock(pcibr_soft->bs_lock)
+#define pcibr_unlock(pcibr_soft, s)	io_spunlock(pcibr_soft->bs_lock,s)
+
+#if PCIBR_SOFT_LIST
+typedef struct pcibr_list_s *pcibr_list_p;
+struct pcibr_list_s {
+    pcibr_list_p            bl_next;
+    pcibr_soft_t            bl_soft;
+    devfs_handle_t            bl_vhdl;
+};
+pcibr_list_p            pcibr_list = 0;
+#endif
+
+typedef volatile unsigned *cfg_p;
+typedef volatile bridgereg_t *reg_p;
+
+#define	INFO_LBL_PCIBR_ASIC_REV	"_pcibr_asic_rev"
+
+#define	PCIBR_D64_BASE_UNSET	(0xFFFFFFFFFFFFFFFF)
+#define	PCIBR_D32_BASE_UNSET	(0xFFFFFFFF)
+
+#define PCIBR_VALID_SLOT(s)	(s < 8)
+
+#ifdef SN_XXX
+extern int      hub_device_flags_set(devfs_handle_t       widget_dev,
+                                     hub_widget_flags_t flags);
+#endif
+
+extern devfs_handle_t hwgraph_root;
+extern graph_error_t hwgraph_vertex_unref(devfs_handle_t vhdl);
+extern int cap_able(uint64_t x);
+extern uint64_t rmalloc(struct map *mp, size_t size);
+extern void rmfree(struct map *mp, size_t size, uint64_t a);
+extern int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
+extern long atoi(register char *p);
+extern void *swap_ptr(void **loc, void *new);
+extern char *dev_to_name(devfs_handle_t dev, char *buf, uint buflen);
+extern cnodeid_t nodevertex_to_cnodeid(devfs_handle_t vhdl);
+extern graph_error_t hwgraph_edge_remove(devfs_handle_t from, char *name, devfs_handle_t *toptr);
+extern struct map *rmallocmap(uint64_t mapsiz);
+extern void rmfreemap(struct map *mp);
+extern int compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr);
+extern void cmn_err_tag(int seqnumber, register int level, char *fmt, ...);
+
+
+
+/* =====================================================================
+ *    Function Table of Contents
+ *
+ *      The order of functions in this file has stopped
+ *      making much sense. We might want to take a look
+ *      at it some time and bring back some sanity, or
+ *      perhaps bust this file into smaller chunks.
+ */
+
+LOCAL void              do_pcibr_rrb_clear(bridge_t *, int);
+LOCAL void              do_pcibr_rrb_flush(bridge_t *, int);
+LOCAL int               do_pcibr_rrb_count_valid(bridge_t *, pciio_slot_t);
+LOCAL int               do_pcibr_rrb_count_avail(bridge_t *, pciio_slot_t);
+LOCAL int               do_pcibr_rrb_alloc(bridge_t *, pciio_slot_t, int);
+LOCAL int               do_pcibr_rrb_free(bridge_t *, pciio_slot_t, int);
+
+LOCAL void              do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int);
+
+int			pcibr_wrb_flush(devfs_handle_t);
+int                     pcibr_rrb_alloc(devfs_handle_t, int *, int *);
+int                     pcibr_rrb_check(devfs_handle_t, int *, int *, int *, int *);
+int                     pcibr_alloc_all_rrbs(devfs_handle_t, int, int, int, int, int, int, int, int, int);
+void                    pcibr_rrb_flush(devfs_handle_t);
+
+LOCAL int               pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
+void                    pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
+
+LOCAL void              pcibr_clearwidint(bridge_t *);
+LOCAL void              pcibr_setwidint(xtalk_intr_t);
+LOCAL int               pcibr_probe_slot(bridge_t *, cfg_p, unsigned *);
+
+void                    pcibr_init(void);
+int                     pcibr_attach(devfs_handle_t);
+int			pcibr_detach(devfs_handle_t);
+int                     pcibr_open(devfs_handle_t *, int, int, cred_t *);
+int                     pcibr_close(devfs_handle_t, int, int, cred_t *);
+int                     pcibr_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
+int                     pcibr_unmap(devfs_handle_t, vhandl_t *);
+int                     pcibr_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
+
+void                    pcibr_freeblock_sub(iopaddr_t *, iopaddr_t *, iopaddr_t, size_t);
+
+#ifndef BRINGUP
+LOCAL int               pcibr_init_ext_ate_ram(bridge_t *);
+#endif
+LOCAL int               pcibr_ate_alloc(pcibr_soft_t, int);
+LOCAL void              pcibr_ate_free(pcibr_soft_t, int, int);
+
+LOCAL pcibr_info_t      pcibr_info_get(devfs_handle_t);
+LOCAL pcibr_info_t      pcibr_device_info_new(pcibr_soft_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
+LOCAL void		pcibr_device_info_free(devfs_handle_t, pciio_slot_t);
+LOCAL int		pcibr_device_attach(devfs_handle_t,pciio_slot_t);
+LOCAL int		pcibr_device_detach(devfs_handle_t,pciio_slot_t);
+LOCAL iopaddr_t         pcibr_addr_pci_to_xio(devfs_handle_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+
+pcibr_piomap_t          pcibr_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
+void                    pcibr_piomap_free(pcibr_piomap_t);
+caddr_t                 pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
+void                    pcibr_piomap_done(pcibr_piomap_t);
+caddr_t                 pcibr_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+iopaddr_t               pcibr_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
+void                    pcibr_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
+
+LOCAL iopaddr_t         pcibr_flags_to_d64(unsigned, pcibr_soft_t);
+LOCAL bridge_ate_t      pcibr_flags_to_ate(unsigned);
+
+pcibr_dmamap_t          pcibr_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+void                    pcibr_dmamap_free(pcibr_dmamap_t);
+LOCAL bridge_ate_p      pcibr_ate_addr(pcibr_soft_t, int);
+LOCAL iopaddr_t         pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
+iopaddr_t               pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
+alenlist_t              pcibr_dmamap_list(pcibr_dmamap_t, alenlist_t, unsigned);
+void                    pcibr_dmamap_done(pcibr_dmamap_t);
+cnodeid_t		pcibr_get_dmatrans_node(devfs_handle_t);
+iopaddr_t               pcibr_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
+alenlist_t              pcibr_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+void                    pcibr_dmamap_drain(pcibr_dmamap_t);
+void                    pcibr_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
+void                    pcibr_dmalist_drain(devfs_handle_t, alenlist_t);
+iopaddr_t               pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
+
+static unsigned		pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines);
+pcibr_intr_t            pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+void                    pcibr_intr_free(pcibr_intr_t);
+LOCAL void              pcibr_setpciint(xtalk_intr_t);
+int                     pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t, void *);
+void                    pcibr_intr_disconnect(pcibr_intr_t);
+
+devfs_handle_t            pcibr_intr_cpu_get(pcibr_intr_t);
+void                    pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
+void                    pcibr_intr_list_func(intr_arg_t);
+
+LOCAL void              print_bridge_errcmd(uint32_t, char *);
+
+void                    pcibr_error_dump(pcibr_soft_t);
+uint32_t              pcibr_errintr_group(uint32_t);
+LOCAL void		pcibr_pioerr_check(pcibr_soft_t);
+LOCAL void              pcibr_error_intr_handler(intr_arg_t);
+
+LOCAL int               pcibr_addr_toslot(pcibr_soft_t, iopaddr_t, pciio_space_t *, iopaddr_t *, pciio_function_t *);
+LOCAL void              pcibr_error_cleanup(pcibr_soft_t, int);
+void                    pcibr_device_disable(pcibr_soft_t, int);
+LOCAL int               pcibr_pioerror(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
+int                     pcibr_dmard_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
+int                     pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
+LOCAL int               pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
+int                     pcibr_error_devenable(devfs_handle_t, int);
+
+void                    pcibr_provider_startup(devfs_handle_t);
+void                    pcibr_provider_shutdown(devfs_handle_t);
+
+int                     pcibr_reset(devfs_handle_t);
+pciio_endian_t          pcibr_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
+int                     pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
+pciio_priority_t        pcibr_priority_set(devfs_handle_t, pciio_priority_t);
+int                     pcibr_device_flags_set(devfs_handle_t, pcibr_device_flags_t);
+
+LOCAL cfg_p             pcibr_config_addr(devfs_handle_t, unsigned);
+uint64_t                pcibr_config_get(devfs_handle_t, unsigned, unsigned);
+LOCAL uint64_t          do_pcibr_config_get(cfg_p, unsigned, unsigned);
+void                    pcibr_config_set(devfs_handle_t, unsigned, unsigned, uint64_t);
+LOCAL void              do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
+
+LOCAL pcibr_hints_t     pcibr_hints_get(devfs_handle_t, int);
+void                    pcibr_hints_fix_rrbs(devfs_handle_t);
+void                    pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
+void			pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
+void                    pcibr_set_rrb_callback(devfs_handle_t, rrb_alloc_funct_t);
+void                    pcibr_hints_handsoff(devfs_handle_t);
+void                    pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, uint64_t);
+
+LOCAL int		pcibr_slot_reset(devfs_handle_t,pciio_slot_t);
+LOCAL int		pcibr_slot_info_init(devfs_handle_t,pciio_slot_t);
+LOCAL int		pcibr_slot_info_free(devfs_handle_t,pciio_slot_t);
+LOCAL int		pcibr_slot_addr_space_init(devfs_handle_t,pciio_slot_t);
+LOCAL int		pcibr_slot_device_init(devfs_handle_t, pciio_slot_t);
+LOCAL int		pcibr_slot_guest_info_init(devfs_handle_t,pciio_slot_t);
+LOCAL int		pcibr_slot_initial_rrb_alloc(devfs_handle_t,pciio_slot_t);
+LOCAL int		pcibr_slot_call_device_attach(devfs_handle_t,pciio_slot_t);
+LOCAL int		pcibr_slot_call_device_detach(devfs_handle_t,pciio_slot_t);
+
+int			pcibr_slot_powerup(devfs_handle_t,pciio_slot_t);
+int			pcibr_slot_shutdown(devfs_handle_t,pciio_slot_t);
+int			pcibr_slot_inquiry(devfs_handle_t,pciio_slot_t);		
+
+/* =====================================================================
+ *    RRB management
+ */
+
+#define LSBIT(word)		((word) &~ ((word)-1))
+
+#define PCIBR_RRB_SLOT_VIRTUAL	8
+
+LOCAL void
+do_pcibr_rrb_clear(bridge_t *bridge, int rrb)
+{
+    bridgereg_t             status;
+
+    /* bridge_lock must be held;
+     * this RRB must be disabled.
+     */
+
+    /* wait until RRB has no outstanding XIO packets. */
+    while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
+	;				/* XXX- beats on bridge. bad idea? */
+    }
+
+    /* if the RRB has data, drain it. */
+    if (status & BRIDGE_RRB_VALID(rrb)) {
+	bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
+
+	/* wait until RRB is no longer valid. */
+	while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
+	    ;				/* XXX- beats on bridge. bad idea? */
+	}
+    }
+}
+
+LOCAL void
+do_pcibr_rrb_flush(bridge_t *bridge, int rrbn)
+{
+    reg_p                   rrbp = &bridge->b_rrb_map[rrbn & 1].reg;
+    bridgereg_t             rrbv;
+    int                     shft = 4 * (rrbn >> 1);
+    unsigned                ebit = BRIDGE_RRB_EN << shft;
+
+    rrbv = *rrbp;
+    if (rrbv & ebit)
+	*rrbp = rrbv & ~ebit;
+
+    do_pcibr_rrb_clear(bridge, rrbn);
+
+    if (rrbv & ebit)
+	*rrbp = rrbv;
+}
+
+/*
+ *    pcibr_rrb_count_valid: count how many RRBs are
+ *      marked valid for the specified PCI slot on this
+ *      bridge.
+ *
+ *      NOTE: The "slot" parameter for all pcibr_rrb
+ *      management routines must include the "virtual"
+ *      bit; when managing both the normal and the
+ *      virtual channel, separate calls to these
+ *      routines must be made. To denote the virtual
+ *      channel, add PCIBR_RRB_SLOT_VIRTUAL to the slot
+ *      number.
+ *
+ *      IMPL NOTE: The obvious algorithm is to iterate
+ *      through the RRB fields, incrementing a count if
+ *      the RRB is valid and matches the slot. However,
+ *      it is much simpler to use an algorithm derived
+ *      from the "partitioned add" idea. First, XOR in a
+ *      pattern such that the fields that match this
+ *      slot come up "all ones" and all other fields
+ *      have zeros in the mismatching bits. Then AND
+ *      together the bits in the field, so we end up
+ *      with one bit turned on for each field that
+ *      matched. Now we need to count these bits. This
+ *      can be done either with a series of shift/add
+ *      instructions or by using "tmp % 15"; I expect
+ *      that the cascaded shift/add will be faster.
+ */
+
+LOCAL int
+do_pcibr_rrb_count_valid(bridge_t *bridge,
+			 pciio_slot_t slot)
+{
+    bridgereg_t             tmp;
+
+    tmp = bridge->b_rrb_map[slot & 1].reg;
+    tmp ^= 0x11111111 * (7 - slot / 2);
+    tmp &= (0xCCCCCCCC & tmp) >> 2;
+    tmp &= (0x22222222 & tmp) >> 1;
+    tmp += tmp >> 4;
+    tmp += tmp >> 8;
+    tmp += tmp >> 16;
+    return tmp & 15;
+}
+
+/*
+ *    do_pcibr_rrb_count_avail: count how many RRBs are
+ *      available to be allocated for the specified slot.
+ *
+ *      IMPL NOTE: similar to the above, except we are
+ *      just counting how many fields have the valid bit
+ *      turned off.
+ */
+LOCAL int
+do_pcibr_rrb_count_avail(bridge_t *bridge,
+			 pciio_slot_t slot)
+{
+    bridgereg_t             tmp;
+
+    tmp = bridge->b_rrb_map[slot & 1].reg;
+    tmp = (0x88888888 & ~tmp) >> 3;
+    tmp += tmp >> 4;
+    tmp += tmp >> 8;
+    tmp += tmp >> 16;
+    return tmp & 15;
+}
+
+/*
+ *    do_pcibr_rrb_alloc: allocate some additional RRBs
+ *      for the specified slot. Returns -1 if there were
+ *      insufficient free RRBs to satisfy the request,
+ *      or 0 if the request was fulfilled.
+ *
+ *      Note that if a request can be partially filled,
+ *      it will be, even if we return failure.
+ *
+ *      IMPL NOTE: again we avoid iterating across all
+ *      the RRBs; instead, we form up a word containing
+ *      one bit for each free RRB, then peel the bits
+ *      off from the low end.
+ */
+LOCAL int
+do_pcibr_rrb_alloc(bridge_t *bridge,
+		   pciio_slot_t slot,
+		   int more)
+{
+    int                     rv = 0;
+    bridgereg_t             reg, tmp, bit;
+
+    reg = bridge->b_rrb_map[slot & 1].reg;
+    /* one low bit per field whose valid bit is clear (free RRB) */
+    tmp = (0x88888888 & ~reg) >> 3;
+    while (more-- > 0) {
+	bit = LSBIT(tmp);
+	if (!bit) {
+	    /* ran out of free RRBs; request only partially filled */
+	    rv = -1;
+	    break;
+	}
+	tmp &= ~bit;
+	/* clear the whole 4-bit field, then write valid|slot into it */
+	reg = ((reg & ~(bit * 15)) | (bit * (8 + slot / 2)));
+    }
+    bridge->b_rrb_map[slot & 1].reg = reg;
+    return rv;
+}
+
+/*
+ *    do_pcibr_rrb_free: release some of the RRBs that
+ *      have been allocated for the specified
+ *      slot. Returns zero for success, or negative if
+ *      it was unable to free that many RRBs.
+ *
+ *      IMPL NOTE: We form up a bit for each RRB
+ *      allocated to the slot, aligned with the VALID
+ *      bitfield this time; then we peel bits off one at
+ *      a time, releasing the corresponding RRB.
+ */
+LOCAL int
+do_pcibr_rrb_free(bridge_t *bridge,
+		  pciio_slot_t slot,
+		  int less)
+{
+    int                     rv = 0;
+    bridgereg_t             reg, tmp, clr, bit;
+    int                     i;
+
+    clr = 0;
+    reg = bridge->b_rrb_map[slot & 1].reg;
+
+    /* This needs to be done otherwise the rrb's on the virtual channel
+     * for this slot won't be freed !!
+     */
+    tmp = reg & 0xbbbbbbbb;
+
+    /* fields matching this slot become all-ones ... */
+    tmp ^= (0x11111111 * (7 - slot / 2));
+    /* ... then AND each field's bits up into its valid (top) bit */
+    tmp &= (0x33333333 & tmp) << 2;
+    tmp &= (0x44444444 & tmp) << 1;
+    while (less-- > 0) {
+	bit = LSBIT(tmp);
+	if (!bit) {
+	    /* fewer RRBs assigned to this slot than we were asked to free */
+	    rv = -1;
+	    break;
+	}
+	tmp &= ~bit;
+	/* clear the valid bit: RRB released back to the pool */
+	reg &= ~bit;
+	clr |= bit;
+    }
+    bridge->b_rrb_map[slot & 1].reg = reg;
+
+    /* reset each hardware RRB whose valid bit we just cleared */
+    for (i = 0; i < 8; i++)
+	if (clr & (8 << (4 * i)))
+	    do_pcibr_rrb_clear(bridge, (2 * i) + (slot & 1));
+
+    return rv;
+}
+
+/* do_pcibr_rrb_autoalloc: assign up to more_rrbs additional
+ * RRBs to the given slot, drawing first from the slot's own
+ * reservation (bs_rrb_res), then from the bus-parity free pool
+ * (bs_rrb_avail); stops quietly when neither can supply another.
+ */
+LOCAL void
+do_pcibr_rrb_autoalloc(pcibr_soft_t pcibr_soft,
+		       int slot,
+		       int more_rrbs)
+{
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    int                     got;
+
+    for (got = 0; got < more_rrbs; ++got) {
+	if (pcibr_soft->bs_rrb_res[slot & 7] > 0)
+	    pcibr_soft->bs_rrb_res[slot & 7]--;
+	else if (pcibr_soft->bs_rrb_avail[slot & 1] > 0)
+	    pcibr_soft->bs_rrb_avail[slot & 1]--;
+	else
+	    break;
+	/* NOTE(review): if this alloc fails, the res/avail count
+	 * decremented above is not restored -- confirm intended.
+	 */
+	if (do_pcibr_rrb_alloc(bridge, slot, 1) < 0)
+	    break;
+#if PCIBR_RRB_DEBUG
+	printk( "do_pcibr_rrb_autoalloc: add one to slot %d%s\n",
+		slot & 7, slot & 8 ? "v" : "");
+#endif
+	pcibr_soft->bs_rrb_valid[slot]++;
+    }
+#if PCIBR_RRB_DEBUG
+    printk("%s: %d+%d free RRBs. Allocation list:\n", pcibr_soft->bs_name,
+	    pcibr_soft->bs_rrb_avail[0],
+	    pcibr_soft->bs_rrb_avail[1]);
+    for (slot = 0; slot < 8; ++slot)
+	printk("\t%d+%d+%d",
+		0xFFF & pcibr_soft->bs_rrb_valid[slot],
+		0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
+		pcibr_soft->bs_rrb_res[slot]);
+	printk("\n");
+#endif
+}
+
+/*
+ * Device driver interface to flush the write buffers for a specified
+ * device hanging off the bridge.
+ */
+int
+pcibr_wrb_flush(devfs_handle_t pconn_vhdl)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    volatile bridgereg_t   *wrb_flush;
+
+    /* busy-wait until the slot's write request buffer register
+     * reads zero, i.e. the buffer has drained.
+     */
+    wrb_flush = &(bridge->b_wr_req_buf[pciio_slot].reg);
+    while (*wrb_flush);
+
+    return(0);
+}
+/*
+ * Device driver interface to request RRBs for a specified device
+ * hanging off a Bridge.  The driver requests the total number of
+ * RRBs it would like for the normal channel (vchan0) and for the
+ * "virtual channel" (vchan1).  The actual number allocated to each
+ * channel is returned.
+ *
+ * If we cannot allocate at least one RRB to a channel that needs
+ * at least one, return -1 (failure).  Otherwise, satisfy the request
+ * as best we can and return 0.
+ */
+int
+pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
+		int *count_vchan0,
+		int *count_vchan1)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    int                     desired_vchan0;
+    int                     desired_vchan1;
+    int                     orig_vchan0;
+    int                     orig_vchan1;
+    int                     delta_vchan0;
+    int                     delta_vchan1;
+    int                     final_vchan0;
+    int                     final_vchan1;
+    int                     avail_rrbs;
+    unsigned                s;
+    int                     error;
+
+    /*
+     * TBD: temper request with admin info about RRB allocation,
+     * and according to demand from other devices on this Bridge.
+     *
+     * One way of doing this would be to allocate two RRBs
+     * for each device on the bus, before any drivers start
+     * asking for extras. This has the weakness that one
+     * driver might not give back an "extra" RRB until after
+     * another driver has already failed to get one that
+     * it wanted.
+     */
+
+    s = pcibr_lock(pcibr_soft);
+
+    /* How many RRBs do we own? */
+    orig_vchan0 = pcibr_soft->bs_rrb_valid[pciio_slot];
+    orig_vchan1 = pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL];
+
+    /* How many RRBs do we want? (NULL pointer means "keep current") */
+    desired_vchan0 = count_vchan0 ? *count_vchan0 : orig_vchan0;
+    desired_vchan1 = count_vchan1 ? *count_vchan1 : orig_vchan1;
+
+    /* How many RRBs are free? (pool for our parity, plus our reservation) */
+    avail_rrbs = pcibr_soft->bs_rrb_avail[pciio_slot & 1]
+	+ pcibr_soft->bs_rrb_res[pciio_slot];
+
+    /* Figure desired deltas */
+    delta_vchan0 = desired_vchan0 - orig_vchan0;
+    delta_vchan1 = desired_vchan1 - orig_vchan1;
+
+    /* Trim back deltas to something
+     * that we can actually meet, by
+     * decreasing the ending allocation
+     * for whichever channel wants
+     * more RRBs. If both want the same
+     * number, cut the second channel.
+     * NOTE: do not change the allocation for
+     * a channel that was passed as NULL.
+     */
+    while ((delta_vchan0 + delta_vchan1) > avail_rrbs) {
+	if (count_vchan0 &&
+	    (!count_vchan1 ||
+	     ((orig_vchan0 + delta_vchan0) >
+	      (orig_vchan1 + delta_vchan1))))
+	    delta_vchan0--;
+	else
+	    delta_vchan1--;
+    }
+
+    /* Figure final RRB allocations
+     */
+    final_vchan0 = orig_vchan0 + delta_vchan0;
+    final_vchan1 = orig_vchan1 + delta_vchan1;
+
+    /* If either channel wants RRBs but our actions
+     * would leave it with none, declare an error,
+     * but DO NOT change any RRB allocations.
+     */
+    if ((desired_vchan0 && !final_vchan0) ||
+	(desired_vchan1 && !final_vchan1)) {
+
+	error = -1;
+
+    } else {
+
+	/* Commit the allocations: free, then alloc.
+	 */
+	if (delta_vchan0 < 0)
+	    (void) do_pcibr_rrb_free(bridge, pciio_slot, -delta_vchan0);
+	if (delta_vchan1 < 0)
+	    (void) do_pcibr_rrb_free(bridge, PCIBR_RRB_SLOT_VIRTUAL + pciio_slot, -delta_vchan1);
+
+	if (delta_vchan0 > 0)
+	    (void) do_pcibr_rrb_alloc(bridge, pciio_slot, delta_vchan0);
+	if (delta_vchan1 > 0)
+	    (void) do_pcibr_rrb_alloc(bridge, PCIBR_RRB_SLOT_VIRTUAL + pciio_slot, delta_vchan1);
+
+	/* Return final values to caller.
+	 */
+	if (count_vchan0)
+	    *count_vchan0 = final_vchan0;
+	if (count_vchan1)
+	    *count_vchan1 = final_vchan1;
+
+	/* prevent automatic changes to this slot's RRBs
+	 */
+	pcibr_soft->bs_rrb_fixed |= 1 << pciio_slot;
+
+	/* Track the actual allocations, release
+	 * any further reservations, and update the
+	 * number of available RRBs.
+	 */
+
+	pcibr_soft->bs_rrb_valid[pciio_slot] = final_vchan0;
+	pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL] = final_vchan1;
+	pcibr_soft->bs_rrb_avail[pciio_slot & 1] =
+	    pcibr_soft->bs_rrb_avail[pciio_slot & 1]
+	    + pcibr_soft->bs_rrb_res[pciio_slot]
+	    - delta_vchan0
+	    - delta_vchan1;
+	pcibr_soft->bs_rrb_res[pciio_slot] = 0;
+
+#if PCIBR_RRB_DEBUG
+	printk("pcibr_rrb_alloc: slot %d set to %d+%d; %d+%d free\n",
+		pciio_slot, final_vchan0, final_vchan1,
+		pcibr_soft->bs_rrb_avail[0],
+		pcibr_soft->bs_rrb_avail[1]);
+	for (pciio_slot = 0; pciio_slot < 8; ++pciio_slot)
+	    printk("\t%d+%d+%d",
+		    0xFFF & pcibr_soft->bs_rrb_valid[pciio_slot],
+		    0xFFF & pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL],
+		    pcibr_soft->bs_rrb_res[pciio_slot]);
+	printk("\n");
+#endif
+
+	error = 0;
+    }
+
+    pcibr_unlock(pcibr_soft, s);
+    return error;
+}
+
+/*
+ * Device driver interface to check the current state
+ * of the RRB allocations.
+ *
+ *   pconn_vhdl is your PCI connection point (specifies which
+ *      PCI bus and which slot).
+ *
+ *   count_vchan0 points to where to return the number of RRBs
+ *      assigned to the primary DMA channel, used by all DMA
+ *      that does not explicitly ask for the alternate virtual
+ *      channel.
+ *
+ *   count_vchan1 points to where to return the number of RRBs
+ *      assigned to the secondary DMA channel, used when
+ *      PCIBR_VCHAN1 and PCIIO_DMA_A64 are specified.
+ *
+ *   count_reserved points to where to return the number of RRBs
+ *      that have been automatically reserved for your device at
+ *      startup, but which have not been assigned to a
+ *      channel. RRBs must be assigned to a channel to be used;
+ *      this can be done either with an explicit pcibr_rrb_alloc
+ *      call, or automatically by the infrastructure when a DMA
+ *      translation is constructed. Any call to pcibr_rrb_alloc
+ *      will release any unassigned reserved RRBs back to the
+ *      free pool.
+ *
+ *   count_pool points to where to return the number of RRBs
+ *      that are currently unassigned and unreserved. This
+ *      number can (and will) change as other drivers make calls
+ *      to pcibr_rrb_alloc, or automatically allocate RRBs for
+ *      DMA beyond their initial reservation.
+ *
+ * NULL may be passed for any of the return value pointers
+ * the caller is not interested in.
+ *
+ * The return value is "0" if all went well, or "-1" if
+ * there is a problem. Additionally, if the wrong vertex
+ * is passed in, one of the subsidiary support functions
+ * could panic with a "bad pciio fingerprint."
+ */
+
+int
+pcibr_rrb_check(devfs_handle_t pconn_vhdl,
+		int *count_vchan0,
+		int *count_vchan1,
+		int *count_reserved,
+		int *count_pool)
+{
+    pciio_info_t            pciio_info;
+    pciio_slot_t            pciio_slot;
+    pcibr_soft_t            pcibr_soft;
+    unsigned                s;
+    int                     error = -1;
+
+    if ((pciio_info = pciio_info_get(pconn_vhdl)) &&
+	(pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info)) &&
+	((pciio_slot = pciio_info_slot_get(pciio_info)) < 8)) {
+
+	/* read all four counts under the lock for a consistent snapshot */
+	s = pcibr_lock(pcibr_soft);
+
+	if (count_vchan0)
+	    *count_vchan0 =
+		pcibr_soft->bs_rrb_valid[pciio_slot];
+
+	if (count_vchan1)
+	    *count_vchan1 =
+		pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL];
+
+	if (count_reserved)
+	    *count_reserved =
+		pcibr_soft->bs_rrb_res[pciio_slot];
+
+	if (count_pool)
+	    *count_pool =
+		pcibr_soft->bs_rrb_avail[pciio_slot & 1];
+
+	error = 0;
+
+	pcibr_unlock(pcibr_soft, s);
+    }
+    return error;
+}
+
+/* pcibr_alloc_all_rrbs allocates all the rrbs available in the quantities
+ * requested for each of the devices.  The even_odd argument indicates whether
+ * allocation for the odd or even rrbs is requested and the next group of four pairs
+ * are the amount to assign to each device (they should sum to <= 8) and
+ * whether to set the virtual bit for that device (1 indicates yes, 0 indicates no)
+ * the devices in order are either 0, 2, 4, 6 or 1, 3, 5, 7
+ * if even_odd is even we alloc even rrbs else we allocate odd rrbs
+ * returns 0 if no errors else returns -1
+ */
+
+int
+pcibr_alloc_all_rrbs(devfs_handle_t vhdl, int even_odd,
+		     int dev_1_rrbs, int virt1, int dev_2_rrbs, int virt2,
+		     int dev_3_rrbs, int virt3, int dev_4_rrbs, int virt4)
+{
+    devfs_handle_t            pcibr_vhdl;
+#ifdef colin
+    pcibr_soft_t            pcibr_soft;
+#else
+    pcibr_soft_t	pcibr_soft = NULL;
+#endif
+    bridge_t               *bridge = NULL;
+
+    uint32_t              rrb_setting = 0;
+    /* RRB map fields are filled from the top (field 7) downward */
+    int                     rrb_shift = 7;
+    uint32_t              cur_rrb;
+    int                     dev_rrbs[4];
+    int                     virt[4];
+    int                     i, j;
+    unsigned                s;
+
+    /* locate the bridge soft state via the hwgraph, if present */
+    if (GRAPH_SUCCESS ==
+	hwgraph_traverse(vhdl, EDGE_LBL_PCI, &pcibr_vhdl)) {
+	pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+	if (pcibr_soft)
+	    bridge = pcibr_soft->bs_base;
+	hwgraph_vertex_unref(pcibr_vhdl);
+    }
+    /* fall back to a direct PIO translation to the widget */
+    if (bridge == NULL)
+	bridge = (bridge_t *) xtalk_piotrans_addr
+	    (vhdl, NULL, 0, sizeof(bridge_t), 0);
+
+    even_odd &= 1;
+
+    dev_rrbs[0] = dev_1_rrbs;
+    dev_rrbs[1] = dev_2_rrbs;
+    dev_rrbs[2] = dev_3_rrbs;
+    dev_rrbs[3] = dev_4_rrbs;
+
+    virt[0] = virt1;
+    virt[1] = virt2;
+    virt[2] = virt3;
+    virt[3] = virt4;
+
+    /* reject requests exceeding the 8 RRBs of this parity, or negatives */
+    if ((dev_1_rrbs + dev_2_rrbs + dev_3_rrbs + dev_4_rrbs) > 8) {
+	return -1;
+    }
+    if ((dev_1_rrbs < 0) || (dev_2_rrbs < 0) || (dev_3_rrbs < 0) || (dev_4_rrbs < 0)) {
+	return -1;
+    }
+    /* walk through rrbs */
+    for (i = 0; i < 4; i++) {
+	if (virt[i]) {
+	    /* 0xc = valid + virtual-channel bit for this device */
+	    cur_rrb = i | 0xc;
+	    cur_rrb = cur_rrb << (rrb_shift * 4);
+	    rrb_shift--;
+	    rrb_setting = rrb_setting | cur_rrb;
+	    dev_rrbs[i] = dev_rrbs[i] - 1;
+	}
+	for (j = 0; j < dev_rrbs[i]; j++) {
+	    /* 0x8 = valid bit only (normal channel) */
+	    cur_rrb = i | 0x8;
+	    cur_rrb = cur_rrb << (rrb_shift * 4);
+	    rrb_shift--;
+	    rrb_setting = rrb_setting | cur_rrb;
+	}
+    }
+
+    /* s is only set -- and later used -- when pcibr_soft is non-NULL */
+    if (pcibr_soft)
+	s = pcibr_lock(pcibr_soft);
+
+    bridge->b_rrb_map[even_odd].reg = rrb_setting;
+
+    if (pcibr_soft) {
+
+	pcibr_soft->bs_rrb_fixed |= 0x55 << even_odd;
+
+	/* since we've "FIXED" the allocations
+	 * for these slots, we probably can dispense
+	 * with tracking avail/res/valid data, but
+	 * keeping it up to date helps debugging.
+	 */
+
+	pcibr_soft->bs_rrb_avail[even_odd] =
+	    8 - (dev_1_rrbs + dev_2_rrbs + dev_3_rrbs + dev_4_rrbs);
+
+	pcibr_soft->bs_rrb_res[even_odd + 0] = 0;
+	pcibr_soft->bs_rrb_res[even_odd + 2] = 0;
+	pcibr_soft->bs_rrb_res[even_odd + 4] = 0;
+	pcibr_soft->bs_rrb_res[even_odd + 6] = 0;
+
+	pcibr_soft->bs_rrb_valid[even_odd + 0] = dev_1_rrbs - virt1;
+	pcibr_soft->bs_rrb_valid[even_odd + 2] = dev_2_rrbs - virt2;
+	pcibr_soft->bs_rrb_valid[even_odd + 4] = dev_3_rrbs - virt3;
+	pcibr_soft->bs_rrb_valid[even_odd + 6] = dev_4_rrbs - virt4;
+
+	pcibr_soft->bs_rrb_valid[even_odd + 0 + PCIBR_RRB_SLOT_VIRTUAL] = virt1;
+	pcibr_soft->bs_rrb_valid[even_odd + 2 + PCIBR_RRB_SLOT_VIRTUAL] = virt2;
+	pcibr_soft->bs_rrb_valid[even_odd + 4 + PCIBR_RRB_SLOT_VIRTUAL] = virt3;
+	pcibr_soft->bs_rrb_valid[even_odd + 6 + PCIBR_RRB_SLOT_VIRTUAL] = virt4;
+
+	pcibr_unlock(pcibr_soft, s);
+    }
+    return 0;
+}
+
+/*
+ *    pcibr_rrb_flush: chase down all the RRBs assigned
+ *      to the specified connection point, and flush
+ *      them.
+ */
+void
+pcibr_rrb_flush(devfs_handle_t pconn_vhdl)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    unsigned                s;
+    reg_p                   rrbp;
+    unsigned                rrbm;
+    int                     i;
+    int                     rrbn;
+    unsigned                sval;
+    unsigned                mask;
+
+    /* match fields that are valid AND assigned to this slot number */
+    sval = BRIDGE_RRB_EN | (pciio_slot >> 1);
+    mask = BRIDGE_RRB_EN | BRIDGE_RRB_PDEV;
+    rrbn = pciio_slot & 1;
+    rrbp = &bridge->b_rrb_map[rrbn].reg;
+
+    s = pcibr_lock(pcibr_soft);
+    /* scan the map one 4-bit field at a time; RRB index steps by 2
+     * since this register covers only one parity of RRBs.
+     */
+    rrbm = *rrbp;
+    for (i = 0; i < 8; ++i) {
+	if ((rrbm & mask) == sval)
+	    do_pcibr_rrb_flush(bridge, rrbn);
+	rrbm >>= 4;
+	rrbn += 2;
+    }
+    pcibr_unlock(pcibr_soft, s);
+}
+
+/* =====================================================================
+ *    Device(x) register management
+ */
+
+/* pcibr_try_set_device: attempt to modify Device(x)
+ * for the specified slot on the specified bridge
+ * as requested in flags, limited to the specified
+ * bits. Returns which BRIDGE bits were in conflict,
+ * or ZERO if everything went OK.
+ *
+ * Caller MUST hold pcibr_lock when calling this function.
+ */
+LOCAL int
+pcibr_try_set_device(pcibr_soft_t pcibr_soft,
+		     pciio_slot_t slot,
+		     unsigned flags,
+		     bridgereg_t mask)
+{
+    bridge_t               *bridge;
+    pcibr_soft_slot_t       slotp;
+    bridgereg_t             old;
+    bridgereg_t             new;
+    bridgereg_t             chg;
+    bridgereg_t             bad;
+    bridgereg_t             badpmu;
+    bridgereg_t             badd32;
+    bridgereg_t             badd64;
+    bridgereg_t             fix;
+    unsigned                s;
+    bridgereg_t             xmask;
+
+    /* on an xbridge, the PMU/D64 bit groups differ; widen the
+     * working mask accordingly.
+     */
+    xmask = mask;
+    if (pcibr_soft->bs_xbridge) {
+    	if (mask == BRIDGE_DEV_PMU_BITS)
+		xmask = XBRIDGE_DEV_PMU_BITS;
+	if (mask == BRIDGE_DEV_D64_BITS)
+		xmask = XBRIDGE_DEV_D64_BITS;
+    }
+
+    slotp = &pcibr_soft->bs_slot[slot];
+
+    s = pcibr_lock(pcibr_soft);
+
+    bridge = pcibr_soft->bs_base;
+
+    old = slotp->bss_device;
+
+    /* figure out what the desired
+     * Device(x) bits are based on
+     * the flags specified.
+     */
+
+    new = old;
+
+    /* Currently, we inherit anything that
+     * the new caller has not specified in
+     * one way or another, unless we take
+     * action here to not inherit.
+     *
+     * This is needed for the "swap" stuff,
+     * since it could have been set via
+     * pcibr_endian_set -- altho note that
+     * any explicit PCIBR_BYTE_STREAM or
+     * PCIBR_WORD_VALUES will freely override
+     * the effect of that call (and vice
+     * versa, no protection either way).
+     *
+     * I want to get rid of pcibr_endian_set
+     * in favor of tracking DMA endianness
+     * using the flags specified when DMA
+     * channels are created.
+     */
+
+#define	BRIDGE_DEV_WRGA_BITS	(BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
+#define	BRIDGE_DEV_SWAP_BITS	(BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)
+
+    /* Do not use Barrier, Write Gather,
+     * or Prefetch unless asked.
+     * Leave everything else as it
+     * was from the last time.
+     */
+    new = new
+	& ~BRIDGE_DEV_BARRIER
+	& ~BRIDGE_DEV_WRGA_BITS
+	& ~BRIDGE_DEV_PREF
+	;
+
+    /* Generic macro flags
+     */
+    if (flags & PCIIO_DMA_DATA) {
+#ifdef colin
+	new = new
+	    & ~BRIDGE_DEV_BARRIER	/* barrier off */
+	    | BRIDGE_DEV_PREF;		/* prefetch on */
+#else
+	new = (new
+            & ~BRIDGE_DEV_BARRIER)      /* barrier off */
+            | BRIDGE_DEV_PREF;          /* prefetch on */
+#endif
+
+    }
+    if (flags & PCIIO_DMA_CMD) {
+#ifdef colin
+	new = new
+	    & ~BRIDGE_DEV_PREF		/* prefetch off */
+	    & ~BRIDGE_DEV_WRGA_BITS	/* write gather off */
+	    | BRIDGE_DEV_BARRIER;	/* barrier on */
+#else
+        new = ((new
+            & ~BRIDGE_DEV_PREF)         /* prefetch off */
+            & ~BRIDGE_DEV_WRGA_BITS)    /* write gather off */
+            | BRIDGE_DEV_BARRIER;       /* barrier on */
+#endif
+    }
+    /* Generic detail flags
+     */
+    if (flags & PCIIO_WRITE_GATHER)
+	new |= BRIDGE_DEV_WRGA_BITS;
+    if (flags & PCIIO_NOWRITE_GATHER)
+	new &= ~BRIDGE_DEV_WRGA_BITS;
+
+    if (flags & PCIIO_PREFETCH)
+	new |= BRIDGE_DEV_PREF;
+    if (flags & PCIIO_NOPREFETCH)
+	new &= ~BRIDGE_DEV_PREF;
+
+    if (flags & PCIBR_WRITE_GATHER)
+	new |= BRIDGE_DEV_WRGA_BITS;
+    if (flags & PCIBR_NOWRITE_GATHER)
+	new &= ~BRIDGE_DEV_WRGA_BITS;
+
+    if (flags & PCIIO_BYTE_STREAM)
+	new |= (pcibr_soft->bs_xbridge) ? 
+			BRIDGE_DEV_SWAP_DIR : BRIDGE_DEV_SWAP_BITS;
+    if (flags & PCIIO_WORD_VALUES)
+	new &= (pcibr_soft->bs_xbridge) ? 
+			~BRIDGE_DEV_SWAP_DIR : ~BRIDGE_DEV_SWAP_BITS;
+
+    /* Provider-specific flags
+     */
+    if (flags & PCIBR_PREFETCH)
+	new |= BRIDGE_DEV_PREF;
+    if (flags & PCIBR_NOPREFETCH)
+	new &= ~BRIDGE_DEV_PREF;
+
+    if (flags & PCIBR_PRECISE)
+	new |= BRIDGE_DEV_PRECISE;
+    if (flags & PCIBR_NOPRECISE)
+	new &= ~BRIDGE_DEV_PRECISE;
+
+    if (flags & PCIBR_BARRIER)
+	new |= BRIDGE_DEV_BARRIER;
+    if (flags & PCIBR_NOBARRIER)
+	new &= ~BRIDGE_DEV_BARRIER;
+
+    if (flags & PCIBR_64BIT)
+	new |= BRIDGE_DEV_DEV_SIZE;
+    if (flags & PCIBR_NO64BIT)
+	new &= ~BRIDGE_DEV_DEV_SIZE;
+
+    chg = old ^ new;				/* what are we changing, */
+    chg &= xmask;				/* of the interesting bits */
+
+    if (chg) {
+
+	/* a change conflicts with an existing user of a bit group
+	 * only if that group's use counter is nonzero.
+	 */
+	badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
+	if (pcibr_soft->bs_xbridge) {
+		badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
+		badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
+	} else {
+		badpmu = slotp->bss_pmu_uctr ? (BRIDGE_DEV_PMU_BITS & chg) : 0;
+		badd64 = slotp->bss_d64_uctr ? (BRIDGE_DEV_D64_BITS & chg) : 0;
+	}
+	bad = badpmu | badd32 | badd64;
+
+	if (bad) {
+
+	    /* some conflicts can be resolved by
+	     * forcing the bit on. this may cause
+	     * some performance degradation in
+	     * the stream(s) that want the bit off,
+	     * but the alternative is not allowing
+	     * the new stream at all.
+	     */
+#ifdef colin
+	    if (fix = bad & (BRIDGE_DEV_PRECISE |
+			     BRIDGE_DEV_BARRIER)) {
+#else
+            if ( (fix = bad & (BRIDGE_DEV_PRECISE |
+                             BRIDGE_DEV_BARRIER)) ){
+#endif
+		bad &= ~fix;
+		/* don't change these bits if
+		 * they are already set in "old"
+		 */
+		chg &= ~(fix & old);
+	    }
+	    /* some conflicts can be resolved by
+	     * forcing the bit off. this may cause
+	     * some performance degradation in
+	     * the stream(s) that want the bit on,
+	     * but the alternative is not allowing
+	     * the new stream at all.
+	     */
+#ifdef colin
+	    if (fix = bad & (BRIDGE_DEV_WRGA_BITS |
+			     BRIDGE_DEV_PREF)) {
+#else
+	    if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
+			     BRIDGE_DEV_PREF)) ){
+#endif
+		bad &= ~fix;
+		/* don't change these bits if
+		 * we wanted to turn them on.
+		 */
+		chg &= ~(fix & new);
+	    }
+	    /* conflicts in other bits mean
+	     * we can not establish this DMA
+	     * channel while the other(s) are
+	     * still present.
+	     */
+	    if (bad) {
+		pcibr_unlock(pcibr_soft, s);
+#if (DEBUG && PCIBR_DEV_DEBUG)
+		printk("pcibr_try_set_device: mod blocked by %R\n", bad, device_bits);
+#endif
+		return bad;
+	    }
+	}
+    }
+    /* success: bump the use counter for the bit group we touched */
+    if (mask == BRIDGE_DEV_PMU_BITS)
+	slotp->bss_pmu_uctr++;
+    if (mask == BRIDGE_DEV_D32_BITS)
+	slotp->bss_d32_uctr++;
+    if (mask == BRIDGE_DEV_D64_BITS)
+	slotp->bss_d64_uctr++;
+
+    /* the value we want to write is the
+     * original value, with the bits for
+     * our selected changes flipped, and
+     * with any disabled features turned off.
+     */
+    new = old ^ chg;			/* only change what we want to change */
+
+    if (slotp->bss_device == new) {
+	pcibr_unlock(pcibr_soft, s);
+	return 0;
+    }
+    bridge->b_device[slot].reg = new;
+    slotp->bss_device = new;
+    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+    pcibr_unlock(pcibr_soft, s);
+#if DEBUG && PCIBR_DEV_DEBUG
+    printk("pcibr Device(%d): 0x%p\n", slot, bridge->b_device[slot].reg);
+#endif
+
+    return 0;
+}
+
+/* pcibr_release_device: drop the per-slot use counter taken by a
+ * successful pcibr_try_set_device call with the same mask.
+ */
+void
+pcibr_release_device(pcibr_soft_t pcibr_soft,
+		     pciio_slot_t slot,
+		     bridgereg_t mask)
+{
+    pcibr_soft_slot_t       slotp;
+    unsigned                s;
+
+    slotp = &pcibr_soft->bs_slot[slot];
+
+    s = pcibr_lock(pcibr_soft);
+
+    if (mask == BRIDGE_DEV_PMU_BITS)
+	slotp->bss_pmu_uctr--;
+    if (mask == BRIDGE_DEV_D32_BITS)
+	slotp->bss_d32_uctr--;
+    if (mask == BRIDGE_DEV_D64_BITS)
+	slotp->bss_d64_uctr--;
+
+    pcibr_unlock(pcibr_soft, s);
+}
+
+/*
+ * flush write gather buffer for slot
+ */
+LOCAL void
+pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
+              pciio_slot_t slot)
+{
+    bridge_t               *bridge;
+    unsigned                s;
+    volatile uint32_t     wrf;
+    s = pcibr_lock(pcibr_soft);
+    bridge = pcibr_soft->bs_base;
+    /* the volatile read itself is the flush; wrf is otherwise unused */
+    wrf = bridge->b_wr_req_buf[slot].reg;
+    pcibr_unlock(pcibr_soft, s);
+}
+
+/* =====================================================================
+ *    Bridge (pcibr) "Device Driver" entry points
+ */
+
+/*
+ * pcibr_probe_slot: read a config space word
+ * while trapping any errors; return zero if
+ * all went OK, or nonzero if there was an error.
+ * The value read, if any, is passed back
+ * through the valp parameter.
+ */
+LOCAL int
+pcibr_probe_slot(bridge_t *bridge,
+		 cfg_p cfg,
+		 unsigned *valp)
+{
+    int                     rv;
+    bridgereg_t             old_enable, new_enable;
+
+    /* mask the PCI master timeout interrupt while probing,
+     * since an empty slot will time out; restored below.
+     */
+    old_enable = bridge->b_int_enable;
+    new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
+
+    bridge->b_int_enable = new_enable;
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#if defined(BRINGUP)
+	/*
+	 * The xbridge doesn't clear b_err_int_view unless
+	 * multi-err is cleared...
+	 */
+	if (is_xbridge(bridge))
+	    if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT) {
+		bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
+	    }
+#endif	/* BRINGUP */
+#endif	/* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+
+    /* clear any pending PCI-group interrupt state before the probe */
+    if (bridge->b_int_status & BRIDGE_IRR_PCI_GRP) {
+	bridge->b_int_rst_stat = BRIDGE_IRR_PCI_GRP_CLR;
+	(void) bridge->b_wid_tflush;	/* flushbus */
+    }
+    rv = badaddr_val((void *) cfg, 4, valp);
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#if defined(BRINGUP)
+	/*
+	 * The xbridge doesn't set master timeout in b_int_status
+	 * here.  Fortunately it's in error_interrupt_view.
+	 */
+	if (is_xbridge(bridge))
+	    if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT) {
+		bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
+		rv = 1;		/* unoccupied slot */
+	    }
+#endif	/* BRINGUP */
+#endif /* CONFIG_SGI_IP35 */
+
+    bridge->b_int_enable = old_enable;
+    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+
+    return rv;
+}
+
+/*
+ *    pcibr_init: called once during system startup or
+ *      when a loadable driver is loaded.
+ *
+ *      The driver_register function should normally
+ *      be in _reg, not _init.  But the pcibr driver is
+ *      required by devinit before the _reg routines
+ *      are called, so this is an exception.
+ */
+void
+pcibr_init(void)
+{
+#if DEBUG && ATTACH_DEBUG
+    printk("pcibr_init\n");
+#endif
+
+    /* register the same driver prefix for both the XBridge
+     * and the original Bridge widget IDs.
+     */
+    xwidget_driver_register(XBRIDGE_WIDGET_PART_NUM,
+			    XBRIDGE_WIDGET_MFGR_NUM,
+			    "pcibr_",
+			    0);
+    xwidget_driver_register(BRIDGE_WIDGET_PART_NUM,
+			    BRIDGE_WIDGET_MFGR_NUM,
+			    "pcibr_",
+			    0);
+}
+
+/*
+ * open/close mmap/munmap interface would be used by processes
+ * that plan to map the PCI bridge, and muck around with the
+ * registers. This is dangerous to do, and will be allowed
+ * to a select brand of programs. Typically these are
+ * diagnostics programs, or some user level commands we may
+ * write to do some weird things.
+ * To start with expect them to have root privileges.
+ * We will ask for more later.
+ */
+/* ARGSUSED */
+int
+pcibr_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
+{
+#ifndef CONFIG_IA64_SGI_IO
+    /* require device-management capability to open */
+    if (!_CAP_CRABLE((uint64_t)credp, (uint64_t)CAP_DEVICE_MGT))
+	return EPERM;
+#endif
+    return 0;
+}
+
+/*ARGSUSED */
+/* nothing to tear down on close */
+int
+pcibr_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
+{
+    return 0;
+}
+
+/*ARGSUSED */
+/* pcibr_map: map bridge register space into a user process;
+ * also enables flash-prom writes when the flash region is mapped.
+ */
+int
+pcibr_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
+{
+    int                     error;
+    devfs_handle_t            vhdl = dev_to_vhdl(dev);
+    devfs_handle_t            pcibr_vhdl = hwgraph_connectpt_get(vhdl);
+    pcibr_soft_t            pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    bridge_t               *bridge = pcibr_soft->bs_base;
+
+    hwgraph_vertex_unref(pcibr_vhdl);
+
+    ASSERT(pcibr_soft);
+    len = ctob(btoc(len));		/* Make len page aligned */
+    error = v_mapphys(vt, (void *) ((__psunsigned_t) bridge + off), len);
+
+    /*
+     * If the offset being mapped corresponds to the flash prom
+     * base, and if the mapping succeeds, and if the user
+     * has requested the protections to be WRITE, enable the
+     * flash prom to be written.
+     *
+     * XXX- deprecate this in favor of using the
+     * real flash driver ...
+     */
+    /* NOTE(review): comparing len (a length) against the
+     * BRIDGE_EXTERNAL_FLASH offset constant looks suspicious;
+     * confirm whether (off + len > BRIDGE_EXTERNAL_FLASH)
+     * was intended.
+     */
+    if (!error &&
+	((off == BRIDGE_EXTERNAL_FLASH) ||
+	 (len > BRIDGE_EXTERNAL_FLASH))) {
+	int                     s;
+
+	/*
+	 * ensure that we write and read without any interruption.
+	 * The read following the write is required for the Bridge war
+	 */
+	s = splhi();
+	bridge->b_wid_control |= BRIDGE_CTRL_FLASH_WR_EN;
+	bridge->b_wid_control;		/* inval addr bug war */
+	splx(s);
+    }
+    return error;
+}
+
+/*ARGSUSED */
+/* pcibr_unmap: undo pcibr_map; drops the flash write enable
+ * if it was turned on by a previous mapping.
+ */
+int
+pcibr_unmap(devfs_handle_t dev, vhandl_t *vt)
+{
+    devfs_handle_t            pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t) dev);
+    pcibr_soft_t            pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    bridge_t               *bridge = pcibr_soft->bs_base;
+
+    hwgraph_vertex_unref(pcibr_vhdl);
+
+    /*
+     * If flashprom write was enabled, disable it, as
+     * this is the last unmap.
+     */
+    if (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN) {
+	int                     s;
+
+	/*
+	 * ensure that we write and read without any interruption.
+	 * The read following the write is required for the Bridge war
+	 */
+	s = splhi();
+	bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
+	bridge->b_wid_control;		/* inval addr bug war */
+	splx(s);
+    }
+    return 0;
+}
+
+/* This is special case code used by grio. There are plans to make
+ * this a bit more general in the future, but till then this should
+ * be sufficient.
+ */
+pciio_slot_t
+pcibr_device_slot_get(devfs_handle_t dev_vhdl)
+{
+    char                    devname[MAXDEVNAME];
+    devfs_handle_t            tdev;
+    pciio_info_t            pciio_info;
+    pciio_slot_t            slot = PCIIO_SLOT_NONE;
+
+    /* NOTE(review): devname is filled in but never used below */
+    vertex_to_name(dev_vhdl, devname, MAXDEVNAME);
+
+    /* run back along the canonical path
+     * until we find a PCI connection point.
+     */
+    tdev = hwgraph_connectpt_get(dev_vhdl);
+    while (tdev != GRAPH_VERTEX_NONE) {
+	pciio_info = pciio_info_chk(tdev);
+	if (pciio_info) {
+	    slot = pciio_info_slot_get(pciio_info);
+	    break;
+	}
+	hwgraph_vertex_unref(tdev);
+	tdev = hwgraph_connectpt_get(tdev);
+    }
+    /* NOTE(review): if the loop fell through, this unrefs
+     * GRAPH_VERTEX_NONE -- presumably a harmless no-op; verify.
+     */
+    hwgraph_vertex_unref(tdev);
+
+    return slot;
+}
+/*==========================================================================
+ *	BRIDGE PCI SLOT RELATED IOCTLs
+ */
+/*
+ * pcibr_slot_powerup
+ *	Software initialize the pci slot.
+ *	Returns 0 on success, EINVAL on a bad slot or attach failure.
+ */
+int
+pcibr_slot_powerup(devfs_handle_t pcibr_vhdl,pciio_slot_t slot)
+{
+    /* Check for the valid slot */
+    if (!PCIBR_VALID_SLOT(slot))
+	return(EINVAL);
+
+    if (pcibr_device_attach(pcibr_vhdl,slot))
+	return(EINVAL);
+
+    return(0);
+}
+/*
+ * pcibr_slot_shutdown
+ *	Software shutdown the pci slot
+ *	Returns 0 on success, EINVAL on a bad slot or detach failure.
+ */
+int
+pcibr_slot_shutdown(devfs_handle_t pcibr_vhdl,pciio_slot_t slot)
+{
+    /* Check for valid slot */
+    if (!PCIBR_VALID_SLOT(slot))
+	return(EINVAL);
+
+    if (pcibr_device_detach(pcibr_vhdl,slot))
+	return(EINVAL);
+
+    return(0);
+}
+
+/* printable names for PCI space codes, indexed by the
+ * w_space value printed in pcibr_slot_func_info_print below.
+ */
+char *pci_space_name[] = {"NONE", 
+			  "ROM",
+			  "IO",
+			  "",
+			  "MEM",
+			  "MEM32",
+			  "MEM64",
+			  "CFG",
+			  "WIN0",
+			  "WIN1",
+			  "WIN2",
+			  "WIN3",
+			  "WIN4",
+			  "WIN5",
+			  "",
+			  "BAD"};
+
+/*
+ * pcibr_slot_func_info_print
+ *	Dump the pcibr_info for one (slot, function) pair to the
+ *	console. In verbose mode additionally print bus/slot/function
+ *	numbers, the bus provider and the error handler hooks.
+ *
+ *	FIX: name[] was printed uninitialized whenever
+ *	SUPPORT_PRINTING_V_FORMAT is not defined (stack garbage in
+ *	the "Slot Name"/"Bus provider" lines); it is now cleared
+ *	first. The redundant second sprintf of f_vertex in the
+ *	verbose path was dropped (same buffer, same value).
+ */
+void
+pcibr_slot_func_info_print(pcibr_info_h pcibr_infoh, int func, int verbose)
+{
+    pcibr_info_t	pcibr_info = pcibr_infoh[func];
+    char		name[MAXDEVNAME];
+    int			win;
+    
+    if (!pcibr_info)
+	return;
+
+    /* ensure a terminated string even without %v formatting support */
+    name[0] = '\0';
+#ifdef SUPPORT_PRINTING_V_FORMAT
+    sprintf(name, "%v", pcibr_info->f_vertex);
+#endif
+    if (!verbose) {
+	printk("\tSlot Name : %s\n",name);
+    } else {
+	printk("\tPER-SLOT FUNCTION INFO\n");
+	printk("\tSlot Name : %s\n",name);
+	printk("\tPCI Bus : %d ",pcibr_info->f_bus);
+	printk("Slot : %d ", pcibr_info->f_slot);
+	printk("Function : %d\n", pcibr_info->f_func);
+#ifdef SUPPORT_PRINTING_V_FORMAT
+	sprintf(name, "%v", pcibr_info->f_master);
+#endif
+	printk("\tBus provider : %s\n",name);
+	printk("\tProvider Fns : 0x%p ", pcibr_info->f_pops);
+	printk("Error Handler : 0x%p Arg 0x%p\n", 
+		pcibr_info->f_efunc,pcibr_info->f_einfo);
+    }
+    printk("\tVendorId : 0x%x " , pcibr_info->f_vendor);
+    printk("DeviceId : 0x%x\n", pcibr_info->f_device);
+
+    printk("\n\tBase Register Info\n");
+    printk("\t\tReg#\tBase\t\tSize\t\tSpace\n");
+    /* six BASE-register windows per function */
+    for(win = 0 ; win < 6 ; win++) 
+	printk("\t\t%d\t0x%lx\t%s0x%lx\t%s%s\n",
+		win,
+		pcibr_info->f_window[win].w_base,
+		pcibr_info->f_window[win].w_base >= 0x100000 ? "": "\t",
+		pcibr_info->f_window[win].w_size,
+		pcibr_info->f_window[win].w_size >= 0x100000 ? "": "\t",
+		pci_space_name[pcibr_info->f_window[win].w_space]);
+
+    /* expansion ROM base/size, printed as pseudo-register 7 */
+    printk("\t\t7\t0x%x\t%s0x%x\t%sROM\n", 
+	    pcibr_info->f_rbase,
+	    pcibr_info->f_rbase > 0x100000 ? "" : "\t",
+	    pcibr_info->f_rsize,
+	    pcibr_info->f_rsize > 0x100000 ? "" : "\t");
+
+    printk("\n\tInterrupt Bit Map\n");
+    printk("\t\tPCI Int#\tBridge Pin#\n");
+    for (win = 0 ; win < 4; win++)
+	printk("\t\tINT%c\t\t%d\n",win+'A',pcibr_info->f_ibit[win]);
+    printk("\n");
+}
+
+
+/*
+ * pcibr_slot_info_print
+ *	Dump the infrastructural state kept for one bridge slot:
+ *	per-function info, DevIO window, RRB allocation (software
+ *	view and the live response registers), and the interrupt
+ *	device/enable/host-address registers.
+ *
+ *	FIX: slot_conn_name[] was printed uninitialized whenever
+ *	SUPPORT_PRINTING_V_FORMAT is not defined; it is now cleared
+ *	before use.
+ */
+void
+pcibr_slot_info_print(pcibr_soft_t 	pcibr_soft, 
+		      pciio_slot_t 	slot, 
+		      int	   	verbose)
+{
+    pcibr_soft_slot_t	pss;
+    char		slot_conn_name[MAXDEVNAME];
+    int			func;
+    bridge_t		*bridge = pcibr_soft->bs_base;
+    bridgereg_t		b_resp;
+    reg_p		b_respp;
+    int			dev;
+    bridgereg_t		b_int_device;
+    bridgereg_t		b_int_host;
+    bridgereg_t		b_int_enable;
+    int			pin = 0;
+    int			int_bits = 0;
+
+    pss = &pcibr_soft->bs_slot[slot];
+    
+    printk("\nPCI INFRASTRUCTURAL INFO FOR SLOT %d\n\n", slot);
+
+    if (verbose) {
+	printk("\tHost Present ? %s ", pss->has_host ? "yes" : "no");
+	printk("\tHost Slot : %d\n",pss->host_slot);
+	/* ensure a terminated string even without %v formatting support */
+	slot_conn_name[0] = '\0';
+#ifdef SUPPORT_PRINTING_V_FORMAT
+	sprintf(slot_conn_name, "%v", pss->slot_conn);
+#endif
+	printk("\tSlot Conn : %s\n",slot_conn_name);	
+	printk("\t#Functions : %d\n",pss->bss_ninfo);
+    }
+    for (func = 0; func < pss->bss_ninfo; func++)
+	pcibr_slot_func_info_print(pss->bss_infos,func, verbose);
+    printk("\tDevio[Space:%s,Base:0x%lx,Shadow:0x%x]\n",
+	    pci_space_name[pss->bss_devio.bssd_space],
+	    pss->bss_devio.bssd_base,
+	    pss->bss_device);
+
+    if (verbose) {
+	printk("\tUsage counts : pmu %d d32 %d d64 %d\n",
+		pss->bss_pmu_uctr,pss->bss_d32_uctr,pss->bss_d64_uctr);
+    
+	printk("\tDirect Trans Info : d64_base 0x%x d64_flags 0x%x"
+		"d32_base 0x%x d32_flags 0x%x\n",
+		(unsigned int)pss->bss_d64_base, pss->bss_d64_flags,
+		(unsigned int)pss->bss_d32_base, pss->bss_d32_flags);
+    
+	printk("\tExt ATEs active ? %s", 
+		pss->bss_ext_ates_active ? "yes" : "no");
+	printk(" Command register : 0x%p ", pss->bss_cmd_pointer);
+	printk(" Shadow command val : 0x%x\n", pss->bss_cmd_shadow);
+    }
+
+    printk("\tSoft RRB Info[Valid %d+%d, Reserved %d]\n",
+	    pcibr_soft->bs_rrb_valid[slot],
+	    pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
+	    pcibr_soft->bs_rrb_res[slot]);
+
+
+    /* odd and even slots share two separate response registers */
+    if (slot & 1)
+	b_respp = &bridge->b_odd_resp;
+    else
+	b_respp = &bridge->b_even_resp;
+
+    b_resp = *b_respp;
+
+    printk("\n\tBridge RRB Info\n");
+    printk("\t\tRRB#\tVirtual\n");
+    /* each of the 8 RRBs is described by a 4-bit nibble of b_resp */
+    for (dev = 0; dev < 8; dev++) {
+	if ((b_resp & BRIDGE_RRB_EN) &&
+	    (b_resp & BRIDGE_RRB_PDEV) == (slot >> 1))
+	    printk( "\t\t%d\t%s\n", 
+		    dev,
+		    (b_resp & BRIDGE_RRB_VDEV) ? "yes" : "no");
+	b_resp >>= 4;
+	    
+    }
+    b_int_device = bridge->b_int_device;
+    b_int_enable = bridge->b_int_enable;
+
+    printk("\n\tBridge Interrupt Info\n"
+	    "\t\tInt_device 0x%x\n\t\tInt_enable 0x%x "
+	    "\n\t\tEnabled pin#s for this slot: ",
+	    b_int_device,
+	    b_int_enable);
+
+    /* b_int_device packs a 3-bit device number per interrupt pin;
+     * list the enabled pins routed to this slot
+     */
+    while (b_int_device) {
+	if (((b_int_device & 7) == slot) &&
+	    (b_int_enable & (1 << pin))) {
+	    int_bits |= (1 << pin);
+	    printk("%d ", pin); 
+	}
+	pin++;
+	b_int_device >>= 3;
+    }
+
+    if (!int_bits)
+	printk("NONE ");
+
+    b_int_host = bridge->b_int_addr[slot].addr;
+
+    printk("\n\t\tInt_host_addr 0x%x\n",
+	    b_int_host);
+    
+}
+
+/* NOTE(review): non-static global with a very generic name; risks a
+ * link-time collision with other kernel objects — consider renaming
+ * or making it static if nothing outside this file references it.
+ */
+int verbose = 0;
+/*
+ * pcibr_slot_inquiry
+ *	Print information about the pci slot maintained by the infrastructure.
+ *	Current information displayed
+ *		Slot hwgraph name
+ *		Vendor/Device info
+ *		Base register info
+ *		Interrupt mapping from device pins to the bridge pins
+ *		Devio register
+ *		Software RRB info
+ *		RRB register info
+ *	In verbose mode following additional info is displayed
+ *		Host/Gues info
+ *		PCI Bus #,slot #, function #
+ *		Slot provider hwgraph name
+ *		Provider Functions
+ *		Error handler
+ *		DMA mapping usage counters
+ *		DMA direct translation info
+ *		External SSRAM workaround info
+ *
+ *	Returns 0 on success, EINVAL for a bad vertex or slot number.
+ */
+int
+pcibr_slot_inquiry(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
+{
+    pcibr_soft_t	pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+
+    /* Make sure that we are dealing with a bridge device vertex */
+    if (!pcibr_soft)
+	return(EINVAL);
+
+    /* Make sure that we have a valid pci slot number or PCIIO_SLOT_NONE */
+    if ((!PCIBR_VALID_SLOT(slot)) && (slot != PCIIO_SLOT_NONE))
+	return(EINVAL);
+
+    /* Print information for the requested pci slot */
+    if (slot != PCIIO_SLOT_NONE) {
+	pcibr_slot_info_print(pcibr_soft,slot,verbose);
+	return(0);
+    }
+    /* PCIIO_SLOT_NONE means "all slots": print information for each */
+    for (slot = 0; slot < 8; slot++)
+	pcibr_slot_info_print(pcibr_soft, slot,verbose);
+    return(0);
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_ioctl
+ *	ioctl entry point for the bridge vertex. Supports slot
+ *	powerup/shutdown/inquiry (all privileged via CAP_DEVICE_MGT);
+ *	the grio bandwidth controls are compiled out (#ifdef colin).
+ *	The slot number is passed by value in "arg".
+ *	Unknown commands return 0 rather than an error.
+ */
+int
+pcibr_ioctl(devfs_handle_t dev,
+	    int cmd,
+	    void *arg,
+	    int flag,
+	    struct cred *cr,
+	    int *rvalp)
+{
+    devfs_handle_t            pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t)dev);
+#ifdef colin
+    pcibr_soft_t            pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+#endif
+    int                     error = 0;
+
+    /* NOTE(review): the reference on pcibr_vhdl is dropped here but
+     * the handle is still used by the cases below — relies on the
+     * vertex staying alive for the duration of the ioctl; confirm.
+     */
+    hwgraph_vertex_unref(pcibr_vhdl);
+
+    switch (cmd) {
+#ifdef colin
+    case GIOCSETBW:
+	{
+	    grio_ioctl_info_t       info;
+	    pciio_slot_t            slot = 0;
+
+	    if (!cap_able((uint64_t)CAP_DEVICE_MGT)) {
+		error = EPERM;
+		break;
+	    }
+	    if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
+		error = EFAULT;
+		break;
+	    }
+#ifdef GRIO_DEBUG
+	    printk("pcibr:: prev_vhdl: %d reqbw: %lld\n",
+		    info.prev_vhdl, info.reqbw);
+#endif				/* GRIO_DEBUG */
+
+	    if ((slot = pcibr_device_slot_get(info.prev_vhdl)) ==
+		PCIIO_SLOT_NONE) {
+		error = EIO;
+		break;
+	    }
+	    /* raise the slot's arbitration priority for guaranteed-rate I/O */
+	    if (info.reqbw)
+		pcibr_priority_bits_set(pcibr_soft, slot, PCI_PRIO_HIGH);
+	    break;
+	}
+
+    case GIOCRELEASEBW:
+	{
+	    grio_ioctl_info_t       info;
+	    pciio_slot_t            slot = 0;
+
+	    if (!cap_able(CAP_DEVICE_MGT)) {
+		error = EPERM;
+		break;
+	    }
+	    if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
+		error = EFAULT;
+		break;
+	    }
+#ifdef GRIO_DEBUG
+	    printk("pcibr:: prev_vhdl: %d reqbw: %lld\n",
+		    info.prev_vhdl, info.reqbw);
+#endif				/* GRIO_DEBUG */
+
+	    if ((slot = pcibr_device_slot_get(info.prev_vhdl)) ==
+		PCIIO_SLOT_NONE) {
+		error = EIO;
+		break;
+	    }
+	    /* drop the slot back to normal arbitration priority */
+	    if (info.reqbw)
+		pcibr_priority_bits_set(pcibr_soft, slot, PCI_PRIO_LOW);
+	    break;
+	}
+#endif /* colin */
+
+    case PCIBR_SLOT_POWERUP:
+	{
+	    pciio_slot_t	slot;
+
+	    if (!cap_able(CAP_DEVICE_MGT)) {
+		error = EPERM;
+		break;
+	    }
+
+	    /* slot number is passed by value, not by pointer */
+	    slot = (pciio_slot_t)(uint64_t)arg;
+	    error = pcibr_slot_powerup(pcibr_vhdl,slot);
+	    break;
+	}
+    case PCIBR_SLOT_SHUTDOWN:
+	{
+	    pciio_slot_t	slot;
+
+	    if (!cap_able(CAP_DEVICE_MGT)) {
+		error = EPERM;
+		break;
+	    }
+
+	    slot = (pciio_slot_t)(uint64_t)arg;
+	    error = pcibr_slot_shutdown(pcibr_vhdl,slot);
+	    break;
+	}
+    case PCIBR_SLOT_INQUIRY:
+	{
+	    pciio_slot_t	slot;
+
+	    if (!cap_able(CAP_DEVICE_MGT)) {
+		error = EPERM;
+		break;
+	    }
+
+	    slot = (pciio_slot_t)(uint64_t)arg;
+	    error = pcibr_slot_inquiry(pcibr_vhdl,slot);
+	    break;
+	}
+    default:
+	break;
+
+    }
+
+    return error;
+}
+
+/*
+ * pcibr_freeblock_sub
+ *	Subtract the block [base, base+size-1] from the free arena
+ *	[*free_basep, *free_lastp]. If the block would split the
+ *	arena in two, keep whichever remaining piece is bigger
+ *	(the arena is tracked as a single contiguous range).
+ */
+void
+pcibr_freeblock_sub(iopaddr_t *free_basep,
+		    iopaddr_t *free_lastp,
+		    iopaddr_t base,
+		    size_t size)
+{
+    iopaddr_t               arena_base = *free_basep;
+    iopaddr_t               arena_last = *free_lastp;
+    iopaddr_t               last = base + size - 1;
+
+    if ((last < arena_base) || (base > arena_last)) {
+	/* block entirely outside the arena: nothing to trim */
+    } else if ((base <= arena_base) && (last >= arena_last)) {
+	/* block covers the entire arena: arena becomes empty */
+	*free_basep = *free_lastp = 0;
+    } else if (base <= arena_base) {
+	/* block overlaps the head of the arena */
+	*free_basep = last + 1;
+    } else if (last >= arena_last) {
+	/* block overlaps the tail of the arena */
+	*free_lastp = base - 1;
+    } else if ((base - arena_base) > (arena_last - last)) {
+	/* block splits the arena: lower piece is bigger, keep it */
+	*free_lastp = base - 1;
+    } else {
+	/* block splits the arena: upper piece is bigger, keep it */
+	*free_basep = last + 1;
+    }
+}
+
+#ifdef IRIX
+/* Convert from ssram_bits in control register to number of SSRAM entries */
+#define ATE_NUM_ENTRIES(n) _ate_info[n]
+
+/* Possible choices for number of ATE entries in Bridge's SSRAM */
+LOCAL int               _ate_info[] =
+{
+    0,					/* 0 entries */
+    8 * 1024,				/* 8K entries */
+    16 * 1024,				/* 16K entries */
+    64 * 1024				/* 64K entries */
+};
+
+#define ATE_NUM_SIZES (sizeof(_ate_info) / sizeof(int))
+#define ATE_PROBE_VALUE 0x0123456789abcdefULL
+/* NOTE(review): these definitions are guarded by IRIX but their only
+ * visible consumer (pcibr_init_ext_ate_ram) is guarded by !BRINGUP —
+ * if that function is ever compiled without IRIX these names will be
+ * undefined; confirm the intended guard combination.
+ */
+#endif	/* IRIX */
+
+/*
+ * Determine the size of this bridge's external mapping SSRAM, and set
+ * the control register appropriately to reflect this size, and initialize
+ * the external SSRAM.
+ *
+ * Returns the number of external ATE entries found (0 for xbridge,
+ * which has no external SSRAM).
+ *
+ * FIX: corrected the garbled diagnostic string
+ * "no externa9422l ATE RAM found" -> "no external ATE RAM found".
+ */
+#ifndef BRINGUP
+LOCAL int
+pcibr_init_ext_ate_ram(bridge_t *bridge)
+{
+    int                     largest_working_size = 0;
+    int                     num_entries, entry;
+    int                     i, j;
+    bridgereg_t             old_enable, new_enable;
+    int                     s;
+
+    if (is_xbridge(bridge))
+	return 0;
+
+    /* Probe SSRAM to determine its size; suppress master-timeout
+     * interrupts while probing addresses that may not respond.
+     */
+    old_enable = bridge->b_int_enable;
+    new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
+    bridge->b_int_enable = new_enable;
+
+    for (i = 1; i < ATE_NUM_SIZES; i++) {
+	/* Try writing a value */
+	bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
+
+	/* Guard against wrap */
+	for (j = 1; j < i; j++)
+	    bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(j) - 1] = 0;
+
+	/* See if value was written */
+	if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
+	    largest_working_size = i;
+    }
+    bridge->b_int_enable = old_enable;
+    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+
+    /*
+     * ensure that we write and read without any interruption.
+     * The read following the write is required for the Bridge war
+     */
+
+    s = splhi();
+#ifdef colin
+    bridge->b_wid_control = (bridge->b_wid_control
+	& ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
+	| BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
+#endif
+    bridge->b_wid_control;		/* inval addr bug war */
+    splx(s);
+
+    num_entries = ATE_NUM_ENTRIES(largest_working_size);
+
+#if PCIBR_ATE_DEBUG
+    if (num_entries)
+	printk("bridge at 0x%x: clearing %d external ATEs\n", bridge, num_entries);
+    else
+	printk("bridge at 0x%x: no external ATE RAM found\n", bridge);
+#endif
+
+    /* Initialize external mapping entries */
+    for (entry = 0; entry < num_entries; entry++)
+	bridge->b_ext_ate_ram[entry] = 0;
+
+    return (num_entries);
+}
+#endif	/* !BRINGUP */
+
+/*
+ * Allocate "count" contiguous Bridge Address Translation Entries
+ * on the specified bridge to be used for PCI to XTALK mappings.
+ * Indices in rm map range from 1..num_entries.  Indicies returned
+ * to caller range from 0..num_entries-1.
+ *
+ * Return the start index on success, -1 on failure.
+ */
+LOCAL int
+pcibr_ate_alloc(pcibr_soft_t pcibr_soft, int count)
+{
+    int                     index = 0;
+
+    /* try the internal ATE map first, then fall back to the
+     * external SSRAM map if one exists
+     */
+    index = (int) rmalloc(pcibr_soft->bs_int_ate_map, (size_t) count);
+
+    if (!index && pcibr_soft->bs_ext_ate_map)
+	index = (int) rmalloc(pcibr_soft->bs_ext_ate_map, (size_t) count);
+
+    /* rmalloc manages resources in the 1..n
+     * range, with 0 being failure.
+     * pcibr_ate_alloc manages resources
+     * in the 0..n-1 range, with -1 being failure.
+     */
+    return index - 1;
+}
+
+/* pcibr_ate_free: return "count" ATEs starting at "index" to the map
+ * they came from — internal for indices below bs_int_ate_size,
+ * external SSRAM otherwise (mirrors the pcibr_ate_alloc fallback).
+ */
+LOCAL void
+pcibr_ate_free(pcibr_soft_t pcibr_soft, int index, int count)
+/* Who says there's no such thing as a free meal? :-) */
+{
+    /* note the "+1" since rmalloc handles 1..n but
+     * we start counting ATEs at zero.
+     */
+    rmfree((index < pcibr_soft->bs_int_ate_size)
+	   ? pcibr_soft->bs_int_ate_map
+	   : pcibr_soft->bs_ext_ate_map,
+	   count, index + 1);
+}
+
+/* pcibr_info_get: fetch the pciio info hanging off a vertex and view
+ * it as the bridge-private pcibr_info extension.
+ */
+LOCAL pcibr_info_t
+pcibr_info_get(devfs_handle_t vhdl)
+{
+    pciio_info_t	generic_info = pciio_info_get(vhdl);
+
+    return (pcibr_info_t) generic_info;
+}
+
+pcibr_info_t
+pcibr_device_info_new(
+			 pcibr_soft_t pcibr_soft,
+			 pciio_slot_t slot,
+			 pciio_function_t rfunc,
+			 pciio_vendor_id_t vendor,
+			 pciio_device_id_t device)
+{
+    pcibr_info_t            pcibr_info;
+    pciio_function_t        func;
+    int                     ibit;
+
+    func = (rfunc == PCIIO_FUNC_NONE) ? 0 : rfunc;
+
+    NEW(pcibr_info);
+    pciio_device_info_new(&pcibr_info->f_c,
+			  pcibr_soft->bs_vhdl,
+			  slot, rfunc,
+			  vendor, device);
+
+    if (slot != PCIIO_SLOT_NONE) {
+
+	/*
+	 * Currently favored mapping from PCI
+	 * slot number and INTA/B/C/D to Bridge
+	 * PCI Interrupt Bit Number:
+	 *
+	 *     SLOT     A B C D
+	 *      0       0 4 0 4
+	 *      1       1 5 1 5
+	 *      2       2 6 2 6
+	 *      3       3 7 3 7
+	 *      4       4 0 4 0
+	 *      5       5 1 5 1
+	 *      6       6 2 6 2
+	 *      7       7 3 7 3
+	 *
+	 * XXX- allow pcibr_hints to override default
+	 * XXX- allow ADMIN to override pcibr_hints
+	 */
+	for (ibit = 0; ibit < 4; ++ibit)
+	    pcibr_info->f_ibit[ibit] =
+		(slot + 4 * ibit) & 7;
+
+	/*
+	 * Record the info in the sparse func info space.
+	 */
+printk("pcibr_device_info_new: slot= %d  func= %d  bss_ninfo= %d  pcibr_info= 0x%p\n", slot, func, pcibr_soft->bs_slot[slot].bss_ninfo, pcibr_info);
+
+	if (func < pcibr_soft->bs_slot[slot].bss_ninfo)
+	    pcibr_soft->bs_slot[slot].bss_infos[func] = pcibr_info;
+    }
+    return pcibr_info;
+}
+
+/*
+ * pcibr_device_info_free
+ *	Unregister and free every per-function pcibr_info recorded
+ *	for "slot", then reset the slot's DevIO window, mapping-usage
+ *	counters, direct-translation bases and external-SSRAM shadow
+ *	state back to their unset defaults.
+ */
+void
+pcibr_device_info_free(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
+{
+    pcibr_soft_t	pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    pcibr_info_t	pcibr_info;
+    pciio_function_t	func;
+    pcibr_soft_slot_t	slotp = &pcibr_soft->bs_slot[slot];
+    int			nfunc = slotp->bss_ninfo;
+
+
+    for (func = 0; func < nfunc; func++) {
+	pcibr_info = slotp->bss_infos[func];
+
+	if (!pcibr_info) 
+	    continue;
+
+	/* clear the table slot before tearing the record down */
+	slotp->bss_infos[func] = 0;
+	pciio_device_info_unregister(pcibr_vhdl, &pcibr_info->f_c);
+	pciio_device_info_free(&pcibr_info->f_c);
+	DEL(pcibr_info);
+    }
+
+    /* Clear the DEVIO(x) for this slot */
+    slotp->bss_devio.bssd_space = PCIIO_SPACE_NONE;
+    slotp->bss_devio.bssd_base = PCIBR_D32_BASE_UNSET;
+    slotp->bss_device  = 0;
+
+    
+    /* Reset the mapping usage counters */
+    slotp->bss_pmu_uctr = 0;
+    slotp->bss_d32_uctr = 0;
+    slotp->bss_d64_uctr = 0;
+
+    /* Clear the Direct translation info */
+    slotp->bss_d64_base = PCIBR_D64_BASE_UNSET;
+    slotp->bss_d64_flags = 0;
+    slotp->bss_d32_base = PCIBR_D32_BASE_UNSET;
+    slotp->bss_d32_flags = 0;
+
+    /* Clear out shadow info necessary for the external SSRAM workaround */
+    slotp->bss_ext_ates_active = 0;
+    slotp->bss_cmd_pointer = 0;
+    slotp->bss_cmd_shadow = 0;
+
+}
+
+/* 
+ * PCI_ADDR_SPACE_LIMITS_LOAD
+ *	Gets the current values of 
+ *		pci io base, 
+ *		pci io last,
+ *		pci low memory base,
+ *		pci low memory last,
+ *		pci high memory base,
+ * 		pci high memory last
+ *	Expects pcibr_soft and the pci_{io,lo,hi}_{fb,fl} locals to be
+ *	in scope at the expansion site.
+ */
+#define PCI_ADDR_SPACE_LIMITS_LOAD()			\
+    pci_io_fb = pcibr_soft->bs_spinfo.pci_io_base;	\
+    pci_io_fl = pcibr_soft->bs_spinfo.pci_io_last;	\
+    pci_lo_fb = pcibr_soft->bs_spinfo.pci_swin_base;	\
+    pci_lo_fl = pcibr_soft->bs_spinfo.pci_swin_last;	\
+    pci_hi_fb = pcibr_soft->bs_spinfo.pci_mem_base;	\
+    pci_hi_fl = pcibr_soft->bs_spinfo.pci_mem_last;
+/*
+ * PCI_ADDR_SPACE_LIMITS_STORE
+ *	Sets the current values of
+ *		pci io base, 
+ *		pci io last,
+ *		pci low memory base,
+ *		pci low memory last,
+ *		pci high memory base,
+ * 		pci high memory last
+ */
+#define PCI_ADDR_SPACE_LIMITS_STORE()			\
+    pcibr_soft->bs_spinfo.pci_io_base = pci_io_fb;	\
+    pcibr_soft->bs_spinfo.pci_io_last = pci_io_fl;	\
+    pcibr_soft->bs_spinfo.pci_swin_base = pci_lo_fb;	\
+    pcibr_soft->bs_spinfo.pci_swin_last = pci_lo_fl;	\
+    pcibr_soft->bs_spinfo.pci_mem_base = pci_hi_fb;	\
+    pcibr_soft->bs_spinfo.pci_mem_last = pci_hi_fl;
+
+/* Debug dump of the saved address-space limits.
+ * FIX: was calling printf, which does not exist in this kernel
+ * environment; use printk like every other diagnostic in this file.
+ */
+#define PCI_ADDR_SPACE_LIMITS_PRINT()			\
+    printk("+++++++++++++++++++++++\n"			\
+	   "IO base 0x%x last 0x%x\n"			\
+	   "SWIN base 0x%x last 0x%x\n"			\
+	   "MEM base 0x%x last 0x%x\n"			\
+	   "+++++++++++++++++++++++\n",			\
+	   pcibr_soft->bs_spinfo.pci_io_base,		\
+	   pcibr_soft->bs_spinfo.pci_io_last,		\
+	   pcibr_soft->bs_spinfo.pci_swin_base,		\
+	   pcibr_soft->bs_spinfo.pci_swin_last,		\
+	   pcibr_soft->bs_spinfo.pci_mem_base,		\
+	   pcibr_soft->bs_spinfo.pci_mem_last);
+
+/*
+ * pcibr_slot_reset
+ *	Reset the pci device in the particular slot .
+ *	Pulses the slot's reset pin in the bridge widget control
+ *	register (card resets on the 0 -> 1 transition), then drains
+ *	the slot's write request buffer.
+ *	Returns 0 on success, 1 for a bad slot or bridge vertex.
+ */
+int
+pcibr_slot_reset(devfs_handle_t pcibr_vhdl,pciio_slot_t slot)
+{
+	pcibr_soft_t		pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+	bridge_t		*bridge;
+	bridgereg_t		ctrlreg,tmp;
+	volatile bridgereg_t	*wrb_flush;
+
+	if (!PCIBR_VALID_SLOT(slot))
+		return(1);
+
+	if (!pcibr_soft)
+		return(1);
+
+	/* Enable the DMA operations from this device of the xtalk widget
+	 * (PCI host bridge in this case).
+	 */
+	xtalk_widgetdev_enable(pcibr_soft->bs_conn, slot);
+	/* Set the reset slot bit in the bridge's wid control register
+	 * to reset the pci slot 
+	 */
+	bridge = pcibr_soft->bs_base;
+	/* Read the bridge widget control and clear out the reset pin
+	 * bit for the corresponding slot. 
+	 */
+	tmp = ctrlreg = bridge->b_wid_control;
+	tmp &= ~BRIDGE_CTRL_RST_PIN(slot); 
+	bridge->b_wid_control = tmp;
+	/* read back to make sure the write has reached the device */
+	tmp = bridge->b_wid_control;
+	/* Restore the old control register back.
+	 * NOTE : pci card gets reset when the reset pin bit
+	 * changes from 0 (set above) to 1 (going to be set now).
+	 */
+	bridge->b_wid_control = ctrlreg;
+
+	/* Flush the write buffers if any !! */
+	wrb_flush = &(bridge->b_wr_req_buf[slot].reg);
+	while (*wrb_flush);
+
+	return(0);
+}
+/*
+ * pcibr_slot_info_init
+ *	Probe for this slot and see if it is populated.
+ *	If it is populated initialize the generic pci infrastructural
+ * 	information associated with this particular pci device.
+ *
+ *	Reads the vendor/device IDs from type-0 config space, walks
+ *	the functions of a multifunction card, sizes each BASE
+ *	register by the standard write-all-ones probe, and subtracts
+ *	any firmware-assigned windows from the bridge's free PCI
+ *	address arenas.
+ *	Returns 0 on success or if the slot is empty/hosted, 1 for a
+ *	bad vertex or slot number.
+ */
+int
+pcibr_slot_info_init(devfs_handle_t 	pcibr_vhdl,
+		     pciio_slot_t 	slot)
+{
+    pcibr_soft_t	    pcibr_soft;
+    pcibr_info_h	    pcibr_infoh;
+    pcibr_info_t	    pcibr_info;
+    bridge_t		   *bridge;
+    cfg_p                   cfgw;
+    unsigned                idword;
+    unsigned                pfail;
+    unsigned                idwords[8];
+    pciio_vendor_id_t       vendor;
+    pciio_device_id_t       device;
+    unsigned                htype;
+    cfg_p                   wptr;
+    int                     win;
+    pciio_space_t           space;
+    iopaddr_t		    pci_io_fb,	pci_io_fl;
+    iopaddr_t		    pci_lo_fb,  pci_lo_fl;
+    iopaddr_t		    pci_hi_fb,  pci_hi_fl;
+    int			    nfunc;
+    pciio_function_t	    rfunc;
+    int			    func;
+    devfs_handle_t	    conn_vhdl;
+    pcibr_soft_slot_t	    slotp;
+    
+    /* Get the basic software information required to proceed */
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (!pcibr_soft)
+	return(1);
+
+    bridge = pcibr_soft->bs_base;
+    if (!PCIBR_VALID_SLOT(slot))
+	return(1);
+
+    slotp = &pcibr_soft->bs_slot[slot];
+
+    /* Load the current values of allocated pci address spaces */
+    PCI_ADDR_SPACE_LIMITS_LOAD();
+
+    /* If we have a host slot (eg: IOC3 has 2 pci slots) and the
+     * initialization is done by the host slot then we are done.
+     */
+    if (pcibr_soft->bs_slot[slot].has_host)
+	return(0);
+    
+    /* Try to read the device-id/vendor-id from the config space */
+    cfgw = bridge->b_type0_cfg_dev[slot].l;
+
+#ifdef BRINGUP
+    /* NOTE(review): bringup-only restriction to slots 3..6 — confirm
+     * this guard is removed for production kernels.
+     */
+    if (slot < 3  || slot == 7) 
+	return (0);
+    else
+#endif /* BRINGUP */
+    if (pcibr_probe_slot(bridge, cfgw, &idword))
+	return(0);
+
+    vendor = 0xFFFF & idword;
+    /* If the vendor id is not valid then the slot is not populated
+     * and we are done.
+     */
+    if (vendor == 0xFFFF)
+	return(0);			/* next slot */
+    
+    device = 0xFFFF & (idword >> 16);
+    htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
+
+    nfunc = 1;
+    rfunc = PCIIO_FUNC_NONE;
+    pfail = 0;
+
+    /* NOTE: if a card claims to be multifunction
+     * but only responds to config space 0, treat
+     * it as a unifunction card.
+     */
+
+    if (htype & 0x80) {		/* MULTIFUNCTION */
+	for (func = 1; func < 8; ++func) {
+	    cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
+	    if (pcibr_probe_slot(bridge, cfgw, &idwords[func])) {
+		pfail |= 1 << func;
+		continue;
+	    }
+	    vendor = 0xFFFF & idwords[func];
+	    if (vendor == 0xFFFF) {
+		pfail |= 1 << func;
+		continue;
+	    }
+	    nfunc = func + 1;
+	    rfunc = 0;
+	}
+	/* point back at function 0's config space */
+	cfgw = bridge->b_type0_cfg_dev[slot].l;
+    }
+    NEWA(pcibr_infoh, nfunc);
+    
+    pcibr_soft->bs_slot[slot].bss_ninfo = nfunc;
+    pcibr_soft->bs_slot[slot].bss_infos = pcibr_infoh;
+
+    for (func = 0; func < nfunc; ++func) {
+	unsigned                cmd_reg;
+	
+	if (func) {
+	    if (pfail & (1 << func))
+		continue;
+	    
+	    idword = idwords[func];
+	    cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
+	    
+	    device = 0xFFFF & (idword >> 16);
+	    htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
+	    rfunc = func;
+	}
+	htype &= 0x7f;
+	/* only header type 0 (normal device) is supported here */
+	if (htype != 0x00) {
+	    PRINT_WARNING("%s pcibr: pci slot %d func %d has strange header type 0x%x\n",
+		    pcibr_soft->bs_name, slot, func, htype);
+	    continue;
+	}
+#if DEBUG && ATTACH_DEBUG
+	PRINT_NOTICE( 
+		"%s pcibr: pci slot %d func %d: vendor 0x%x device 0x%x",
+		pcibr_soft->bs_name, slot, func, vendor, device);
+#endif	
+
+	pcibr_info = pcibr_device_info_new
+	    (pcibr_soft, slot, rfunc, vendor, device);
+	conn_vhdl = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);
+	if (func == 0)
+	    slotp->slot_conn = conn_vhdl;
+	
+	cmd_reg = cfgw[PCI_CFG_COMMAND / 4];
+	
+	wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
+
+
+	for (win = 0; win < PCI_CFG_BASE_ADDRS; ++win) {
+	    iopaddr_t               base, mask, code;
+	    size_t                  size;
+
+	    /*
+	     * GET THE BASE & SIZE OF THIS WINDOW:
+	     *
+	     * The low two or four bits of the BASE register
+	     * determines which address space we are in; the
+	     * rest is a base address. BASE registers
+	     * determine windows that are power-of-two sized
+	     * and naturally aligned, so we can get the size
+	     * of a window by writing all-ones to the
+	     * register, reading it back, and seeing which
+	     * bits are used for decode; the least
+	     * significant nonzero bit is also the size of
+	     * the window.
+	     *
+	     * WARNING: someone may already have allocated
+	     * some PCI space to this window, and in fact
+	     * PIO may be in process at this very moment
+	     * from another processor (or even from this
+	     * one, if we get interrupted)! So, if the BASE
+	     * already has a nonzero address, be generous
+	     * and use the LSBit of that address as the
+	     * size; this could overstate the window size.
+	     * Usually, when one card is set up, all are set
+	     * up; so, since we don't bitch about
+	     * overlapping windows, we are ok.
+	     *
+	     * UNFORTUNATELY, some cards do not clear their
+	     * BASE registers on reset. I have two heuristics
+	     * that can detect such cards: first, if the
+	     * decode enable is turned off for the space
+	     * that the window uses, we can disregard the
+	     * initial value. second, if the address is
+	     * outside the range that we use, we can disregard
+	     * it as well.
+	     *
+	     * This is looking very PCI generic. Except for
+	     * knowing how many slots and where their config
+	     * spaces are, this window loop and the next one
+	     * could probably be shared with other PCI host
+	     * adapters. It would be interesting to see if
+	     * this could be pushed up into pciio, when we
+	     * start supporting more PCI providers.
+	     */
+#ifdef LITTLE_ENDIAN
+	    /* the ^4 swizzle pairs 32-bit words within a 64-bit
+	     * doubleword for little-endian config-space access
+	     */
+	    base = wptr[((win*4)^4)/4];
+#else
+	    base = wptr[win];
+#endif /* LITTLE_ENDIAN */
+
+	    if (base & 1) {
+		/* BASE is in I/O space. */
+		space = PCIIO_SPACE_IO;
+		mask = -4;
+		code = base & 3;
+		base = base & mask;
+		if (base == 0) {
+		    ;		/* not assigned */
+		} else if (!(cmd_reg & PCI_CMD_IO_SPACE)) {
+		    base = 0;	/* decode not enabled */
+		}
+	    } else {
+		/* BASE is in MEM space. */
+		space = PCIIO_SPACE_MEM;
+		mask = -16;
+		code = base & 15;
+		base = base & mask;
+		if (base == 0) {
+		    ;		/* not assigned */
+		} else if (!(cmd_reg & PCI_CMD_MEM_SPACE)) {
+		    base = 0;	/* decode not enabled */
+		} else if (base & 0xC0000000) {
+		    base = 0;	/* outside permissable range */
+		} else if ((code == PCI_BA_MEM_64BIT) &&
+#ifdef LITTLE_ENDIAN
+			   (wptr[(((win + 1)*4)^4)/4] != 0)) {
+#else 
+			   (wptr[win + 1] != 0)) {
+#endif /* LITTLE_ENDIAN */
+		    base = 0;	/* outside permissable range */
+		}
+	    }
+
+	    if (base != 0) {	/* estimate size */
+		size = base & -base;
+	    } else {		/* calculate size */
+#ifdef LITTLE_ENDIAN
+		wptr[((win*4)^4)/4] = ~0;	/* turn on all bits */
+		size = wptr[((win*4)^4)/4];	/* get stored bits */
+#else 
+		wptr[win] = ~0;	/* turn on all bits */
+		size = wptr[win];	/* get stored bits */
+#endif /* LITTLE_ENDIAN */
+		size &= mask;	/* keep addr */
+		size &= -size;	/* keep lsbit */
+		if (size == 0)
+		    continue;
+	    }	
+
+	    pcibr_info->f_window[win].w_space = space;
+	    pcibr_info->f_window[win].w_base = base;
+	    pcibr_info->f_window[win].w_size = size;
+
+	    /*
+	     * If this window already has PCI space
+	     * allocated for it, "subtract" that space from
+	     * our running freeblocks. Don't worry about
+	     * overlaps in existing allocated windows; we
+	     * may be overstating their sizes anyway.
+	     */
+
+	    if (base && size) {
+		if (space == PCIIO_SPACE_IO) {
+		    pcibr_freeblock_sub(&pci_io_fb,
+					&pci_io_fl,
+					base, size);
+		} else {
+		    pcibr_freeblock_sub(&pci_lo_fb,
+					&pci_lo_fl,
+					base, size);
+		    pcibr_freeblock_sub(&pci_hi_fb,
+					&pci_hi_fl,
+					base, size);
+		}	
+	    }
+#if defined(IOC3_VENDOR_ID_NUM) && defined(IOC3_DEVICE_ID_NUM)
+	    /*
+	     * IOC3 BASE_ADDR* BUG WORKAROUND
+	     *
+	     
+	     * If we write to BASE1 on the IOC3, the
+	     * data in BASE0 is replaced. The
+	     * original workaround was to remember
+	     * the value of BASE0 and restore it
+	     * when we ran off the end of the BASE
+	     * registers; however, a later
+	     * workaround was added (I think it was
+	     * rev 1.44) to avoid setting up
+	     * anything but BASE0, with the comment
+	     * that writing all ones to BASE1 set
+	     * the enable-parity-error test feature
+	     * in IOC3's SCR bit 14.
+	     *
+	     * So, unless we defer doing any PCI
+	     * space allocation until drivers
+	     * attach, and set up a way for drivers
+	     * (the IOC3 in paricular) to tell us
+	     * generically to keep our hands off
+	     * BASE registers, we gotta "know" about
+	     * the IOC3 here.
+	     *
+	     * Too bad the PCI folks didn't reserve the
+	     * all-zero value for 'no BASE here' (it is a
+	     * valid code for an uninitialized BASE in
+	     * 32-bit PCI memory space).
+	     */
+	    
+	    if ((vendor == IOC3_VENDOR_ID_NUM) &&
+		(device == IOC3_DEVICE_ID_NUM))
+		break;
+#endif
+	    if (code == PCI_BA_MEM_64BIT) {
+		win++;		/* skip upper half */
+#ifdef LITTLE_ENDIAN
+		wptr[((win*4)^4)/4] = 0;	/* which must be zero */
+#else 
+		wptr[win] = 0;	/* which must be zero */
+#endif /* LITTLE_ENDIAN */
+	    }
+	}				/* next win */
+    }				/* next func */
+
+    /* Store back the values for allocated pci address spaces */
+    PCI_ADDR_SPACE_LIMITS_STORE();
+    return(0);
+}					
+
+/*
+ * pcibr_slot_info_free
+ *	Remove all the pci infrastructural information associated
+ * 	with a particular pci device.
+ *	Optionally (PCI_HOTSWAP_DEBUG) zeroes the card's BASE
+ *	registers first, then frees the per-function info records
+ *	and the sparse function table itself.
+ *	Returns 0 on success, 1 for a bad vertex or slot number.
+ */
+int
+pcibr_slot_info_free(devfs_handle_t 	pcibr_vhdl,
+		     pciio_slot_t	slot)
+{
+    pcibr_soft_t	pcibr_soft;
+    pcibr_info_h	pcibr_infoh;
+    int			nfunc;
+#if defined(PCI_HOTSWAP_DEBUG)
+    cfg_p		cfgw;
+    bridge_t		*bridge;
+    int			win;
+    cfg_p		wptr;
+#endif /* PCI_HOTSWAP_DEBUG */
+
+    
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
+	return(1);
+
+#if defined(PCI_HOTSWAP_DEBUG)
+    /* Clean out all the base registers */
+    bridge = pcibr_soft->bs_base;
+    cfgw = bridge->b_type0_cfg_dev[slot].l;
+    wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
+    
+    for (win = 0; win < PCI_CFG_BASE_ADDRS; ++win) 
+#ifdef LITTLE_ENDIAN
+	wptr[((win*4)^4)/4] = 0;
+#else
+	wptr[win] = 0;
+#endif  /* LITTLE_ENDIAN */
+#endif /* PCI_HOTSWAP_DEBUG */
+
+    /* capture the count before pcibr_device_info_free clears it */
+    nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
+
+    pcibr_device_info_free(pcibr_vhdl, slot);
+
+    pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
+    DELA(pcibr_infoh,nfunc);
+    pcibr_soft->bs_slot[slot].bss_ninfo = 0;
+
+    return(0);
+    
+
+}
+int as_debug = 0;	/* nonzero: dump address-space limits during slot init (debug aid) */
+/*
+ * pcibr_slot_addr_space_init
+ *	Reserve chunks of pci address space as required by 
+ * 	the base registers in the card.
+ *
+ *	Returns 1 if the bridge soft state or the slot number is invalid;
+ *	returns 0 otherwise, including the "nothing to do" cases
+ *	(guest slot, no functions, no per-function info).
+ */
+int
+pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
+			   pciio_slot_t	slot)
+{
+    pcibr_soft_t	pcibr_soft;
+    pcibr_info_h	pcibr_infoh;
+    pcibr_info_t	pcibr_info;
+    bridge_t		*bridge;
+    iopaddr_t		pci_io_fb, pci_io_fl;
+    iopaddr_t		pci_lo_fb, pci_lo_fl;
+    iopaddr_t		pci_hi_fb, pci_hi_fl;
+    size_t              align;
+    iopaddr_t           mask;
+    int		       	nfunc;
+    int			func;
+    int			win;
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
+	return(1);
+
+    bridge = pcibr_soft->bs_base;
+
+    /* Get the current values for the allocated pci address spaces */
+    PCI_ADDR_SPACE_LIMITS_LOAD();
+
+#ifdef colin
+    /* BUGFIX: the "if (as_debug)" must live inside the #ifdef.  With the
+     * old arrangement (if outside, body inside the #ifdef), compiling
+     * without "colin" left the if dangling so that it bound to the next
+     * statement and silently skipped the has_host early return below.
+     */
+    if (as_debug)
+	PCI_ADDR_SPACE_LIMITS_PRINT();
+#endif
+    /* allocate address space,
+     * for windows that have not been
+     * previously assigned.
+     */
+
+    if (pcibr_soft->bs_slot[slot].has_host)
+	return(0);
+
+    nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
+    if (nfunc < 1)
+	return(0);
+
+    pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
+    if (!pcibr_infoh)
+	return(0);
+
+    /*
+     * Try to make the DevIO windows not
+     * overlap by pushing the "io" and "hi"
+     * allocation areas up to the next one
+     * or two megabyte bound. This also
+     * keeps them from being zero.
+     *
+     * DO NOT do this with "pci_lo" since
+     * the entire "lo" area is only a
+     * megabyte, total ...
+     */
+    align = (slot < 2) ? 0x200000 : 0x100000;
+    mask = -align;
+    pci_io_fb = (pci_io_fb + align - 1) & mask;
+    pci_hi_fb = (pci_hi_fb + align - 1) & mask;
+
+    for (func = 0; func < nfunc; ++func) {
+	cfg_p                   cfgw;
+	cfg_p                   wptr;
+	pciio_space_t           space;
+	iopaddr_t               base;
+	size_t                  size;
+	cfg_p                   pci_cfg_cmd_reg_p;
+	unsigned                pci_cfg_cmd_reg;
+	unsigned                pci_cfg_cmd_reg_add = 0;
+
+	pcibr_info = pcibr_infoh[func];
+
+	if (!pcibr_info)
+	    continue;
+
+	if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
+	    continue;
+	
+	cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
+	wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
+
+	for (win = 0; win < PCI_CFG_BASE_ADDRS; ++win) {
+
+	    space = pcibr_info->f_window[win].w_space;
+	    base = pcibr_info->f_window[win].w_base;
+	    size = pcibr_info->f_window[win].w_size;
+	    
+	    if (size < 1)
+		continue;
+
+	    if (base >= size) {
+#if DEBUG && PCI_DEBUG
+		printk("pcibr: slot %d func %d window %d is in %d[0x%x..0x%x], alloc by prom\n",
+			slot, func, win, space, base, base + size - 1);
+#endif
+		continue;		/* already allocated */
+	    }
+	    align = size;		/* ie. 0x00001000 */
+	    if (align < _PAGESZ)
+		align = _PAGESZ;	/* ie. 0x00004000 */
+	    mask = -align;		/* ie. 0xFFFFC000 */
+
+	    switch (space) {
+	    case PCIIO_SPACE_IO:
+		base = (pci_io_fb + align - 1) & mask;
+		if ((base + size) > pci_io_fl) {
+		    base = 0;
+		    break;
+		}
+		pci_io_fb = base + size;
+		break;
+		
+	    case PCIIO_SPACE_MEM:
+		/* On little-endian, adjacent 32-bit config words are
+		 * swapped within each 64-bit chunk, hence the (win*4)^4
+		 * index munging used for all wptr accesses below.
+		 */
+#ifdef LITTLE_ENDIAN
+		if ((wptr[((win*4)^4)/4] & PCI_BA_MEM_LOCATION) ==
+#else
+		if ((wptr[win] & PCI_BA_MEM_LOCATION) ==
+#endif  /* LITTLE_ENDIAN */
+		    PCI_BA_MEM_1MEG) {
+		    /* allocate from 20-bit PCI space */
+		    base = (pci_lo_fb + align - 1) & mask;
+		    if ((base + size) > pci_lo_fl) {
+			base = 0;
+			break;
+		    }
+		    pci_lo_fb = base + size;
+		} else {
+		    /* allocate from 32-bit or 64-bit PCI space */
+		    base = (pci_hi_fb + align - 1) & mask;
+		    if ((base + size) > pci_hi_fl) {
+			base = 0;
+			break;
+		    }
+		    pci_hi_fb = base + size;
+		}
+		break;
+		
+	    default:
+		base = 0;
+#if DEBUG && PCI_DEBUG
+		printk("pcibr: slot %d window %d had bad space code %d\n",
+			slot, win, space);
+#endif
+	    }
+	    pcibr_info->f_window[win].w_base = base;
+#ifdef LITTLE_ENDIAN
+	    wptr[((win*4)^4)/4] = base;
+		/* NOTE(review): unconditional bringup-debug print; consider
+		 * guarding with DEBUG && PCI_DEBUG like its neighbors. */
+		printk("Setting base address 0x%p base 0x%x\n", &(wptr[((win*4)^4)/4]), base);
+#else
+	    wptr[win] = base;
+#endif  /* LITTLE_ENDIAN */
+
+#if DEBUG && PCI_DEBUG
+	    if (base >= size)
+		printk("pcibr: slot %d func %d window %d is in %d [0x%x..0x%x], alloc by pcibr\n",
+			slot, func, win, space, base, base + size - 1);
+	    else
+		printk("pcibr: slot %d func %d window %d, unable to alloc 0x%x in 0x%p\n",
+			slot, func, win, size, space);
+#endif
+	}				/* next base */
+
+	/*
+	 * Allocate space for the EXPANSION ROM
+	 * NOTE: DO NOT DO THIS ON AN IOC3,
+	 * as it blows the system away.
+	 */
+	base = size = 0;
+	if ((pcibr_soft->bs_slot[slot].bss_vendor_id != IOC3_VENDOR_ID_NUM) ||
+	    (pcibr_soft->bs_slot[slot].bss_device_id != IOC3_DEVICE_ID_NUM)) {
+
+	    wptr = cfgw + PCI_EXPANSION_ROM / 4;
+	    /* Write all-ones to size the ROM BAR; reading back yields the
+	     * address-size mask per the PCI config-space convention.
+	     */
+#ifdef LITTLE_ENDIAN
+	    wptr[1] = 0xFFFFF000;
+	    mask = wptr[1];
+#else
+	    *wptr = 0xFFFFF000;
+	    mask = *wptr;
+#endif  /* LITTLE_ENDIAN */
+	    if (mask & 0xFFFFF000) {
+		size = mask & -mask;
+		align = size;
+		if (align < _PAGESZ)
+		    align = _PAGESZ;
+		mask = -align;
+		base = (pci_hi_fb + align - 1) & mask;
+		if ((base + size) > pci_hi_fl)
+		    base = size = 0;
+		else {
+		    pci_hi_fb = base + size;
+#ifdef LITTLE_ENDIAN
+		    wptr[1] = base;
+#else
+		    *wptr = base;
+#endif  /* LITTLE_ENDIAN */
+#if DEBUG && PCI_DEBUG
+		    printk("%s/%d ROM in 0x%lx..0x%lx (alloc by pcibr)\n",
+			    pcibr_soft->bs_name, slot,
+			    base, base + size - 1);
+#endif
+		}
+	    }
+	}
+	pcibr_info->f_rbase = base;
+	pcibr_info->f_rsize = size;
+	
+	/*
+	 * if necessary, update the board's
+	 * command register to enable decoding
+	 * in the windows we added.
+	 *
+	 * There are some bits we always want to
+	 * be sure are set.
+	 */
+	pci_cfg_cmd_reg_add |= PCI_CMD_IO_SPACE;
+	pci_cfg_cmd_reg_add |= PCI_CMD_MEM_SPACE;
+	pci_cfg_cmd_reg_add |= PCI_CMD_BUS_MASTER;
+
+	pci_cfg_cmd_reg_p = cfgw + PCI_CFG_COMMAND / 4;
+	pci_cfg_cmd_reg = *pci_cfg_cmd_reg_p;
+#if PCI_FBBE	/* XXX- check here to see if dev can do fast-back-to-back */
+	if (!((pci_cfg_cmd_reg >> 16) & PCI_STAT_F_BK_BK_CAP))
+	    fast_back_to_back_enable = 0;
+#endif
+	pci_cfg_cmd_reg &= 0xFFFF;
+	if (pci_cfg_cmd_reg_add & ~pci_cfg_cmd_reg)
+	    *pci_cfg_cmd_reg_p = pci_cfg_cmd_reg | pci_cfg_cmd_reg_add;
+	
+    }				/* next func */
+
+    /* Now that we have allocated new chunks of pci address spaces to this
+     * card we need to update the bookkeeping values which indicate
+     * the current pci address space allocations.
+     */
+    PCI_ADDR_SPACE_LIMITS_STORE();
+    return(0);
+}
+/*
+ * pcibr_slot_device_init
+ * 	Setup the device register in the bridge for this pci slot.
+ *
+ *	Returns 1 if the bridge soft state or slot number is invalid,
+ *	0 on success.  The value written to Device(x) is also cached
+ *	in the bss_device shadow for later read-modify-write use.
+ */
+int
+pcibr_slot_device_init(devfs_handle_t 	pcibr_vhdl,
+		       pciio_slot_t    	slot)
+{
+    pcibr_soft_t	pcibr_soft;
+    bridge_t		*bridge;
+    bridgereg_t		devreg;
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
+	return(1);
+
+    bridge = pcibr_soft->bs_base;
+
+    /*
+     * Adjustments to Device(x)
+     * and init of bss_device shadow
+     */
+    devreg = bridge->b_device[slot].reg;
+    devreg &= ~BRIDGE_DEV_PAGE_CHK_DIS;
+    devreg |= BRIDGE_DEV_COH | BRIDGE_DEV_VIRTUAL_EN;
+#ifdef LITTLE_ENDIAN
+    /* enable hardware byte swapping for direct-mapped accesses */
+    devreg |= BRIDGE_DEV_DEV_SWAP;
+#endif
+    pcibr_soft->bs_slot[slot].bss_device = devreg;
+    bridge->b_device[slot].reg = devreg;
+
+#if DEBUG && PCI_DEBUG
+	printk("pcibr Device(%d): 0x%lx\n", slot, bridge->b_device[slot].reg);
+#endif
+
+#if DEBUG && PCI_DEBUG
+    printk("pcibr: PCI space allocation done.\n");
+#endif
+	
+    return(0);
+}
+
+/*
+ * pcibr_slot_guest_info_init
+ *	Setup the host/guest relations for a pci slot.
+ *
+ *	Returns 1 if the bridge soft state or slot number is invalid,
+ *	0 on success.  Creates a minimal pcibr_info for unpopulated
+ *	slots and adds "host"/"guest" hwgraph edges for guest slots.
+ */
+int
+pcibr_slot_guest_info_init(devfs_handle_t pcibr_vhdl,
+			   pciio_slot_t	slot)
+{
+    pcibr_soft_t	pcibr_soft;
+    pcibr_info_h	pcibr_infoh;
+    pcibr_info_t	pcibr_info;
+    pcibr_soft_slot_t	slotp;
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+
+    if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
+	return(1);
+
+    slotp = &pcibr_soft->bs_slot[slot];
+
+    /* create info and verticies for guest slots;
+     * for compatibilitiy macros, create info
+     * for even unpopulated slots (but do not
+     * build verticies for them).
+     */
+    if (pcibr_soft->bs_slot[slot].bss_ninfo < 1) {
+	NEWA(pcibr_infoh, 1);
+	pcibr_soft->bs_slot[slot].bss_ninfo = 1;
+	pcibr_soft->bs_slot[slot].bss_infos = pcibr_infoh;
+
+	/* placeholder info entry with no vendor/device identity */
+	pcibr_info = pcibr_device_info_new
+	    (pcibr_soft, slot, PCIIO_FUNC_NONE,
+	     PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
+
+	/* only guest slots (those with a host) get a connection vertex */
+	if (pcibr_soft->bs_slot[slot].has_host) {
+	    slotp->slot_conn = pciio_device_info_register
+		(pcibr_vhdl, &pcibr_info->f_c);
+	}
+    }
+
+    /* generate host/guest relations
+     */
+    if (pcibr_soft->bs_slot[slot].has_host) {
+	int  host = pcibr_soft->bs_slot[slot].host_slot;
+	pcibr_soft_slot_t host_slotp = &pcibr_soft->bs_slot[host];
+
+	hwgraph_edge_add(slotp->slot_conn,
+			 host_slotp->slot_conn,
+			 EDGE_LBL_HOST);
+
+	/* XXX- only gives us one guest edge per
+	 * host. If/when we have a host with more than
+	 * one guest, we will need to figure out how
+	 * the host finds all its guests, and sorts
+	 * out which one is which.
+	 */
+	hwgraph_edge_add(host_slotp->slot_conn,
+			 slotp->slot_conn,
+			 EDGE_LBL_GUEST);
+    }
+
+    return(0);
+}
+/*
+ * pcibr_slot_initial_rrb_alloc
+ *	Allocate a default number of rrbs for this slot on 
+ * 	the two channels. This is dictated by the rrb allocation
+ * 	strategy routine defined per platform.
+ *
+ *	Returns 1 if the bridge soft state or slot number is invalid,
+ *	0 otherwise.  The 0x1000 marker stored in bs_rrb_valid flags
+ *	an empty/hostless slot whose RRBs were released.
+ */
+
+int
+pcibr_slot_initial_rrb_alloc(devfs_handle_t 	pcibr_vhdl,
+			     pciio_slot_t	slot)
+
+{
+    pcibr_soft_t	pcibr_soft;
+    pcibr_info_h	pcibr_infoh;
+    pcibr_info_t	pcibr_info;
+    bridge_t		*bridge;
+    int                 c0, c1;
+    int			r;
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
+	return(1);
+
+    bridge = pcibr_soft->bs_base;
+
+
+    /* How may RRBs are on this slot?
+     */
+    c0 = do_pcibr_rrb_count_valid(bridge, slot);
+    c1 = do_pcibr_rrb_count_valid(bridge, slot + PCIBR_RRB_SLOT_VIRTUAL);
+#if PCIBR_RRB_DEBUG
+    printk("pcibr_attach: slot %d started with %d+%d\n", slot, c0, c1);
+#endif
+
+    /* Do we really need any?
+     */
+    /* NOTE(review): bss_infos and pcibr_infoh[0] are dereferenced without
+     * a NULL check here — presumably pcibr_slot_info_init() has always run
+     * first (see pcibr_device_attach); confirm before calling standalone.
+     */
+    pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
+    pcibr_info = pcibr_infoh[0];
+    if ((pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
+	!pcibr_soft->bs_slot[slot].has_host) {
+	if (c0 > 0)
+	    do_pcibr_rrb_free(bridge, slot, c0);
+	if (c1 > 0)
+	    do_pcibr_rrb_free(bridge, slot + PCIBR_RRB_SLOT_VIRTUAL, c1);
+	pcibr_soft->bs_rrb_valid[slot] = 0x1000;
+	pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = 0x1000;
+	return(0);
+    }
+
+    /* account for what the slot already owns on its arbitration ring */
+    pcibr_soft->bs_rrb_avail[slot & 1] -= c0 + c1;
+    pcibr_soft->bs_rrb_valid[slot] = c0;
+    pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = c1;
+
+    pcibr_soft->bs_rrb_avail[0] = do_pcibr_rrb_count_avail(bridge, 0);
+    pcibr_soft->bs_rrb_avail[1] = do_pcibr_rrb_count_avail(bridge, 1);
+
+    /* reserve enough RRBs to bring this slot up to three total */
+    r = 3 - (c0 + c1);
+
+    if (r > 0) {
+	pcibr_soft->bs_rrb_res[slot] = r;
+	pcibr_soft->bs_rrb_avail[slot & 1] -= r;
+    }
+
+#if PCIBR_RRB_DEBUG
+    printk("\t%d+%d+%d",
+	    0xFFF & pcibr_soft->bs_rrb_valid[slot],
+	    0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
+	    pcibr_soft->bs_rrb_res[slot]);
+    printk("\n");
+#endif
+    return(0);
+}
+
+/*
+ * pcibr_slot_call_device_attach
+ *	This calls the associated driver attach routine for the pci
+ * 	card in this slot.
+ *
+ *	Returns 1 if the bridge soft state or slot number is invalid,
+ *	0 otherwise (guest slots are a no-op: their host attaches them).
+ */
+int
+pcibr_slot_call_device_attach(devfs_handle_t	pcibr_vhdl,
+			      pciio_slot_t	slot)
+{
+    pcibr_soft_t	pcibr_soft;
+    pcibr_info_h	pcibr_infoh;
+    pcibr_info_t	pcibr_info;
+    async_attach_t	aa = NULL;
+    int			func;
+    devfs_handle_t	xconn_vhdl,conn_vhdl;
+    int			nfunc;
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
+	return(1);
+
+
+    if (pcibr_soft->bs_slot[slot].has_host)
+        return(0);
+    
+    xconn_vhdl = pcibr_soft->bs_conn;
+    aa = async_attach_get_info(xconn_vhdl);
+
+    nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
+    pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
+
+    /* NOTE(review): unconditional bringup-debug print; candidate for a
+     * DEBUG guard like the other diagnostics in this file. */
+    printk("\npcibr_slot_call_device_attach: link 0x%p pci bus 0x%p slot %d\n", xconn_vhdl, pcibr_vhdl, slot);
+
+    for (func = 0; func < nfunc; ++func) {
+
+	pcibr_info = pcibr_infoh[func];
+	
+	if (!pcibr_info)
+	    continue;
+
+	if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
+	    continue;
+
+	conn_vhdl = pcibr_info->f_vertex;
+
+	/* If the pci device has been disabled in the prom,
+	 * do not set it up for driver attach. NOTE: usrpci
+	 * and pciba will not "see" this connection point!
+	 */
+	if (device_admin_info_get(conn_vhdl, ADMIN_LBL_DISABLED)) {
+#ifdef SUPPORT_PRINTING_V_FORMAT
+	    PRINT_WARNING( "pcibr_slot_call_device_attach: %v disabled\n", 
+		    conn_vhdl);
+#endif
+	    continue;
+	}
+	if (aa)
+	    async_attach_add_info(conn_vhdl, aa);
+	pciio_device_attach(conn_vhdl);
+    }				/* next func */
+
+    printk("\npcibr_slot_call_device_attach: DONE\n");
+
+    return(0);
+}
+/*
+ * pcibr_slot_call_device_detach
+ *	This calls the associated driver detach routine for the pci
+ * 	card in this slot.
+ *
+ *	Return value is inverted relative to the attach path: 0 means
+ *	at least one function was detached; 1 means nothing was detached
+ *	(invalid args, guest slot, empty slot, or every function was a
+ *	system-critical vertex that must not be removed).
+ */
+int
+pcibr_slot_call_device_detach(devfs_handle_t	pcibr_vhdl,
+			      pciio_slot_t	slot)
+{
+    pcibr_soft_t	pcibr_soft;
+    pcibr_info_h	pcibr_infoh;
+    pcibr_info_t	pcibr_info;
+    int			func;
+    devfs_handle_t	conn_vhdl;
+    int			nfunc;
+    int			ndetach = 1;
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
+	return(1);
+
+
+    if (pcibr_soft->bs_slot[slot].has_host)
+        return(0);
+    
+
+    nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
+    pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
+
+    for (func = 0; func < nfunc; ++func) {
+
+	pcibr_info = pcibr_infoh[func];
+	
+	if (!pcibr_info)
+	    continue;
+
+	if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
+	    continue;
+
+	conn_vhdl = pcibr_info->f_vertex;
+	
+	/* Make sure that we do not detach a system critical device
+	 * vertex.
+	 */
+	if (is_sys_critical_vertex(conn_vhdl)) {
+#ifdef SUPPORT_PRINTING_V_FORMAT
+	    PRINT_WARNING( "%v is a system critical device vertex\n",
+		    conn_vhdl);
+#endif
+	    continue;
+	}
+	
+	ndetach = 0;
+	pciio_device_detach(conn_vhdl);
+    }				/* next func */
+
+
+    return(ndetach);
+}
+
+/*
+ * pcibr_device_attach
+ *	This is a place holder routine to keep track of all the
+ *	slot-specific initialization that needs to be done.
+ *	This is usually called when we want to initialize a new
+ * 	pci card on the bus.
+ *
+ *	Runs the per-slot bringup steps in order; the || chain
+ *	short-circuits on the first step that returns non-zero,
+ *	and that non-zero status is returned to the caller.
+ */
+int
+pcibr_device_attach(devfs_handle_t 	pcibr_vhdl,
+		    pciio_slot_t	slot)
+{
+    return (
+	    /* Reset the slot */
+	    pcibr_slot_reset(pcibr_vhdl,slot)			||
+	    /* FInd out what is out there */
+	    pcibr_slot_info_init(pcibr_vhdl,slot)		||
+
+	    /* Set up the address space for this slot in the pci land */
+	    pcibr_slot_addr_space_init(pcibr_vhdl,slot) 	||
+
+	    /* Setup the device register */
+	    pcibr_slot_device_init(pcibr_vhdl, slot)		||
+
+	    /* Setup host/guest relations */
+	    pcibr_slot_guest_info_init(pcibr_vhdl,slot)		||
+
+	    /* Initial RRB management */
+	    pcibr_slot_initial_rrb_alloc(pcibr_vhdl,slot)	||
+
+	    /* Call the device attach */
+	    pcibr_slot_call_device_attach(pcibr_vhdl,slot)
+	    );
+
+}
+/*
+ * pcibr_device_detach
+ *	This is a place holder routine to keep track of all the
+ *	slot-specific freeing that needs to be done.
+ *
+ *	Currently a thin wrapper: forwards its arguments and the
+ *	return status of pcibr_slot_call_device_detach() unchanged.
+ */
+int
+pcibr_device_detach(devfs_handle_t 	pcibr_vhdl,
+		    pciio_slot_t	slot)
+{
+    
+    /* Call the device detach */
+    return (pcibr_slot_call_device_detach(pcibr_vhdl,slot));
+
+}
+/*
+ * pcibr_device_unregister
+ *	This frees up any hardware resources reserved for this pci device
+ * 	and removes any pci infrastructural information setup for it.
+ *	This is usually used at the time of shutting down of the pci card.
+ */
+void
+pcibr_device_unregister(devfs_handle_t pconn_vhdl)
+{
+    pciio_info_t	pciio_info;
+    devfs_handle_t	pcibr_vhdl;
+    pciio_slot_t	slot;
+    pcibr_soft_t	pcibr_soft;
+    bridge_t		*bridge;
+
+    pciio_info = pciio_info_get(pconn_vhdl);
+
+    /* Detach the pciba name space */
+    pciio_device_detach(pconn_vhdl);
+
+    pcibr_vhdl = pciio_info_master_get(pciio_info);
+    slot = pciio_info_slot_get(pciio_info);
+
+    /* NOTE(review): unlike the pcibr_slot_* routines, pcibr_soft is not
+     * checked for NULL before use here — presumably a registered device
+     * guarantees valid soft state; confirm for hot-unplug paths.
+     */
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    bridge = pcibr_soft->bs_base;
+
+    /* Clear all the hardware xtalk resources for this device */
+    xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);
+
+    /* Flush all the rrbs */
+    pcibr_rrb_flush(pconn_vhdl);
+
+    /* Free the rrbs allocated to this slot */
+    do_pcibr_rrb_free(bridge, slot, 
+		      pcibr_soft->bs_rrb_valid[slot] +
+		      pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL]);
+
+
+    pcibr_soft->bs_rrb_valid[slot] = 0;
+    pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = 0;
+    pcibr_soft->bs_rrb_res[slot] = 0;
+
+    /* Flush the write buffers !! */
+    (void)pcibr_wrb_flush(pconn_vhdl);
+    /* Clear the information specific to the slot */
+    (void)pcibr_slot_info_free(pcibr_vhdl, slot);
+    
+}
+
+/* 
+ * build a convenience link path in the
+ * form of ".../<iobrick>/bus/<busnum>"
+ * 
+ * returns 1 on success, 0 otherwise
+ *
+ * depends on hwgraph separator == '/'
+ */
+int
+pcibr_bus_cnvlink(devfs_handle_t f_c, int slot)
+{
+        char dst[MAXDEVNAME];
+	char *dp = dst;
+        char *cp, *xp;
+        int widgetnum;
+        char pcibus[8];
+	devfs_handle_t nvtx, svtx;
+	int rv;
+
+#if DEBUG
+	printk("pcibr_bus_cnvlink: slot= %d f_c= %p\n", 
+		slot, f_c);
+	{
+		int pos;
+		char dname[256];
+		pos = devfs_generate_path(f_c, dname, 256);
+		printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
+	}
+#endif
+
+	if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME))
+		return 0;
+
+	/* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
+
+	/* find the widget number */
+	xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
+	if (xp == NULL)
+		return 0;
+	/* xp+7 skips the strlen("/xtalk/") prefix to the widget digits */
+	widgetnum = atoi(xp+7);
+	if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F)
+		return 0;
+
+	/* remove "/pci/direct" from path */
+	cp = strstr(dst, "/" EDGE_LBL_PCI "/" "direct");
+	if (cp == NULL)
+		return 0;
+	/* BUGFIX: terminate the string with the NUL character, not the
+	 * null-pointer constant cast to char ("(char)NULL"). */
+	*cp = '\0';
+
+	/* get the vertex for the widget */
+	if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx))	
+		return 0;
+
+	*xp = '\0';		/* remove "/xtalk/..." from path */
+
+	/* dst example now == /hw/module/001c02/Pbrick */
+
+	/* get the bus number */
+        strcat(dst, "/bus");
+        sprintf(pcibus, "%d", p_busnum[widgetnum]);
+
+	/* link to bus to widget */
+	rv = hwgraph_path_add(NULL, dp, &nvtx);
+	if (GRAPH_SUCCESS == rv)
+		rv = hwgraph_edge_add(nvtx, svtx, pcibus);
+
+	return (rv == GRAPH_SUCCESS);
+}
+
+/*
+ *    pcibr_attach: called every time the crosstalk
+ *      infrastructure is asked to initialize a widget
+ *      that matches the part number we handed to the
+ *      registration routine above.
+ */
+/*ARGSUSED */
+int
+pcibr_attach(devfs_handle_t xconn_vhdl)
+{
+    /* REFERENCED */
+    graph_error_t           rc;
+    devfs_handle_t            pcibr_vhdl;
+    devfs_handle_t            ctlr_vhdl;
+    bridge_t               *bridge = NULL;
+    bridgereg_t             id;
+    int                     rev;
+    pcibr_soft_t            pcibr_soft;
+    pcibr_info_t            pcibr_info;
+    xwidget_info_t          info;
+    xtalk_intr_t            xtalk_intr;
+    device_desc_t           dev_desc;
+    int                     slot;
+    int                     ibit;
+    devfs_handle_t            noslot_conn;
+    char                    devnm[MAXDEVNAME], *s;
+    pcibr_hints_t           pcibr_hints;
+    bridgereg_t             b_int_enable;
+    unsigned                rrb_fixed = 0;
+
+    iopaddr_t               pci_io_fb, pci_io_fl;
+    iopaddr_t               pci_lo_fb, pci_lo_fl;
+    iopaddr_t               pci_hi_fb, pci_hi_fl;
+
+    int                     spl_level;
+    char		    *nicinfo = (char *)0;
+
+#if PCI_FBBE
+    int                     fast_back_to_back_enable;
+#endif
+
+    async_attach_t          aa = NULL;
+
+    aa = async_attach_get_info(xconn_vhdl);
+
+#if DEBUG && ATTACH_DEBUG
+    printk("pcibr_attach: xconn_vhdl=  %p\n", xconn_vhdl);
+    {
+	int pos;
+	char dname[256];
+	pos = devfs_generate_path(xconn_vhdl, dname, 256);
+	printk("%s : path= %s \n", __FUNCTION__, &dname[pos]);
+    }
+#endif
+
+    /* Setup the PRB for the bridge in CONVEYOR BELT
+     * mode. PRBs are setup in default FIRE-AND-FORGET
+     * mode during the initialization.
+     */
+    hub_device_flags_set(xconn_vhdl, HUB_PIO_CONVEYOR);
+
+    bridge = (bridge_t *)
+	xtalk_piotrans_addr(xconn_vhdl, NULL,
+			    0, sizeof(bridge_t), 0);
+
+#ifndef MEDUSA_HACK
+    if ((bridge->b_wid_stat & BRIDGE_STAT_PCI_GIO_N) == 0)
+	return -1;			/* someone else handles GIO bridges. */
+#endif
+
+#ifdef BRINGUP
+    if (XWIDGET_PART_REV_NUM(bridge->b_wid_id) == XBRIDGE_PART_REV_A)
+	NeedXbridgeSwap = 1;
+#endif
+
+	printk("pcibr_attach: Called with vertex 0x%p, b_wid_stat 0x%x, gio 0x%x\n",xconn_vhdl, bridge->b_wid_stat, BRIDGE_STAT_PCI_GIO_N);
+
+    /*
+     * Create the vertex for the PCI bus, which we
+     * will also use to hold the pcibr_soft and
+     * which will be the "master" vertex for all the
+     * pciio connection points we will hang off it.
+     * This needs to happen before we call nic_bridge_vertex_info
+     * as we are some of the *_vmc functions need access to the edges.
+     *
+     * Opening this vertex will provide access to
+     * the Bridge registers themselves.
+     */
+    rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
+    ASSERT(rc == GRAPH_SUCCESS);
+
+    rc = hwgraph_char_device_add(pcibr_vhdl, EDGE_LBL_CONTROLLER, "pcibr_", &ctlr_vhdl);
+    ASSERT(rc == GRAPH_SUCCESS);
+
+    /*
+     * decode the nic, and hang its stuff off our
+     * connection point where other drivers can get
+     * at it.
+     */
+#ifdef LATER
+    nicinfo = BRIDGE_VERTEX_MFG_INFO(xconn_vhdl, (nic_data_t) & bridge->b_nic);
+#endif
+
+    /*
+     * Get the hint structure; if some NIC callback
+     * marked this vertex as "hands-off" then we
+     * just return here, before doing anything else.
+     */
+    pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
+
+    if (pcibr_hints && pcibr_hints->ph_hands_off)
+	return -1;			/* generic operations disabled */
+
+    id = bridge->b_wid_id;
+    rev = XWIDGET_PART_REV_NUM(id);
+
+    hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);
+
+    /*
+     * allocate soft state structure, fill in some
+     * fields, and hook it up to our vertex.
+     */
+    NEW(pcibr_soft);
+    BZERO(pcibr_soft, sizeof *pcibr_soft);
+    pcibr_soft_set(pcibr_vhdl, pcibr_soft);
+
+    pcibr_soft->bs_conn = xconn_vhdl;
+    pcibr_soft->bs_vhdl = pcibr_vhdl;
+    pcibr_soft->bs_base = bridge;
+    pcibr_soft->bs_rev_num = rev;
+    pcibr_soft->bs_intr_bits = pcibr_intr_bits;
+    if (is_xbridge(bridge)) {
+	pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
+	pcibr_soft->bs_xbridge = 1;
+    } else {
+	pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
+	pcibr_soft->bs_xbridge = 0;
+    }
+
+    pcibr_soft->bsi_err_intr = 0;
+
+    /* Bridges up through REV C
+     * are unable to set the direct
+     * byteswappers to BYTE_STREAM.
+     */
+    if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
+	pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
+	pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
+    }
+#if PCIBR_SOFT_LIST
+    {
+	pcibr_list_p            self;
+
+	NEW(self);
+	self->bl_soft = pcibr_soft;
+	self->bl_vhdl = pcibr_vhdl;
+	self->bl_next = pcibr_list;
+	self->bl_next = swap_ptr((void **) &pcibr_list, (void *)self);
+    }
+#endif
+
+    /*
+     * get the name of this bridge vertex and keep the info. Use this
+     * only where it is really needed now: like error interrupts.
+     */
+    s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
+    pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
+    strcpy(pcibr_soft->bs_name, s);
+
+#if SHOW_REVS || DEBUG
+#if !DEBUG
+    if (kdebug)
+#endif
+	printk("%sBridge ASIC: rev %s (code=0x%x) at %s\n",
+		is_xbridge(bridge) ? "X" : "",
+		(rev == BRIDGE_PART_REV_A) ? "A" :
+		(rev == BRIDGE_PART_REV_B) ? "B" :
+		(rev == BRIDGE_PART_REV_C) ? "C" :
+		(rev == BRIDGE_PART_REV_D) ? "D" :
+		(rev == XBRIDGE_PART_REV_A) ? "A" :
+		(rev == XBRIDGE_PART_REV_B) ? "B" :
+		"unknown",
+		rev, pcibr_soft->bs_name);
+#endif
+
+    info = xwidget_info_get(xconn_vhdl);
+    pcibr_soft->bs_xid = xwidget_info_id_get(info);
+    pcibr_soft->bs_master = xwidget_info_master_get(info);
+    pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
+
+    /*
+     * Init bridge lock.
+     */
+    spinlock_init(&pcibr_soft->bs_lock, "pcibr_loc");
+
+    /*
+     * If we have one, process the hints structure.
+     */
+    if (pcibr_hints) {
+	rrb_fixed = pcibr_hints->ph_rrb_fixed;
+
+	pcibr_soft->bs_rrb_fixed = rrb_fixed;
+
+	if (pcibr_hints->ph_intr_bits)
+	    pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
+
+	for (slot = 0; slot < 8; ++slot) {
+	    int                     hslot = pcibr_hints->ph_host_slot[slot] - 1;
+
+	    if (hslot < 0) {
+		pcibr_soft->bs_slot[slot].host_slot = slot;
+	    } else {
+		pcibr_soft->bs_slot[slot].has_host = 1;
+		pcibr_soft->bs_slot[slot].host_slot = hslot;
+	    }
+	}
+    }
+    /*
+     * set up initial values for state fields
+     */
+    for (slot = 0; slot < 8; ++slot) {
+	pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
+	pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
+	pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
+	pcibr_soft->bs_slot[slot].bss_ext_ates_active = 0;
+    }
+
+    for (ibit = 0; ibit < 8; ++ibit) {
+	pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
+	pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_list = 0;
+    }
+
+    /*
+     * connect up our error handler
+     */
+    xwidget_error_register(xconn_vhdl, pcibr_error_handler, pcibr_soft);
+
+    /*
+     * Initialize various Bridge registers.
+     */
+
+    /*
+     * On pre-Rev.D bridges, set the PCI_RETRY_CNT
+     * to zero to avoid dropping stores. (#475347)
+     */
+    if (rev < BRIDGE_PART_REV_D)
+	bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;
+
+    /*
+     * Clear all pending interrupts.
+     */
+    bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);
+
+    /*
+     * Until otherwise set up,
+     * assume all interrupts are
+     * from slot 7.
+     */
+    bridge->b_int_device = (uint32_t) 0xffffffff;
+
+    {
+	bridgereg_t             dirmap;
+	paddr_t                 paddr;
+	iopaddr_t               xbase;
+	xwidgetnum_t            xport;
+	iopaddr_t               offset;
+	int                     num_entries;
+	int                     entry;
+	cnodeid_t		cnodeid;
+	nasid_t			nasid;
+	char		       *node_val;
+	devfs_handle_t		node_vhdl;
+	char			vname[MAXDEVNAME];
+
+	/* Set the Bridge's 32-bit PCI to XTalk
+	 * Direct Map register to the most useful
+	 * value we can determine.  Note that we
+	 * must use a single xid for all of:
+	 *      direct-mapped 32-bit DMA accesses
+	 *      direct-mapped 64-bit DMA accesses
+	 *      DMA accesses through the PMU
+	 *      interrupts
+	 * This is the only way to guarantee that
+	 * completion interrupts will reach a CPU
+	 * after all DMA data has reached memory.
+	 * (Of course, there may be a few special
+	 * drivers/controlers that explicitly manage
+	 * this ordering problem.)
+	 */
+
+	cnodeid = 0;  /* default node id */
+	/*
+	 * Determine the base address node id to be used for all 32-bit
+	 * Direct Mapping I/O. The default is node 0, but this can be changed
+	 * via a DEVICE_ADMIN directive and the PCIBUS_DMATRANS_NODE
+	 * attribute in the irix.sm config file. A device driver can obtain
+	 * this node value via a call to pcibr_get_dmatrans_node().
+	 */
+	node_val = device_admin_info_get(pcibr_vhdl, ADMIN_LBL_DMATRANS_NODE);
+	if (node_val != NULL) {
+	    node_vhdl = hwgraph_path_to_vertex(node_val);
+	    if (node_vhdl != GRAPH_VERTEX_NONE) {
+		cnodeid = nodevertex_to_cnodeid(node_vhdl);
+	    }
+	    if ((node_vhdl == GRAPH_VERTEX_NONE) || (cnodeid == CNODEID_NONE)) {
+		cnodeid = 0;
+		vertex_to_name(pcibr_vhdl, vname, sizeof(vname));
+		PRINT_WARNING( "Invalid hwgraph node path specified:\n     DEVICE_ADMIN: %s %s=%s\n",
+			vname, ADMIN_LBL_DMATRANS_NODE, node_val);
+	    }
+	}
+	nasid = COMPACT_TO_NASID_NODEID(cnodeid);
+	paddr = NODE_OFFSET(nasid) + 0;
+
+	/* currently, we just assume that if we ask
+	 * for a DMA mapping to "zero" the XIO
+	 * host will transmute this into a request
+	 * for the lowest hunk of memory.
+	 */
+	xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
+				    paddr, _PAGESZ, 0);
+
+	if (xbase != XIO_NOWHERE) {
+	    if (XIO_PACKED(xbase)) {
+		xport = XIO_PORT(xbase);
+		xbase = XIO_ADDR(xbase);
+	    } else
+		xport = pcibr_soft->bs_mxid;
+
+	    offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
+	    xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;
+
+	    dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;
+
+#ifdef IRIX
+	    dirmap |= BRIDGE_DIRMAP_RMF_64;
+#endif
+
+	    if (xbase)
+		dirmap |= BRIDGE_DIRMAP_OFF & xbase;
+	    else if (offset >= (512 << 20))
+		dirmap |= BRIDGE_DIRMAP_ADD512;
+
+	    bridge->b_dir_map = dirmap;
+	}
+	/*
+	 * Set bridge's idea of page size according to the system's
+	 * idea of "IO page size".  TBD: The idea of IO page size
+	 * should really go away.
+	 */
+	/*
+	 * ensure that we write and read without any interruption.
+	 * The read following the write is required for the Bridge war
+	 */
+	spl_level = splhi();
+#if IOPGSIZE == 4096
+	bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
+#elif IOPGSIZE == 16384
+	bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
+#else
+	<<<Unable to deal with IOPGSIZE >>>;
+#endif
+	bridge->b_wid_control;		/* inval addr bug war */
+	splx(spl_level);
+
+	/* Initialize internal mapping entries */
+	for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++)
+	    bridge->b_int_ate_ram[entry].wr = 0;
+
+	/*
+	 * Determine if there's external mapping SSRAM on this
+	 * bridge.  Set up Bridge control register appropriately,
+	 * inititlize SSRAM, and set software up to manage RAM
+	 * entries as an allocatable resource.
+	 *
+	 * Currently, we just use the rm* routines to manage ATE
+	 * allocation.  We should probably replace this with a
+	 * Best Fit allocator.
+	 *
+	 * For now, if we have external SSRAM, avoid using
+	 * the internal ssram: we can't turn PREFETCH on
+	 * when we use the internal SSRAM; and besides,
+	 * this also guarantees that no allocation will
+	 * straddle the internal/external line, so we
+	 * can increment ATE write addresses rather than
+	 * recomparing against BRIDGE_INTERNAL_ATES every
+	 * time.
+	 */
+#ifdef BRINGUP
+	/*
+	 * 082799: for some reason pcibr_init_ext_ate_ram is causing
+	 * a Data Bus Error.  It should be zero anyway so just force it.
+	 */
+	num_entries = 0;
+#else
+	num_entries = pcibr_init_ext_ate_ram(bridge);
+#endif
+
+	/* we always have 128 ATEs (512 for Xbridge) inside the chip
+	 * even if disabled for debugging.
+	 */
+	pcibr_soft->bs_int_ate_map = rmallocmap(pcibr_soft->bs_int_ate_size);
+	pcibr_ate_free(pcibr_soft, 0, pcibr_soft->bs_int_ate_size);
+#if PCIBR_ATE_DEBUG
+	printk("pcibr_attach: %d INTERNAL ATEs\n", pcibr_soft->bs_int_ate_size);
+#endif
+
+	if (num_entries > pcibr_soft->bs_int_ate_size) {
+#if PCIBR_ATE_NOTBOTH			/* for debug -- forces us to use external ates */
+	    printk("pcibr_attach: disabling internal ATEs.\n");
+	    pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
+#endif
+	    pcibr_soft->bs_ext_ate_map = rmallocmap(num_entries);
+	    pcibr_ate_free(pcibr_soft, pcibr_soft->bs_int_ate_size,
+			   num_entries - pcibr_soft->bs_int_ate_size);
+#if PCIBR_ATE_DEBUG
+	    printk("pcibr_attach: %d EXTERNAL ATEs\n",
+		    num_entries - pcibr_soft->bs_int_ate_size);
+#endif
+	}
+    }
+
+    {
+	bridgereg_t             dirmap;
+	iopaddr_t               xbase;
+
+	/*
+	 * now figure the *real* xtalk base address
+	 * that dirmap sends us to.
+	 */
+	dirmap = bridge->b_dir_map;
+	if (dirmap & BRIDGE_DIRMAP_OFF)
+	    xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
+			<< BRIDGE_DIRMAP_OFF_ADDRSHFT;
+	else if (dirmap & BRIDGE_DIRMAP_ADD512)
+	    xbase = 512 << 20;
+	else
+	    xbase = 0;
+
+	pcibr_soft->bs_dir_xbase = xbase;
+
+	/* it is entirely possible that we may, at this
+	 * point, have our dirmap pointing somewhere
+	 * other than our "master" port.
+	 */
+	pcibr_soft->bs_dir_xport =
+	    (dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
+    }
+
+    /* pcibr sources an error interrupt;
+     * figure out where to send it.
+     *
+     * If any interrupts are enabled in bridge,
+     * then the prom set us up and our interrupt
+     * has already been reconnected in mlreset
+     * above.
+     *
+     * Need to set the D_INTR_ISERR flag
+     * in the dev_desc used for alocating the
+     * error interrupt, so our interrupt will
+     * be properly routed and prioritized.
+     *
+     * If our crosstalk provider wants to
+     * fix widget error interrupts to specific
+     * destinations, D_INTR_ISERR is how it
+     * knows to do this.
+     */
+
+    dev_desc = device_desc_dup(pcibr_vhdl);
+    device_desc_flags_set(dev_desc,
+			  device_desc_flags_get(dev_desc) | D_INTR_ISERR);
+    device_desc_intr_name_set(dev_desc, "Bridge error");
+
+    xtalk_intr = xtalk_intr_alloc(xconn_vhdl, dev_desc, pcibr_vhdl);
+    ASSERT(xtalk_intr != NULL);
+
+    device_desc_free(dev_desc);
+
+    pcibr_soft->bsi_err_intr = xtalk_intr;
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+    /*
+     * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
+     * in order to work around some addressing limitations.  In order
+     * for that fire wall to work properly, we need to make sure we
+     * start from a known clean state.
+     */
+    pcibr_clearwidint(bridge);
+#endif
+
+    printk("pribr_attach:  FIXME Error Interrupt not registered\n");
+
+    xtalk_intr_connect(xtalk_intr,
+		       (intr_func_t) pcibr_error_intr_handler,
+		       (intr_arg_t) pcibr_soft,
+		       (xtalk_intr_setfunc_t) pcibr_setwidint,
+		       (void *) bridge,
+		       (void *) 0);
+
+    /*
+     * now we can start handling error interrupts;
+     * enable all of them.
+     * NOTE: some PCI ints may already be enabled.
+     */
+    b_int_enable = bridge->b_int_enable | BRIDGE_ISR_ERRORS;
+
+
+    bridge->b_int_enable = b_int_enable;
+    bridge->b_int_mode = 0;		/* do not send "clear interrupt" packets */
+
+    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+
+    /*
+     * Depending on the rev of bridge, disable certain features.
+     * Easiest way seems to be to force the PCIBR_NOwhatever
+     * flag to be on for all DMA calls, which overrides any
+     * PCIBR_whatever flag or even the setting of whatever
+     * from the PCIIO_DMA_class flags (or even from the other
+     * PCIBR flags, since NO overrides YES).
+     */
+    pcibr_soft->bs_dma_flags = 0;
+
+    /* PREFETCH:
+     * Always completely disabled for REV.A;
+     * at "pcibr_prefetch_enable_rev", anyone
+     * asking for PCIIO_PREFETCH gets it.
+     * Between these two points, you have to ask
+     * for PCIBR_PREFETCH, which promises that
+     * your driver knows about known Bridge WARs.
+     */
+    if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
+	pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
+    else if (pcibr_soft->bs_rev_num < 
+		(BRIDGE_WIDGET_PART_NUM << 4 | pcibr_prefetch_enable_rev))
+	pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
+
+    /* WRITE_GATHER:
+     * Disabled up to but not including the
+     * rev number in pcibr_wg_enable_rev. There
+     * is no "WAR range" as with prefetch.
+     */
+    if (pcibr_soft->bs_rev_num < 
+		(BRIDGE_WIDGET_PART_NUM << 4 | pcibr_wg_enable_rev))
+	pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
+
+    pciio_provider_register(pcibr_vhdl, &pcibr_provider);
+    pciio_provider_startup(pcibr_vhdl);
+
+    pci_io_fb = 0x00000004;		/* I/O FreeBlock Base */
+    pci_io_fl = 0xFFFFFFFF;		/* I/O FreeBlock Last */
+
+    pci_lo_fb = 0x00000010;		/* Low Memory FreeBlock Base */
+    pci_lo_fl = 0x001FFFFF;		/* Low Memory FreeBlock Last */
+
+    pci_hi_fb = 0x00200000;		/* High Memory FreeBlock Base */
+    pci_hi_fl = 0x3FFFFFFF;		/* High Memory FreeBlock Last */
+
+
+    PCI_ADDR_SPACE_LIMITS_STORE();
+
+    /* build "no-slot" connection point
+     */
+    pcibr_info = pcibr_device_info_new
+	(pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
+	 PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
+    noslot_conn = pciio_device_info_register
+	(pcibr_vhdl, &pcibr_info->f_c);
+
+    /* Remember the no slot connection point info for tearing it
+     * down during detach.
+     */
+    pcibr_soft->bs_noslot_conn = noslot_conn;
+    pcibr_soft->bs_noslot_info = pcibr_info;
+#if PCI_FBBE
+    fast_back_to_back_enable = 1;
+#endif
+
+#if PCI_FBBE
+    if (fast_back_to_back_enable) {
+	/*
+	 * All devices on the bus are capable of fast back to back, so
+	 * we need to set the fast back to back bit in all devices on
+	 * the bus that are capable of doing such accesses.
+	 */
+    }
+#endif
+
+#ifdef IRIX
+    /* If the bridge has been reset then there is no need to reset
+     * the individual PCI slots.
+     */
+    for (slot = 0; slot < 8; ++slot)  
+	/* Reset all the slots */
+	(void)pcibr_slot_reset(pcibr_vhdl,slot);
+#endif
+
+    for (slot = 0; slot < 8; ++slot)
+	/* Find out what is out there */
+	(void)pcibr_slot_info_init(pcibr_vhdl,slot);
+
+    for (slot = 0; slot < 8; ++slot)  
+	/* Set up the address space for this slot in the pci land */
+	(void)pcibr_slot_addr_space_init(pcibr_vhdl,slot);
+
+    for (slot = 0; slot < 8; ++slot)  
+	/* Setup the device register */
+	(void)pcibr_slot_device_init(pcibr_vhdl, slot);
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+    for (slot = 0; slot < 8; ++slot)  
+	/* Set up convenience links */
+	if (is_xbridge(bridge))
+		if (pcibr_soft->bs_slot[slot].bss_ninfo > 0) /* if occupied */
+			pcibr_bus_cnvlink(pcibr_info->f_vertex, slot);
+#endif
+
+    for (slot = 0; slot < 8; ++slot)  
+	/* Setup host/guest relations */
+	(void)pcibr_slot_guest_info_init(pcibr_vhdl,slot);
+
+    for (slot = 0; slot < 8; ++slot)  
+	/* Initial RRB management */
+	(void)pcibr_slot_initial_rrb_alloc(pcibr_vhdl,slot);
+
+#ifdef dagum
+    /* driver attach routines should be called out from generic linux code */
+    for (slot = 0; slot < 8; ++slot)  
+	/* Call the device attach */
+	(void)pcibr_slot_call_device_attach(pcibr_vhdl,slot);
+#endif /* dagum */
+
+#ifdef LATER
+    if (strstr(nicinfo, XTALK_PCI_PART_NUM)) {
+	do_pcibr_rrb_autoalloc(pcibr_soft, 1, 8);
+#if PCIBR_RRB_DEBUG
+	printf("\n\nFound XTALK_PCI (030-1275) at %v\n", xconn_vhdl);
+
+	printf("pcibr_attach: %v Shoebox RRB MANAGEMENT: %d+%d free\n",
+		pcibr_vhdl,
+		pcibr_soft->bs_rrb_avail[0],
+		pcibr_soft->bs_rrb_avail[1]);
+
+	for (slot = 0; slot < 8; ++slot)
+	    printf("\t%d+%d+%d",
+	    0xFFF & pcibr_soft->bs_rrb_valid[slot],
+	    0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
+	    pcibr_soft->bs_rrb_res[slot]);
+
+	printf("\n");
+#endif
+    }
+#else
+	printk("pcibr_attach: FIXME to call do_pcibr_rrb_autoalloc nicinfo 0x%p\n", nicinfo);
+#endif
+
+    if (aa)
+	    async_attach_add_info(noslot_conn, aa);
+
+    pciio_device_attach(noslot_conn);
+
+
+    /* 
+     * Tear down pointer to async attach info -- async threads for
+     * bridge's descendants may be running but the bridge's work is done.
+     */
+    if (aa)
+	    async_attach_del_info(xconn_vhdl);
+
+    return 0;
+}
+/*
+ * pcibr_detach:
+ *	Detach the bridge device from the hwgraph after cleaning out all the 
+ *	underlying vertices.
+ *
+ *	Teardown order matters: bridge interrupts are masked first so no
+ *	error or device interrupt arrives while per-slot state is being
+ *	torn down; then the slots, the no-slot connection point, the soft
+ *	state, and finally the hwgraph edges themselves.
+ *
+ *	Returns 0 on success, 1 if no PCI vertex hangs off 'xconn'.
+ */
+int
+pcibr_detach(devfs_handle_t xconn)
+{
+    pciio_slot_t	slot;
+    devfs_handle_t	pcibr_vhdl;
+    pcibr_soft_t	pcibr_soft;
+    bridge_t		*bridge;
+
+    /* Get the bridge vertex from its xtalk connection point */
+    if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
+	return(1);
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    bridge = pcibr_soft->bs_base;
+
+    /* Disable the interrupts from the bridge */
+    bridge->b_int_enable = 0;
+
+    /* Detach all the PCI devices talking to this bridge */
+    for(slot = 0; slot < 8; slot++) {
+#ifdef DEBUG
+	printk("pcibr_device_detach called for %p/%d\n",
+		pcibr_vhdl,slot);
+#endif
+	pcibr_device_detach(pcibr_vhdl, slot);
+    }
+
+    /* Unregister the no-slot connection point */
+    pciio_device_info_unregister(pcibr_vhdl,
+				 &(pcibr_soft->bs_noslot_info->f_c));
+
+    spinlock_destroy(&pcibr_soft->bs_lock);
+    kfree(pcibr_soft->bs_name);
+    
+    /* Error handler gets unregistered when the widget info is 
+     * cleaned 
+     */
+    /* Free the soft ATE maps */
+    if (pcibr_soft->bs_int_ate_map)
+	rmfreemap(pcibr_soft->bs_int_ate_map);
+    if (pcibr_soft->bs_ext_ate_map)
+	rmfreemap(pcibr_soft->bs_ext_ate_map);
+
+    /* Disconnect the error interrupt and free the xtalk resources 
+     * associated with it.
+     */
+    xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
+    xtalk_intr_free(pcibr_soft->bsi_err_intr);
+
+    /* Clear the software state maintained by the bridge driver for this
+     * bridge.
+     */
+    DEL(pcibr_soft);
+    /* Remove the Bridge revision labelled info */
+    (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
+    /* Remove the character device associated with this bridge */
+    (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
+    /* Remove the PCI bridge vertex */
+    (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);
+
+    return(0);
+}
+
+/*
+ * pcibr_asic_rev: report the Bridge ASIC revision for the bus behind
+ * 'pconn_vhdl'.  The revision was stashed on the master vertex as the
+ * INFO_LBL_PCIBR_ASIC_REV label at attach time; walk to the master
+ * and read it back.  Returns the revision, or -1 if either the
+ * master vertex or the label cannot be found.
+ */
+int
+pcibr_asic_rev(devfs_handle_t pconn_vhdl)
+{
+    devfs_handle_t          master_vhdl;
+    arbitrary_info_t        rev_info;
+
+    if (hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &master_vhdl)
+	!= GRAPH_SUCCESS)
+	return -1;
+
+    if (hwgraph_info_get_LBL(master_vhdl, INFO_LBL_PCIBR_ASIC_REV, &rev_info)
+	!= GRAPH_SUCCESS)
+	return -1;
+
+    return (int) rev_info;
+}
+
+/*
+ * pcibr_write_gather_flush: force out any write-gather data pending
+ * for the slot behind 'pconn_vhdl'.  Always returns 0.
+ */
+int
+pcibr_write_gather_flush(devfs_handle_t pconn_vhdl)
+{
+    pciio_info_t  pciio_info = pciio_info_get(pconn_vhdl);
+    pcibr_soft_t  pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    pciio_slot_t  slot = pciio_info_slot_get(pciio_info);
+
+    pcibr_device_write_gather_flush(pcibr_soft, slot);
+    return 0;
+}
+
+/* =====================================================================
+ *    PIO MANAGEMENT
+ */
+
+/*
+ * pcibr_addr_pci_to_xio: translate a (slot, space, pci_addr, req_size)
+ * request into the XIO address within Bridge widget space that decodes
+ * to it, or XIO_NOWHERE when no translation can be arranged.
+ *
+ * CFG space is computed directly from the slot; ROM and WIN0..WIN5
+ * requests are first rebased into the raw MEM/IO space that the
+ * device's BASE register decodes.  The eight DevIO(x) windows are then
+ * scanned twice: the first eight tries only reuse windows already
+ * pointing the right way, the second eight may claim a free window and
+ * program its Device(x) register.  Requests that fit no DevIO window
+ * fall back to the large direct-mapped XIO areas (MEM32/MEM64/IO).
+ *
+ * Takes and releases bs_lock around the whole translation.
+ */
+LOCAL iopaddr_t
+pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
+		      pciio_slot_t slot,
+		      pciio_space_t space,
+		      iopaddr_t pci_addr,
+		      size_t req_size,
+		      unsigned flags)
+{
+    pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = &pcibr_info->f_c;
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    bridge_t               *bridge = pcibr_soft->bs_base;
+
+    unsigned                bar;	/* which BASE reg on device is decoding */
+    iopaddr_t               xio_addr = XIO_NOWHERE;
+
+    pciio_space_t           wspace;	/* which space device is decoding */
+    iopaddr_t               wbase;	/* base of device decode on PCI */
+    size_t                  wsize;	/* size of device decode on PCI */
+
+    int                     try;	/* DevIO(x) window scanning order control */
+    int                     win;	/* which DevIO(x) window is being used */
+    pciio_space_t           mspace;	/* target space for devio(x) register */
+    iopaddr_t               mbase;	/* base of devio(x) mapped area on PCI */
+    size_t                  msize;	/* size of devio(x) mapped area on PCI */
+    size_t                  mmask;	/* addr bits stored in Device(x) */
+
+    unsigned                s;
+
+    s = pcibr_lock(pcibr_soft);
+
+    /* A "guest" slot's decodes are owned by its host slot;
+     * translate using the host's windows and info.
+     */
+    if (pcibr_soft->bs_slot[slot].has_host) {
+	slot = pcibr_soft->bs_slot[slot].host_slot;
+	pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];
+    }
+    if (space == PCIIO_SPACE_NONE)
+	goto done;
+
+    if (space == PCIIO_SPACE_CFG) {
+	/*
+	 * Usually, the first mapping
+	 * established to a PCI device
+	 * is to its config space.
+	 *
+	 * In any case, we definitely
+	 * do NOT need to worry about
+	 * PCI BASE registers, and
+	 * MUST NOT attempt to point
+	 * the DevIO(x) window at
+	 * this access ...
+	 */
+	if (((flags & PCIIO_BYTE_STREAM) == 0) &&
+	    ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
+	    xio_addr = pci_addr + BRIDGE_TYPE0_CFG_DEV(slot);
+
+	goto done;
+    }
+    if (space == PCIIO_SPACE_ROM) {
+	/* PIO to the Expansion Rom.
+	 * Driver is responsible for
+	 * enabling and disabling
+	 * decodes properly.
+	 */
+	wbase = pcibr_info->f_rbase;
+	wsize = pcibr_info->f_rsize;
+
+	/*
+	 * While the driver should know better
+	 * than to attempt to map more space
+	 * than the device is decoding, he might
+	 * do it; better to bail out here.
+	 */
+	if ((pci_addr + req_size) > wsize)
+	    goto done;
+
+	pci_addr += wbase;
+	space = PCIIO_SPACE_MEM;
+    }
+    /*
+     * reduce window mappings to raw
+     * space mappings (maybe allocating
+     * windows), and try for DevIO(x)
+     * usage (setting it if it is available).
+     */
+    bar = space - PCIIO_SPACE_WIN0;
+    /* 'bar' is unsigned, so spaces below WIN0 (plain MEM/IO) wrap
+     * to large values and fall through to the else with the
+     * address left as given.
+     */
+    if (bar < 6) {
+	wspace = pcibr_info->f_window[bar].w_space;
+	if (wspace == PCIIO_SPACE_NONE)
+	    goto done;
+
+	/* get pci base and size */
+	wbase = pcibr_info->f_window[bar].w_base;
+	wsize = pcibr_info->f_window[bar].w_size;
+
+	/*
+	 * While the driver should know better
+	 * than to attempt to map more space
+	 * than the device is decoding, he might
+	 * do it; better to bail out here.
+	 */
+	if ((pci_addr + req_size) > wsize)
+	    goto done;
+
+	/* shift from window relative to
+	 * decoded space relative.
+	 */
+	pci_addr += wbase;
+	space = wspace;
+    } else
+	bar = -1;
+
+    /* Scan all the DevIO(x) windows twice looking for one
+     * that can satisfy our request. The first time through,
+     * only look at assigned windows; the second time, also
+     * look at PCIIO_SPACE_NONE windows. Arrange the order
+     * so we always look at our own window first.
+     *
+     * We will not attempt to satisfy a single request
+     * by concatenating multiple windows.
+     */
+    for (try = 0; try < 16; ++try) {
+	bridgereg_t             devreg;
+	unsigned                offset;
+
+	win = (try + slot) % 8;
+
+	/* If this DevIO(x) mapping area can provide
+	 * a mapping to this address, use it.
+	 * DevIO(0) and DevIO(1) decode 2MB; the rest decode 1MB.
+	 */
+	msize = (win < 2) ? 0x200000 : 0x100000;
+	mmask = -msize;
+	if (space != PCIIO_SPACE_IO)
+	    mmask &= 0x3FFFFFFF;	/* mem bases confined to low 30 bits */
+
+	offset = pci_addr & (msize - 1);
+
+	/* If this window can't possibly handle that request,
+	 * go on to the next window.
+	 */
+	if (((pci_addr & (msize - 1)) + req_size) > msize)
+	    continue;
+
+	devreg = pcibr_soft->bs_slot[win].bss_device;
+
+	/* Is this window "nailed down"?
+	 * If not, maybe we can use it.
+	 * (only check this the second time through)
+	 */
+	mspace = pcibr_soft->bs_slot[win].bss_devio.bssd_space;
+	if ((try > 7) && (mspace == PCIIO_SPACE_NONE)) {
+
+	    /* If this is the primary DevIO(x) window
+	     * for some other device, skip it.
+	     */
+	    if ((win != slot) &&
+		(PCIIO_VENDOR_ID_NONE !=
+		 pcibr_soft->bs_slot[win].bss_vendor_id))
+		continue;
+
+	    /* It's a free window, and we fit in it.
+	     * Set up Device(win) to our taste.
+	     */
+	    mbase = pci_addr & mmask;
+
+	    /* check that we would really get from
+	     * here to there.
+	     */
+	    if ((mbase | offset) != pci_addr)
+		continue;
+
+	    devreg &= ~BRIDGE_DEV_OFF_MASK;
+	    if (space != PCIIO_SPACE_IO)
+		devreg |= BRIDGE_DEV_DEV_IO_MEM;
+	    else
+		devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
+	    devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;
+
+	    /* default is WORD_VALUES.
+	     * if you specify both,
+	     * operation is undefined.
+	     */
+	    if (flags & PCIIO_BYTE_STREAM)
+		devreg |= BRIDGE_DEV_DEV_SWAP;
+	    else
+		devreg &= ~BRIDGE_DEV_DEV_SWAP;
+
+	    if (pcibr_soft->bs_slot[win].bss_device != devreg) {
+		bridge->b_device[win].reg = devreg;
+		pcibr_soft->bs_slot[win].bss_device = devreg;
+		bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
+
+#if DEBUG && PCI_DEBUG
+		printk("pcibr Device(%d): 0x%lx\n", win, bridge->b_device[win].reg);
+#endif
+	    }
+	    pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
+	    pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
+	    xio_addr = BRIDGE_DEVIO(win) + (pci_addr - mbase);
+
+#if DEBUG && PCI_DEBUG
+	    printk("%s LINE %d map to space %d space desc 0x%x[%lx..%lx] for slot %d allocates DevIO(%d) devreg 0x%x\n", 
+		    __FUNCTION__, __LINE__, space, space_desc,
+		    pci_addr, pci_addr + req_size - 1,
+		    slot, win, devreg);
+#endif
+
+	    goto done;
+	}				/* endif DevIO(x) not pointed */
+	mbase = pcibr_soft->bs_slot[win].bss_devio.bssd_base;
+
+	/* Now check for request incompat with DevIO(x)
+	 */
+	if ((mspace != space) ||
+	    (pci_addr < mbase) ||
+	    ((pci_addr + req_size) > (mbase + msize)) ||
+	    ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
+	    (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
+	    continue;
+
+	/* DevIO(x) window is pointed at PCI space
+	 * that includes our target. Calculate the
+	 * final XIO address, release the lock and
+	 * return.
+	 */
+	xio_addr = BRIDGE_DEVIO(win) + (pci_addr - mbase);
+
+#if DEBUG && PCI_DEBUG
+	printk("%s LINE %d map to space %d [0x%p..0x%p] for slot %d uses DevIO(%d)\n",
+		__FUNCTION__, __LINE__, space,  pci_addr, pci_addr + req_size - 1, slot, win);
+#endif
+	goto done;
+    }
+
+    switch (space) {
+	/*
+	 * Accesses to device decode
+	 * areas that do a not fit
+	 * within the DevIO(x) space are
+	 * modified to be accesses via
+	 * the direct mapping areas.
+	 *
+	 * If necessary, drivers can
+	 * explicitly ask for mappings
+	 * into these address spaces,
+	 * but this should never be needed.
+	 */
+    case PCIIO_SPACE_MEM:		/* "mem space" */
+    case PCIIO_SPACE_MEM32:		/* "mem, use 32-bit-wide bus" */
+	if ((pci_addr + BRIDGE_PCI_MEM32_BASE + req_size - 1) <=
+	    BRIDGE_PCI_MEM32_LIMIT)
+	    xio_addr = pci_addr + BRIDGE_PCI_MEM32_BASE;
+	break;
+
+    case PCIIO_SPACE_MEM64:		/* "mem, use 64-bit-wide bus" */
+	if ((pci_addr + BRIDGE_PCI_MEM64_BASE + req_size - 1) <=
+	    BRIDGE_PCI_MEM64_LIMIT)
+	    xio_addr = pci_addr + BRIDGE_PCI_MEM64_BASE;
+	break;
+
+    case PCIIO_SPACE_IO:		/* "i/o space" */
+	/* Bridge Hardware Bug WAR #482741:
+	 * The 4G area that maps directly from
+	 * XIO space to PCI I/O space is busted
+	 * until Bridge Rev D.
+	 */
+	if ((pcibr_soft->bs_rev_num > BRIDGE_PART_REV_C) &&
+	    ((pci_addr + BRIDGE_PCI_IO_BASE + req_size - 1) <=
+	     BRIDGE_PCI_IO_LIMIT))
+	    xio_addr = pci_addr + BRIDGE_PCI_IO_BASE;
+	break;
+    }
+
+    /* Check that "Direct PIO" byteswapping matches,
+     * try to change it if it does not.
+     * The swap setting is per-space and bridge-wide, so the first
+     * direct-PIO user of a space fixes it for everyone.
+     */
+    if (xio_addr != XIO_NOWHERE) {
+	unsigned                bst;	/* nonzero to set bytestream */
+	unsigned               *bfp;	/* addr of record of how swapper is set */
+	unsigned                swb;	/* which control bit to mung */
+	unsigned                bfo;	/* current swapper setting */
+	unsigned                bfn;	/* desired swapper setting */
+
+	bfp = ((space == PCIIO_SPACE_IO)
+	       ? (&pcibr_soft->bs_pio_end_io)
+	       : (&pcibr_soft->bs_pio_end_mem));
+
+	bfo = *bfp;
+
+	bst = flags & PCIIO_BYTE_STREAM;
+
+	bfn = bst ? PCIIO_BYTE_STREAM : PCIIO_WORD_VALUES;
+
+	if (bfn == bfo) {		/* we already match. */
+	    ;
+	} else if (bfo != 0) {		/* we have a conflict. */
+#if DEBUG && PCI_DEBUG
+	    printk("pcibr_addr_pci_to_xio: swap conflict in space %d , was%s%s, want%s%s\n",
+		    space, 
+		    bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
+		    bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
+		    bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
+		    bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
+#endif
+	    xio_addr = XIO_NOWHERE;
+	} else {			/* OK to make the change. */
+	    bridgereg_t             octl, nctl;
+
+	    swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
+	    octl = bridge->b_wid_control;
+	    nctl = bst ? octl | swb : octl & ~swb;
+
+	    if (octl != nctl)		/* make the change if any */
+		bridge->b_wid_control = nctl;
+
+	    *bfp = bfn;			/* record the assignment */
+
+#if DEBUG && PCI_DEBUG
+	    printk("pcibr_addr_pci_to_xio: swap for space %d  set to%s%s\n",
+		    space, 
+		    bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
+		    bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
+#endif
+	}
+    }
+  done:
+    pcibr_unlock(pcibr_soft, s);
+    return xio_addr;
+}
+
+/*ARGSUSED6 */
+/*
+ * pcibr_piomap_alloc: allocate a PIO mapping of 'req_size' bytes at
+ * 'pci_addr' in the given PCI space for the device at 'pconn_vhdl'.
+ *
+ * An idle entry (bp_mapsz == 0) from the device's piomap list is
+ * recycled when available; otherwise a new entry is allocated and
+ * linked in.  The PCI address is translated to XIO via
+ * pcibr_addr_pci_to_xio and backed by an xtalk piomap.
+ *
+ * Returns the piomap, or NULL on bad sizes, untranslatable address,
+ * or allocation failure.
+ */
+pcibr_piomap_t
+pcibr_piomap_alloc(devfs_handle_t pconn_vhdl,
+		   device_desc_t dev_desc,
+		   pciio_space_t space,
+		   iopaddr_t pci_addr,
+		   size_t req_size,
+		   size_t req_size_max,
+		   unsigned flags)
+{
+    pcibr_info_t	    pcibr_info = pcibr_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = &pcibr_info->f_c;
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    devfs_handle_t            xconn_vhdl = pcibr_soft->bs_conn;
+
+    pcibr_piomap_t         *mapptr;
+    pcibr_piomap_t          maplist;
+    pcibr_piomap_t          pcibr_piomap;
+    iopaddr_t               xio_addr;
+    xtalk_piomap_t          xtalk_piomap;
+    unsigned                s;
+
+    /* Make sure that the req sizes are non-zero */
+    if ((req_size < 1) || (req_size_max < 1))
+	return NULL;
+
+    /*
+     * Code to translate slot/space/addr
+     * into xio_addr is common between
+     * this routine and pcibr_piotrans_addr.
+     */
+    xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
+
+    if (xio_addr == XIO_NOWHERE)
+	return NULL;
+
+    /* Check the piomap list to see if there is already an allocated
+     * piomap entry but not in use. If so use that one. Otherwise
+     * allocate a new piomap entry and add it to the piomap list
+     */
+    mapptr = &(pcibr_info->f_piomap);
+
+    s = pcibr_lock(pcibr_soft);
+    for (pcibr_piomap = *mapptr;
+	 pcibr_piomap != NULL;
+	 pcibr_piomap = pcibr_piomap->bp_next) {
+	if (pcibr_piomap->bp_mapsz == 0)
+	    break;
+    }
+
+    if (pcibr_piomap)
+	mapptr = NULL;			/* recycling an idle entry; keep the lock */
+    else {
+	pcibr_unlock(pcibr_soft, s);
+	NEW(pcibr_piomap);
+	/* BUGFIX: NEW() can fail; the original code dereferenced
+	 * the result unconditionally and would oops on NULL.
+	 */
+	if (!pcibr_piomap)
+	    return NULL;
+    }
+
+    pcibr_piomap->bp_dev = pconn_vhdl;
+    pcibr_piomap->bp_slot = pciio_slot;
+    pcibr_piomap->bp_flags = flags;
+    pcibr_piomap->bp_space = space;
+    pcibr_piomap->bp_pciaddr = pci_addr;
+    pcibr_piomap->bp_mapsz = req_size;
+    pcibr_piomap->bp_soft = pcibr_soft;
+    pcibr_piomap->bp_toc[0] = 0;
+
+    /* A freshly allocated entry must be linked onto the device's
+     * list; retake the lock dropped around NEW() above.
+     */
+    if (mapptr) {
+	s = pcibr_lock(pcibr_soft);
+	maplist = *mapptr;
+	pcibr_piomap->bp_next = maplist;
+	*mapptr = pcibr_piomap;
+    }
+    pcibr_unlock(pcibr_soft, s);
+
+    xtalk_piomap =
+	xtalk_piomap_alloc(xconn_vhdl, 0,
+			   xio_addr,
+			   req_size, req_size_max,
+			   flags & PIOMAP_FLAGS);
+    if (xtalk_piomap) {
+	pcibr_piomap->bp_xtalk_addr = xio_addr;
+	pcibr_piomap->bp_xtalk_pio = xtalk_piomap;
+    } else {
+	pcibr_piomap->bp_mapsz = 0;	/* return the entry to the idle pool */
+	pcibr_piomap = 0;
+    }
+    return pcibr_piomap;
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_piomap_free: release a PIO mapping.  The backing xtalk map is
+ * freed and the entry is marked idle (bp_mapsz == 0) so that
+ * pcibr_piomap_alloc can recycle it rather than allocate a new one.
+ */
+void
+pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
+{
+    xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
+    pcibr_piomap->bp_mapsz = 0;
+    pcibr_piomap->bp_xtalk_pio = 0;
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_piomap_addr: return an address usable for PIO covering
+ * 'req_size' bytes at 'pci_addr' within an established mapping, by
+ * rebasing the request into the map's XIO range.
+ */
+caddr_t
+pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,
+		  iopaddr_t pci_addr,
+		  size_t req_size)
+{
+    iopaddr_t               xtalk_addr;
+
+    /* offset of the request within the map, rebased to XIO space */
+    xtalk_addr = pcibr_piomap->bp_xtalk_addr +
+	(pci_addr - pcibr_piomap->bp_pciaddr);
+
+    return xtalk_piomap_addr(pcibr_piomap->bp_xtalk_pio,
+			     xtalk_addr, req_size);
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_piomap_done: signal completion of PIO through the mapping;
+ * simply forwarded to the underlying xtalk piomap.
+ */
+void
+pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
+{
+    xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_piotrans_addr: translation-only PIO path.  Like
+ * pcibr_piomap_alloc but allocates no per-map state; the PCI address
+ * is pushed through pcibr_addr_pci_to_xio and then through the xtalk
+ * provider's translation.  Returns NULL when no translation exists.
+ */
+caddr_t
+pcibr_piotrans_addr(devfs_handle_t pconn_vhdl,
+		    device_desc_t dev_desc,
+		    pciio_space_t space,
+		    iopaddr_t pci_addr,
+		    size_t req_size,
+		    unsigned flags)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    devfs_handle_t            xconn_vhdl = pcibr_soft->bs_conn;
+    iopaddr_t               xio_addr;
+
+    xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space,
+				     pci_addr, req_size, flags);
+
+    return (xio_addr == XIO_NOWHERE)
+	? NULL
+	: xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr,
+			      req_size, flags & PIOMAP_FLAGS);
+}
+
+/*
+ * PIO Space allocation and management.
+ *      Allocate and manage the PCI PIO space (mem and io space).
+ *      These routines are simplistic for now: allocation is a bump
+ *      pointer plus a per-device free list, freed chunks are only
+ *      recycled on a matching size/alignment request, and the scheme
+ *      is prone to fragmentation.  TODO: change the scheme to use
+ *      bitmaps.
+ */
+
+/*ARGSUSED */
+/*
+ * pcibr_piospace_alloc: hand out 'req_size' bytes of PCI PIO space
+ * (IO or MEM) aligned to 'alignment' (a power of two, at least one
+ * page).  A previously freed chunk of sufficient size and alignment
+ * is recycled when one exists; otherwise space is carved off the
+ * bus's bump pointer for that space.  Returns the PCI start address,
+ * or 0 when the space is exhausted or 'space' is unsupported.
+ */
+iopaddr_t
+pcibr_piospace_alloc(devfs_handle_t pconn_vhdl,
+		     device_desc_t dev_desc,
+		     pciio_space_t space,
+		     size_t req_size,
+		     size_t alignment)
+{
+    pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
+    pciio_info_t            pciio_info = &pcibr_info->f_c;
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+
+    pciio_piospace_t        piosp;
+    int                     s;
+
+    iopaddr_t              *pciaddr, *pcilast;
+    iopaddr_t               start_addr;
+    size_t                  align_mask;
+
+    /*
+     * Check for proper alignment
+     */
+    ASSERT(alignment >= NBPP);
+    ASSERT((alignment & (alignment - 1)) == 0);
+
+    align_mask = alignment - 1;
+    s = pcibr_lock(pcibr_soft);
+
+    /*
+     * First look if a previously allocated chunk exists.
+     */
+    if ((piosp = pcibr_info->f_piospace) != (pciio_piospace_t)0) {
+	/*
+	 * Look through the list for a right sized free chunk.
+	 * Note: a recycled chunk may be larger than req_size; the
+	 * whole chunk stays reserved until it is freed again.
+	 */
+	do {
+	    if (piosp->free &&
+		(piosp->space == space) &&
+		(piosp->count >= req_size) &&
+		!(piosp->start & align_mask)) {
+		piosp->free = 0;
+		pcibr_unlock(pcibr_soft, s);
+		return piosp->start;
+	    }
+	    piosp = piosp->next;
+	} while (piosp);
+    }
+    ASSERT(!piosp);
+
+    /* No recyclable chunk; pick the bump pointer and limit for the
+     * requested space.
+     */
+    switch (space) {
+    case PCIIO_SPACE_IO:
+	pciaddr = &pcibr_soft->bs_spinfo.pci_io_base;
+	pcilast = &pcibr_soft->bs_spinfo.pci_io_last;
+	break;
+    case PCIIO_SPACE_MEM:
+    case PCIIO_SPACE_MEM32:
+	pciaddr = &pcibr_soft->bs_spinfo.pci_mem_base;
+	pcilast = &pcibr_soft->bs_spinfo.pci_mem_last;
+	break;
+    default:
+	ASSERT(0);
+	pcibr_unlock(pcibr_soft, s);
+	return 0;
+    }
+
+    start_addr = *pciaddr;
+
+    /*
+     * Align start_addr.
+     */
+    if (start_addr & align_mask)
+	start_addr = (start_addr + align_mask) & ~align_mask;
+
+    if ((start_addr + req_size) > *pcilast) {
+	/*
+	 * If too big a request, reject it.
+	 */
+	pcibr_unlock(pcibr_soft, s);
+	return 0;
+    }
+    *pciaddr = (start_addr + req_size);
+
+    /* Record the allocation (free == 0 means in use) so it can be
+     * recycled by a later matching request after it is freed.
+     */
+    NEW(piosp);
+    piosp->free = 0;
+    piosp->space = space;
+    piosp->start = start_addr;
+    piosp->count = req_size;
+    piosp->next = pcibr_info->f_piospace;
+    pcibr_info->f_piospace = piosp;
+
+    pcibr_unlock(pcibr_soft, s);
+    return start_addr;
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_piospace_free: return a chunk previously handed out by
+ * pcibr_piospace_alloc.  Only whole chunks can be freed: the chunk
+ * is matched by its start address, and a mismatched 'req_size' is
+ * warned about but the entire chunk is freed anyway.  The space is
+ * only marked free for recycling -- it is never returned to the
+ * bus's bump pointer.
+ */
+void
+pcibr_piospace_free(devfs_handle_t pconn_vhdl,
+		    pciio_space_t space,
+		    iopaddr_t pciaddr,
+		    size_t req_size)
+{
+    pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
+
+    pciio_piospace_t        piosp;
+    int                     s;
+    char                    name[1024];
+
+    /*
+     * Look through the bridge data structures for the pciio_piospace_t
+     * structure corresponding to  'pciaddr'
+     */
+    s = pcibr_lock(pcibr_soft);
+    piosp = pcibr_info->f_piospace;
+    while (piosp) {
+	/*
+	 * Piospace free can only be for the complete
+	 * chunk and not parts of it..
+	 */
+	if (piosp->start == pciaddr) {
+	    if (piosp->count == req_size)
+		break;
+	    /*
+	     * Improper size passed for freeing..
+	     * Print a message and break; the whole chunk is still
+	     * freed below (piosp points at the matched entry).
+	     */
+	    hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
+	    PRINT_WARNING("pcibr_piospace_free: error");
+	    PRINT_WARNING("Device %s freeing size (0x%lx) different than allocated (0x%lx)",
+					name, req_size, piosp->count);
+	    PRINT_WARNING("Freeing 0x%lx instead", piosp->count);
+	    break;
+	}
+	piosp = piosp->next;
+    }
+
+    /* No chunk starts at 'pciaddr': nothing to free. */
+    if (!piosp) {
+	PRINT_WARNING(
+		"pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
+		pciaddr, req_size);
+	pcibr_unlock(pcibr_soft, s);
+	return;
+    }
+    piosp->free = 1;
+    pcibr_unlock(pcibr_soft, s);
+    return;
+}
+
+/* =====================================================================
+ *    DMA MANAGEMENT
+ *
+ *      The Bridge ASIC provides three methods of doing
+ *      DMA: via a "direct map" register available in
+ *      32-bit PCI space (which selects a contiguous 2G
+ *      address space on some other widget), via
+ *      "direct" addressing via 64-bit PCI space (all
+ *      destination information comes from the PCI
+ *      address, including transfer attributes), and via
+ *      a "mapped" region that allows a bunch of
+ *      different small mappings to be established with
+ *      the PMU.
+ *
+ *      For efficiency, we most prefer to use the 32-bit
+ *      direct mapping facility, since it requires no
+ *      resource allocations. The advantage of using the
+ *      PMU over the 64-bit direct is that single-cycle
+ *      PCI addressing can be used; the advantage of
+ *      using 64-bit direct over PMU addressing is that
+ *      we do not have to allocate entries in the PMU.
+ */
+
+/*
+ * Convert PCI-generic software flags and Bridge-specific software flags
+ * into Bridge-specific Direct Map attribute bits.
+ *
+ * Ordering matters: the generic macro flags (DMA_DATA/DMA_CMD) are
+ * applied first, then the generic detail flags, then the
+ * provider-specific PCIBR_* flags, so more specific requests override
+ * less specific ones.
+ */
+LOCAL iopaddr_t
+pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
+{
+    iopaddr_t               attributes = 0;
+
+    /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
+#ifdef IRIX
+    ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
+#endif
+
+    /* Generic macro flags
+     */
+    if (flags & PCIIO_DMA_DATA) {	/* standard data channel */
+	attributes &= ~PCI64_ATTR_BAR;	/* no barrier bit */
+	attributes |= PCI64_ATTR_PREF;	/* prefetch on */
+    }
+    if (flags & PCIIO_DMA_CMD) {	/* standard command channel */
+	attributes |= PCI64_ATTR_BAR;	/* barrier bit on */
+	attributes &= ~PCI64_ATTR_PREF;	/* disable prefetch */
+    }
+    /* Generic detail flags
+     */
+    if (flags & PCIIO_PREFETCH)
+	attributes |= PCI64_ATTR_PREF;
+    if (flags & PCIIO_NOPREFETCH)
+	attributes &= ~PCI64_ATTR_PREF;
+
+    /* the swap bit is in the address attributes for xbridge;
+     * plain Bridge has no such address bit, so this is xbridge-only.
+     */
+    if (pcibr_soft->bs_xbridge) {
+    	if (flags & PCIIO_BYTE_STREAM)
+        	attributes |= PCI64_ATTR_SWAP;
+    	if (flags & PCIIO_WORD_VALUES)
+        	attributes &= ~PCI64_ATTR_SWAP;
+    }
+
+    /* Provider-specific flags
+     */
+    if (flags & PCIBR_BARRIER)
+	attributes |= PCI64_ATTR_BAR;
+    if (flags & PCIBR_NOBARRIER)
+	attributes &= ~PCI64_ATTR_BAR;
+
+    if (flags & PCIBR_PREFETCH)
+	attributes |= PCI64_ATTR_PREF;
+    if (flags & PCIBR_NOPREFETCH)
+	attributes &= ~PCI64_ATTR_PREF;
+
+    if (flags & PCIBR_PRECISE)
+	attributes |= PCI64_ATTR_PREC;
+    if (flags & PCIBR_NOPRECISE)
+	attributes &= ~PCI64_ATTR_PREC;
+
+    if (flags & PCIBR_VCHAN1)
+	attributes |= PCI64_ATTR_VIRTUAL;
+    if (flags & PCIBR_VCHAN0)
+	attributes &= ~PCI64_ATTR_VIRTUAL;
+
+    return (attributes);
+}
+
+/*
+ * Convert PCI-generic software flags and Bridge-specific software flags
+ * into Bridge-specific Address Translation Entry attribute bits.
+ * Later, more specific flag groups override earlier ones.
+ */
+LOCAL bridge_ate_t
+pcibr_flags_to_ate(unsigned flags)
+{
+    bridge_ate_t            ate;
+
+    /* With nothing specified, the ATE comes up coherent and valid,
+     * with barrier, prefetch and precise all off.
+     */
+    ate = ATE_CO | ATE_V;
+
+    /* Generic macro flags: DATA turns prefetch on and the barrier
+     * off; CMD does the opposite.
+     */
+    if (flags & PCIIO_DMA_DATA) {
+	ate &= ~ATE_BAR;
+	ate |= ATE_PREF;
+    }
+    if (flags & PCIIO_DMA_CMD) {
+	ate |= ATE_BAR;
+	ate &= ~ATE_PREF;
+    }
+
+    /* Generic detail flags override the macro flags ...
+     */
+    if (flags & PCIIO_PREFETCH)
+	ate |= ATE_PREF;
+    if (flags & PCIIO_NOPREFETCH)
+	ate &= ~ATE_PREF;
+
+    /* ... and provider-specific flags have the final say.
+     */
+    if (flags & PCIBR_BARRIER)
+	ate |= ATE_BAR;
+    if (flags & PCIBR_NOBARRIER)
+	ate &= ~ATE_BAR;
+
+    if (flags & PCIBR_PREFETCH)
+	ate |= ATE_PREF;
+    if (flags & PCIBR_NOPREFETCH)
+	ate &= ~ATE_PREF;
+
+    if (flags & PCIBR_PRECISE)
+	ate |= ATE_PREC;
+    if (flags & PCIBR_NOPRECISE)
+	ate &= ~ATE_PREC;
+
+    return (ate);
+}
+
+/*ARGSUSED */
+pcibr_dmamap_t
+pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl,
+		   device_desc_t dev_desc,
+		   size_t req_size_max,
+		   unsigned flags)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    devfs_handle_t            xconn_vhdl = pcibr_soft->bs_conn;
+    pciio_slot_t            slot;
+    xwidgetnum_t            xio_port;
+
+    xtalk_dmamap_t          xtalk_dmamap;
+    pcibr_dmamap_t          pcibr_dmamap;
+    int                     ate_count;
+    int                     ate_index;
+
+    /* merge in forced flags */
+    flags |= pcibr_soft->bs_dma_flags;
+
+    NEWf(pcibr_dmamap, flags);
+    if (!pcibr_dmamap)
+	return 0;
+
+    xtalk_dmamap = xtalk_dmamap_alloc(xconn_vhdl, dev_desc, req_size_max,
+				      flags & DMAMAP_FLAGS);
+    if (!xtalk_dmamap) {
+#if PCIBR_ATE_DEBUG
+	printk("pcibr_attach: xtalk_dmamap_alloc failed\n");
+#endif
+	DEL(pcibr_dmamap);
+	return 0;
+    }
+    xio_port = pcibr_soft->bs_mxid;
+    slot = pciio_info_slot_get(pciio_info);
+
+    pcibr_dmamap->bd_dev = pconn_vhdl;
+    pcibr_dmamap->bd_slot = slot;
+    pcibr_dmamap->bd_soft = pcibr_soft;
+    pcibr_dmamap->bd_xtalk = xtalk_dmamap;
+    pcibr_dmamap->bd_max_size = req_size_max;
+    pcibr_dmamap->bd_xio_port = xio_port;
+
+    if (flags & PCIIO_DMA_A64) {
+	if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D64_BITS)) {
+	    iopaddr_t               pci_addr;
+	    int                     have_rrbs;
+	    int                     min_rrbs;
+
+	    /* Device is capable of A64 operations,
+	     * and the attributes of the DMA are
+	     * consistant with any previous DMA
+	     * mappings using shared resources.
+	     */
+
+	    pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
+
+	    pcibr_dmamap->bd_flags = flags;
+	    pcibr_dmamap->bd_xio_addr = 0;
+	    pcibr_dmamap->bd_pci_addr = pci_addr;
+
+	    /* Make sure we have an RRB (or two).
+	     */
+	    if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
+		if (flags & PCIBR_VCHAN1)
+		    slot += PCIBR_RRB_SLOT_VIRTUAL;
+		have_rrbs = pcibr_soft->bs_rrb_valid[slot];
+		if (have_rrbs < 2) {
+		    if (pci_addr & PCI64_ATTR_PREF)
+			min_rrbs = 2;
+		    else
+			min_rrbs = 1;
+		    if (have_rrbs < min_rrbs)
+			do_pcibr_rrb_autoalloc(pcibr_soft, slot, min_rrbs - have_rrbs);
+		}
+	    }
+#if PCIBR_ATE_DEBUG
+	    printk("pcibr_dmamap_alloc: using direct64\n");
+#endif
+	    return pcibr_dmamap;
+	}
+#if PCIBR_ATE_DEBUG
+	printk("pcibr_dmamap_alloc: unable to use direct64\n");
+#endif
+	flags &= ~PCIIO_DMA_A64;
+    }
+    if (flags & PCIIO_FIXED) {
+	/* warning: mappings may fail later,
+	 * if direct32 can't get to the address.
+	 */
+	if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
+	    /* User desires DIRECT A32 operations,
+	     * and the attributes of the DMA are
+	     * consistant with any previous DMA
+	     * mappings using shared resources.
+	     * Mapping calls may fail if target
+	     * is outside the direct32 range.
+	     */
+#if PCIBR_ATE_DEBUG
+	    printk("pcibr_dmamap_alloc: using direct32\n");
+#endif
+	    pcibr_dmamap->bd_flags = flags;
+	    pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
+	    pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;
+	    return pcibr_dmamap;
+	}
+#if PCIBR_ATE_DEBUG
+	printk("pcibr_dmamap_alloc: unable to use direct32\n");
+#endif
+	/* If the user demands FIXED and we can't
+	 * give it to him, fail.
+	 */
+	xtalk_dmamap_free(xtalk_dmamap);
+	DEL(pcibr_dmamap);
+	return 0;
+    }
+    /*
+     * Allocate Address Translation Entries from the mapping RAM.
+     * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
+     * the maximum number of ATEs is based on the worst-case
+     * scenario, where the requested target is in the
+     * last byte of an ATE; thus, mapping IOPGSIZE+2
+     * does end up requiring three ATEs.
+     */
+    if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
+	ate_count = IOPG((IOPGSIZE - 1)	/* worst case start offset */
+		     +req_size_max	/* max mapping bytes */
+		     - 1) + 1;		/* round UP */
+    } else {	/* assume requested target is page aligned */
+	ate_count = IOPG(req_size_max   /* max mapping bytes */
+		     - 1) + 1;		/* round UP */
+    }
+
+    ate_index = pcibr_ate_alloc(pcibr_soft, ate_count);
+
+    if (ate_index != -1) {
+	if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
+	    bridge_ate_t            ate_proto;
+	    int                     have_rrbs;
+	    int                     min_rrbs;
+
+#if PCIBR_ATE_DEBUG
+	    printk("pcibr_dmamap_alloc: using PMU\n");
+#endif
+
+	    ate_proto = pcibr_flags_to_ate(flags);
+
+	    pcibr_dmamap->bd_flags = flags;
+	    pcibr_dmamap->bd_pci_addr =
+		PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
+	    /*
+	     * for xbridge the byte-swap bit == bit 29 of pci address
+	     */
+	    if (pcibr_soft->bs_xbridge) {
+		    if (flags & PCIIO_BYTE_STREAM)
+			    ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
+		    /*
+		     * If swap was set in bss_device in pcibr_endian_set()
+		     * we need to change the address bit.
+		     */
+		    if (pcibr_soft->bs_slot[slot].bss_device & 
+							BRIDGE_DEV_SWAP_PMU)
+			    ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
+		    if (flags & PCIIO_WORD_VALUES)
+			    ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
+	    }
+	    pcibr_dmamap->bd_xio_addr = 0;
+	    pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
+	    pcibr_dmamap->bd_ate_index = ate_index;
+	    pcibr_dmamap->bd_ate_count = ate_count;
+	    pcibr_dmamap->bd_ate_proto = ate_proto;
+
+	    /* Make sure we have an RRB (or two).
+	     */
+	    if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
+		have_rrbs = pcibr_soft->bs_rrb_valid[slot];
+		if (have_rrbs < 2) {
+		    if (ate_proto & ATE_PREF)
+			min_rrbs = 2;
+		    else
+			min_rrbs = 1;
+		    if (have_rrbs < min_rrbs)
+			do_pcibr_rrb_autoalloc(pcibr_soft, slot, min_rrbs - have_rrbs);
+		}
+	    }
+	    if (ate_index >= pcibr_soft->bs_int_ate_size && 
+				!pcibr_soft->bs_xbridge) {
+		bridge_t               *bridge = pcibr_soft->bs_base;
+		volatile unsigned      *cmd_regp;
+		unsigned                cmd_reg;
+		unsigned                s;
+
+		pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_SSRAM;
+
+		s = pcibr_lock(pcibr_soft);
+		cmd_regp = &(bridge->
+			     b_type0_cfg_dev[slot].
+			     l[PCI_CFG_COMMAND / 4]);
+		cmd_reg = *cmd_regp;
+		pcibr_soft->bs_slot[slot].bss_cmd_pointer = cmd_regp;
+		pcibr_soft->bs_slot[slot].bss_cmd_shadow = cmd_reg;
+		pcibr_unlock(pcibr_soft, s);
+	    }
+	    return pcibr_dmamap;
+	}
+#if PCIBR_ATE_DEBUG
+	printk("pcibr_dmamap_alloc: unable to use PMU\n");
+#endif
+	pcibr_ate_free(pcibr_soft, ate_index, ate_count);
+    }
+    /* total failure: sorry, you just can't
+     * get from here to there that way.
+     */
+#if PCIBR_ATE_DEBUG
+    printk("pcibr_dmamap_alloc: complete failure.\n");
+#endif
+    xtalk_dmamap_free(xtalk_dmamap);
+    DEL(pcibr_dmamap);
+    return 0;
+}
+
+/*ARGSUSED */
+void
+pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
+{
+    pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
+    pciio_slot_t            slot = pcibr_dmamap->bd_slot;
+
+#ifdef IRIX
+    unsigned                flags = pcibr_dmamap->bd_flags;
+#endif
+
+    /* Make sure that bss_ext_ates_active
+     * is properly kept up to date.
+     */
+#ifdef IRIX
+    if (PCIBR_DMAMAP_BUSY & flags)
+	if (PCIBR_DMAMAP_SSRAM & flags)
+	    atomicAddInt(&(pcibr_soft->
+			   bs_slot[slot].
+			   bss_ext_ates_active), -1);
+#endif
+
+    xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
+
+    if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
+	pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
+    }
+    if (pcibr_dmamap->bd_ate_count) {
+	pcibr_ate_free(pcibr_dmamap->bd_soft,
+		       pcibr_dmamap->bd_ate_index,
+		       pcibr_dmamap->bd_ate_count);
+	pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_PMU_BITS);
+    }
+    DEL(pcibr_dmamap);
+}
+
+/*
+ * Setup an Address Translation Entry as specified.  Use either the Bridge
+ * internal maps or the external map RAM, as appropriate.
+ */
+LOCAL bridge_ate_p
+pcibr_ate_addr(pcibr_soft_t pcibr_soft,
+	       int ate_index)
+{
+    bridge_t *bridge = pcibr_soft->bs_base;
+
+    return (ate_index < pcibr_soft->bs_int_ate_size)
+	? &(bridge->b_int_ate_ram[ate_index].wr)
+	: &(bridge->b_ext_ate_ram[ate_index]);
+}
+
+/*
+ *    pcibr_addr_xio_to_pci: given a PIO range, hand
+ *      back the corresponding base PCI MEM address;
+ *      this is used to short-circuit DMA requests that
+ *      loop back onto this PCI bus.
+ */
+LOCAL iopaddr_t
+pcibr_addr_xio_to_pci(pcibr_soft_t soft,
+		      iopaddr_t xio_addr,
+		      size_t req_size)
+{
+    iopaddr_t               xio_lim = xio_addr + req_size - 1;
+    iopaddr_t               pci_addr;
+    pciio_slot_t            slot;
+
+    if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
+	(xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
+	pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
+	return pci_addr;
+    }
+    if ((xio_addr >= BRIDGE_PCI_MEM64_BASE) &&
+	(xio_lim <= BRIDGE_PCI_MEM64_LIMIT)) {
+	pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
+	return pci_addr;
+    }
+    for (slot = 0; slot < 8; ++slot)
+	if ((xio_addr >= BRIDGE_DEVIO(slot)) &&
+	    (xio_lim < BRIDGE_DEVIO(slot + 1))) {
+	    bridgereg_t             dev;
+
+	    dev = soft->bs_slot[slot].bss_device;
+	    pci_addr = dev & BRIDGE_DEV_OFF_MASK;
+	    pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
+	    pci_addr += xio_addr - BRIDGE_DEVIO(slot);
+	    return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
+	}
+    return 0;
+}
+
+/* We are starting to get more complexity
+ * surrounding writing ATEs, so pull
+ * the writing code into this new function.
+ * XXX mail ranga@engr for IP27 prom!
+ */
+
+#if PCIBR_FREEZE_TIME
+#define	ATE_FREEZE()	s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
+#else
+#define	ATE_FREEZE()	s = ate_freeze(pcibr_dmamap, cmd_regs)
+#endif
+
+LOCAL unsigned
+ate_freeze(pcibr_dmamap_t pcibr_dmamap,
+#if PCIBR_FREEZE_TIME
+	   unsigned *freeze_time_ptr,
+#endif
+	   unsigned *cmd_regs)
+{
+    pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
+#ifdef IRIX
+    int                     dma_slot = pcibr_dmamap->bd_slot;
+#endif
+    int                     ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
+    int                     slot;
+
+    unsigned                s;
+    unsigned                cmd_reg;
+    volatile unsigned      *cmd_lwa;
+    unsigned                cmd_lwd;
+
+    if (!ext_ates)
+	return 0;
+
+    /* Bridge Hardware Bug WAR #484930:
+     * Bridge can't handle updating External ATEs
+     * while DMA is occuring that uses External ATEs,
+     * even if the particular ATEs involved are disjoint.
+     */
+
+    /* need to prevent anyone else from
+     * unfreezing the grant while we
+     * are working; also need to prevent
+     * this thread from being interrupted
+     * to keep PCI grant freeze time
+     * at an absolute minimum.
+     */
+    s = pcibr_lock(pcibr_soft);
+
+#ifdef IRIX
+    /* just in case pcibr_dmamap_done was not called */
+    if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
+	pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
+	if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
+	    atomicAddInt(&(pcibr_soft->
+			   bs_slot[dma_slot].
+			   bss_ext_ates_active), -1);
+	xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
+    }
+#endif
+#if PCIBR_FREEZE_TIME
+    *freeze_time_ptr = get_timestamp();
+#endif
+
+    cmd_lwa = 0;
+    for (slot = 0; slot < 8; ++slot)
+	if (pcibr_soft->
+	    bs_slot[slot].
+	    bss_ext_ates_active) {
+
+	    cmd_reg = pcibr_soft->
+		bs_slot[slot].
+		bss_cmd_shadow;
+	    if (cmd_reg & PCI_CMD_BUS_MASTER) {
+		cmd_lwa = pcibr_soft->
+		    bs_slot[slot].
+		    bss_cmd_pointer;
+		cmd_lwd = cmd_reg ^ PCI_CMD_BUS_MASTER;
+		cmd_lwa[0] = cmd_lwd;
+	    }
+	    cmd_regs[slot] = cmd_reg;
+	} else
+	    cmd_regs[slot] = 0;
+
+    if (cmd_lwa) {
+	    bridge_t	*bridge = pcibr_soft->bs_base;
+
+	    /* Read the last master bit that has been cleared. This PIO read
+	     * on the PCI bus is to ensure the completion of any DMAs that
+	     * are due to bus requests issued by PCI devices before the
+	     * clearing of master bits.
+	     */
+	    cmd_lwa[0];
+
+	    /* Flush all the write buffers in the bridge */
+	    for (slot = 0; slot < 8; ++slot)
+		    if (pcibr_soft->
+			bs_slot[slot].
+			bss_ext_ates_active) {
+			    /* Flush the write buffer associated with this
+			     * PCI device which might be using dma map RAM.
+			     */
+			    bridge->b_wr_req_buf[slot].reg;
+		    }
+    }
+    return s;
+}
+
+#define	ATE_WRITE()    ate_write(ate_ptr, ate_count, ate)
+
+LOCAL void
+ate_write(bridge_ate_p ate_ptr,
+	  int ate_count,
+	  bridge_ate_t ate)
+{
+    while (ate_count-- > 0) {
+	*ate_ptr++ = ate;
+	ate += IOPGSIZE;
+    }
+}
+
+
+#if PCIBR_FREEZE_TIME
+#define	ATE_THAW()	ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
+#else
+#define	ATE_THAW()	ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
+#endif
+
+LOCAL void
+ate_thaw(pcibr_dmamap_t pcibr_dmamap,
+	 int ate_index,
+#if PCIBR_FREEZE_TIME
+	 bridge_ate_t ate,
+	 int ate_total,
+	 unsigned freeze_time_start,
+#endif
+	 unsigned *cmd_regs,
+	 unsigned s)
+{
+    pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
+#ifdef IRIX
+    int                     dma_slot = pcibr_dmamap->bd_slot;
+#endif
+    int                     slot;
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    int                     ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
+
+    unsigned                cmd_reg;
+
+#if PCIBR_FREEZE_TIME
+    unsigned                freeze_time;
+    static unsigned         max_freeze_time = 0;
+    static unsigned         max_ate_total;
+#endif
+
+    if (!ext_ates)
+	return;
+
+    /* restore cmd regs */
+    for (slot = 0; slot < 8; ++slot)
+	if ((cmd_reg = cmd_regs[slot]) & PCI_CMD_BUS_MASTER)
+	    bridge->b_type0_cfg_dev[slot].l[PCI_CFG_COMMAND / 4] = cmd_reg;
+
+    pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_BUSY;
+#ifdef IRIX
+    atomicAddInt(&(pcibr_soft->
+		   bs_slot[dma_slot].
+		   bss_ext_ates_active), 1);
+#endif
+
+#if PCIBR_FREEZE_TIME
+    freeze_time = get_timestamp() - freeze_time_start;
+
+    if ((max_freeze_time < freeze_time) ||
+	(max_ate_total < ate_total)) {
+	if (max_freeze_time < freeze_time)
+	    max_freeze_time = freeze_time;
+	if (max_ate_total < ate_total)
+	    max_ate_total = ate_total;
+	pcibr_unlock(pcibr_soft, s);
+	printk("%s: pci freeze time %d usec for %d ATEs\n"
+		"\tfirst ate: %R\n",
+		pcibr_soft->bs_name,
+		freeze_time * 1000 / 1250,
+		ate_total,
+		ate, ate_bits);
+    } else
+#endif
+	pcibr_unlock(pcibr_soft, s);
+}
+
+/*ARGSUSED */
+iopaddr_t
+pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
+		  paddr_t paddr,
+		  size_t req_size)
+{
+    pcibr_soft_t            pcibr_soft;
+    iopaddr_t               xio_addr;
+    xwidgetnum_t            xio_port;
+    iopaddr_t               pci_addr;
+    unsigned                flags;
+
+    ASSERT(pcibr_dmamap != NULL);
+    ASSERT(req_size > 0);
+    ASSERT(req_size <= pcibr_dmamap->bd_max_size);
+
+    pcibr_soft = pcibr_dmamap->bd_soft;
+
+    flags = pcibr_dmamap->bd_flags;
+
+    xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
+    if (XIO_PACKED(xio_addr)) {
+	xio_port = XIO_PORT(xio_addr);
+	xio_addr = XIO_ADDR(xio_addr);
+    } else
+	xio_port = pcibr_dmamap->bd_xio_port;
+
+    /* If this DMA is to an addres that
+     * refers back to this Bridge chip,
+     * reduce it back to the correct
+     * PCI MEM address.
+     */
+    if (xio_port == pcibr_soft->bs_xid) {
+	pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
+    } else if (flags & PCIIO_DMA_A64) {
+	/* A64 DMA:
+	 * always use 64-bit direct mapping,
+	 * which always works.
+	 * Device(x) was set up during
+	 * dmamap allocation.
+	 */
+
+	/* attributes are already bundled up into bd_pci_addr.
+	 */
+	pci_addr = pcibr_dmamap->bd_pci_addr
+	    | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
+	    | xio_addr;
+
+	/* Bridge Hardware WAR #482836:
+	 * If the transfer is not cache aligned
+	 * and the Bridge Rev is <= B, force
+	 * prefetch to be off.
+	 */
+	if (flags & PCIBR_NOPREFETCH)
+	    pci_addr &= ~PCI64_ATTR_PREF;
+
+#if DEBUG && PCIBR_DMA_DEBUG
+	printk("pcibr_dmamap_addr (direct64):\n"
+		"\twanted paddr [0x%x..0x%x]\n"
+		"\tXIO port 0x%x offset 0x%x\n"
+		"\treturning PCI 0x%x\n",
+		paddr, paddr + req_size - 1,
+		xio_port, xio_addr, pci_addr);
+#endif
+    } else if (flags & PCIIO_FIXED) {
+	/* A32 direct DMA:
+	 * always use 32-bit direct mapping,
+	 * which may fail.
+	 * Device(x) was set up during
+	 * dmamap allocation.
+	 */
+
+	if (xio_port != pcibr_soft->bs_dir_xport)
+	    pci_addr = 0;		/* wrong DIDN */
+	else if (xio_addr < pcibr_dmamap->bd_xio_addr)
+	    pci_addr = 0;		/* out of range */
+	else if ((xio_addr + req_size) >
+		 (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
+	    pci_addr = 0;		/* out of range */
+	else
+	    pci_addr = pcibr_dmamap->bd_pci_addr +
+		xio_addr - pcibr_dmamap->bd_xio_addr;
+
+#if DEBUG && PCIBR_DMA_DEBUG
+	printk("pcibr_dmamap_addr (direct32):\n"
+		"\twanted paddr [0x%x..0x%x]\n"
+		"\tXIO port 0x%x offset 0x%x\n"
+		"\treturning PCI 0x%x\n",
+		paddr, paddr + req_size - 1,
+		xio_port, xio_addr, pci_addr);
+#endif
+    } else {
+	bridge_t               *bridge = pcibr_soft->bs_base;
+	iopaddr_t               offset = IOPGOFF(xio_addr);
+	bridge_ate_t            ate_proto = pcibr_dmamap->bd_ate_proto;
+	int                     ate_count = IOPG(offset + req_size - 1) + 1;
+
+	int                     ate_index = pcibr_dmamap->bd_ate_index;
+	unsigned                cmd_regs[8];
+	unsigned                s;
+
+#if PCIBR_FREEZE_TIME
+	int                     ate_total = ate_count;
+	unsigned                freeze_time;
+#endif
+
+#if PCIBR_ATE_DEBUG
+	bridge_ate_t            ate_cmp;
+	bridge_ate_p            ate_cptr;
+	unsigned                ate_lo, ate_hi;
+	int                     ate_bad = 0;
+	int                     ate_rbc = 0;
+#endif
+	bridge_ate_p            ate_ptr = pcibr_dmamap->bd_ate_ptr;
+	bridge_ate_t            ate;
+
+	/* Bridge Hardware WAR #482836:
+	 * If the transfer is not cache aligned
+	 * and the Bridge Rev is <= B, force
+	 * prefetch to be off.
+	 */
+	if (flags & PCIBR_NOPREFETCH)
+	    ate_proto &= ~ATE_PREF;
+
+	ate = ate_proto
+	    | (xio_port << ATE_TIDSHIFT)
+	    | (xio_addr - offset);
+
+	pci_addr = pcibr_dmamap->bd_pci_addr + offset;
+
+	/* Fill in our mapping registers
+	 * with the appropriate xtalk data,
+	 * and hand back the PCI address.
+	 */
+
+	ASSERT(ate_count > 0);
+	if (ate_count <= pcibr_dmamap->bd_ate_count) {
+		ATE_FREEZE();
+		ATE_WRITE();
+		ATE_THAW();
+		bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
+	} else {
+		/* The number of ATE's required is greater than the number
+		 * allocated for this map. One way this can happen is if
+		 * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
+		 * flag, and then when that map is used (right now), the
+		 * target address tells us we really did need to roundup.
+		 * The other possibility is that the map is just plain too
+		 * small to handle the requested target area.
+		 */
+#if PCIBR_ATE_DEBUG
+		PRINT_WARNING( "pcibr_dmamap_addr :\n"
+			"\twanted paddr [0x%x..0x%x]\n"
+			"\tate_count 0x%x bd_ate_count 0x%x\n"
+			"\tATE's required > number allocated\n",
+			paddr, paddr + req_size - 1,
+			ate_count, pcibr_dmamap->bd_ate_count);
+#endif
+		pci_addr = 0;
+	}
+
+    }
+    return pci_addr;
+}
+
+/*ARGSUSED */
+alenlist_t
+pcibr_dmamap_list(pcibr_dmamap_t pcibr_dmamap,
+		  alenlist_t palenlist,
+		  unsigned flags)
+{
+    pcibr_soft_t            pcibr_soft;
+#ifdef IRIX
+    bridge_t               *bridge;
+#else
+    bridge_t               *bridge=NULL;
+#endif
+
+    unsigned                al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
+    int                     inplace = flags & PCIIO_INPLACE;
+
+    alenlist_t              pciio_alenlist = 0;
+    alenlist_t              xtalk_alenlist;
+    size_t                  length;
+    iopaddr_t               offset;
+    unsigned                direct64;
+#ifdef IRIX
+    int                     ate_index;
+    int                     ate_count;
+    int                     ate_total = 0;
+    bridge_ate_p            ate_ptr;
+    bridge_ate_t            ate_proto;
+#else
+    int                     ate_index = 0;
+    int                     ate_count = 0;
+    int                     ate_total = 0;
+    bridge_ate_p            ate_ptr = (bridge_ate_p)0;
+    bridge_ate_t            ate_proto = (bridge_ate_t)0;
+#endif
+    bridge_ate_t            ate_prev;
+    bridge_ate_t            ate;
+    alenaddr_t              xio_addr;
+    xwidgetnum_t            xio_port;
+    iopaddr_t               pci_addr;
+    alenaddr_t              new_addr;
+
+    unsigned                cmd_regs[8];
+    unsigned                s = 0;
+
+#if PCIBR_FREEZE_TIME
+    unsigned                freeze_time;
+#endif
+    int			    ate_freeze_done = 0;	/* To pair ATE_THAW
+							 * with an ATE_FREEZE
+							 */
+
+    pcibr_soft = pcibr_dmamap->bd_soft;
+
+    xtalk_alenlist = xtalk_dmamap_list(pcibr_dmamap->bd_xtalk, palenlist,
+				       flags & DMAMAP_FLAGS);
+    if (!xtalk_alenlist)
+	goto fail;
+
+    alenlist_cursor_init(xtalk_alenlist, 0, NULL);
+
+    if (inplace) {
+	pciio_alenlist = xtalk_alenlist;
+    } else {
+	pciio_alenlist = alenlist_create(al_flags);
+	if (!pciio_alenlist)
+	    goto fail;
+    }
+
+    direct64 = pcibr_dmamap->bd_flags & PCIIO_DMA_A64;
+    if (!direct64) {
+	bridge = pcibr_soft->bs_base;
+	ate_ptr = pcibr_dmamap->bd_ate_ptr;
+	ate_index = pcibr_dmamap->bd_ate_index;
+	ate_proto = pcibr_dmamap->bd_ate_proto;
+	ATE_FREEZE();
+	ate_freeze_done = 1;	/* Remember that we need to do an ATE_THAW */
+    }
+    pci_addr = pcibr_dmamap->bd_pci_addr;
+
+    ate_prev = 0;			/* matches no valid ATEs */
+    while (ALENLIST_SUCCESS ==
+	   alenlist_get(xtalk_alenlist, NULL, 0,
+			&xio_addr, &length, al_flags)) {
+	if (XIO_PACKED(xio_addr)) {
+	    xio_port = XIO_PORT(xio_addr);
+	    xio_addr = XIO_ADDR(xio_addr);
+	} else
+	    xio_port = pcibr_dmamap->bd_xio_port;
+
+	if (xio_port == pcibr_soft->bs_xid) {
+	    new_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, length);
+	    if (new_addr == PCI_NOWHERE)
+		goto fail;
+	} else if (direct64) {
+	    new_addr = pci_addr | xio_addr
+		| ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
+
+	    /* Bridge Hardware WAR #482836:
+	     * If the transfer is not cache aligned
+	     * and the Bridge Rev is <= B, force
+	     * prefetch to be off.
+	     */
+	    if (flags & PCIBR_NOPREFETCH)
+		new_addr &= ~PCI64_ATTR_PREF;
+
+	} else {
+	    /* calculate the ate value for
+	     * the first address. If it
+	     * matches the previous
+	     * ATE written (ie. we had
+	     * multiple blocks in the
+	     * same IOPG), then back up
+	     * and reuse that ATE.
+	     *
+	     * We are NOT going to
+	     * aggressively try to
+	     * reuse any other ATEs.
+	     */
+	    offset = IOPGOFF(xio_addr);
+	    ate = ate_proto
+		| (xio_port << ATE_TIDSHIFT)
+		| (xio_addr - offset);
+	    if (ate == ate_prev) {
+#if PCIBR_ATE_DEBUG
+		printk("pcibr_dmamap_list: ATE share\n");
+#endif
+		ate_ptr--;
+		ate_index--;
+		pci_addr -= IOPGSIZE;
+	    }
+	    new_addr = pci_addr + offset;
+
+	    /* Fill in the hardware ATEs
+	     * that contain this block.
+	     */
+	    ate_count = IOPG(offset + length - 1) + 1;
+	    ate_total += ate_count;
+
+	    /* Ensure that this map contains enough ATE's */
+	    if (ate_total > pcibr_dmamap->bd_ate_count) {
+#if PCIBR_ATE_DEBUG
+		PRINT_WARNING( "pcibr_dmamap_list :\n"
+			"\twanted xio_addr [0x%x..0x%x]\n"
+			"\tate_total 0x%x bd_ate_count 0x%x\n"
+			"\tATE's required > number allocated\n",
+			xio_addr, xio_addr + length - 1,
+			ate_total, pcibr_dmamap->bd_ate_count);
+#endif
+		goto fail;
+	    }
+
+	    ATE_WRITE();
+
+	    ate_index += ate_count;
+	    ate_ptr += ate_count;
+
+	    ate_count <<= IOPFNSHIFT;
+	    ate += ate_count;
+	    pci_addr += ate_count;
+	}
+
+	/* write the PCI DMA address
+	 * out to the scatter-gather list.
+	 */
+	if (inplace) {
+	    if (ALENLIST_SUCCESS !=
+		alenlist_replace(pciio_alenlist, NULL,
+				 &new_addr, &length, al_flags))
+		goto fail;
+	} else {
+	    if (ALENLIST_SUCCESS !=
+		alenlist_append(pciio_alenlist,
+				new_addr, length, al_flags))
+		goto fail;
+	}
+    }
+    if (!inplace)
+	alenlist_done(xtalk_alenlist);
+
+    /* Reset the internal cursor of the alenlist to be returned back
+     * to the caller.
+     */
+    alenlist_cursor_init(pciio_alenlist, 0, NULL);
+
+
+    /* In case an ATE_FREEZE was done do the ATE_THAW to unroll all the
+     * changes that ATE_FREEZE has done to implement the external SSRAM
+     * bug workaround.
+     */
+    if (ate_freeze_done) {
+	ATE_THAW();
+	bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+    }
+    return pciio_alenlist;
+
+  fail:
+    /* There are various points of failure after doing an ATE_FREEZE
+     * We need to do an ATE_THAW. Otherwise the ATEs are locked forever.
+     * The decision to do an ATE_THAW needs to be based on whether a
+     * an ATE_FREEZE was done before.
+     */
+    if (ate_freeze_done) {
+	ATE_THAW();
+	bridge->b_wid_tflush;
+    }
+    if (pciio_alenlist && !inplace)
+	alenlist_destroy(pciio_alenlist);
+    return 0;
+}
+
+/*ARGSUSED */
+void
+pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
+{
+    /*
+     * We could go through and invalidate ATEs here;
+     * for performance reasons, we don't.
+     * We also don't enforce the strict alternation
+     * between _addr/_list and _done, but Hub does.
+     */
+
+#ifdef IRIX
+    if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
+	pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
+
+	if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
+	    atomicAddInt(&(pcibr_dmamap->bd_soft->
+			   bs_slot[pcibr_dmamap->bd_slot].
+			   bss_ext_ates_active), -1);
+    }
+#endif
+
+    xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
+}
+
+
+/*
+ * For each bridge, the DIR_OFF value in the Direct Mapping Register
+ * determines the PCI to Crosstalk memory mapping to be used for all
+ * 32-bit Direct Mapping memory accesses. This mapping can be to any
+ * node in the system. This function will return that compact node id.
+ */
+
+/*ARGSUSED */
+cnodeid_t
+pcibr_get_dmatrans_node(devfs_handle_t pconn_vhdl)
+{
+
+	pciio_info_t	pciio_info = pciio_info_get(pconn_vhdl);
+	pcibr_soft_t	pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+
+	return(NASID_TO_COMPACT_NODEID(NASID_GET(pcibr_soft->bs_dir_xbase)));
+}
+
+/*ARGSUSED */
+iopaddr_t
+pcibr_dmatrans_addr(devfs_handle_t pconn_vhdl,
+		    device_desc_t dev_desc,
+		    paddr_t paddr,
+		    size_t req_size,
+		    unsigned flags)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    devfs_handle_t            xconn_vhdl = pcibr_soft->bs_conn;
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_slot_t       slotp = &pcibr_soft->bs_slot[pciio_slot];
+
+    xwidgetnum_t            xio_port;
+    iopaddr_t               xio_addr;
+    iopaddr_t               pci_addr;
+
+    int                     have_rrbs;
+    int                     min_rrbs;
+
+    /* merge in forced flags */
+    flags |= pcibr_soft->bs_dma_flags;
+
+    xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
+				   flags & DMAMAP_FLAGS);
+
+    if (!xio_addr) {
+#if PCIBR_DMA_DEBUG
+	printk("pcibr_dmatrans_addr:\n"
+		"\tpciio connection point %v\n"
+		"\txtalk connection point %v\n"
+		"\twanted paddr [0x%x..0x%x]\n"
+		"\txtalk_dmatrans_addr returned 0x%x\n",
+		pconn_vhdl, xconn_vhdl,
+		paddr, paddr + req_size - 1,
+		xio_addr);
+#endif
+	return 0;
+    }
+    /*
+     * find which XIO port this goes to.
+     */
+    if (XIO_PACKED(xio_addr)) {
+	if (xio_addr == XIO_NOWHERE) {
+#if PCIBR_DMA_DEBUG
+	    printk("pcibr_dmatrans_addr:\n"
+		    "\tpciio connection point %v\n"
+		    "\txtalk connection point %v\n"
+		    "\twanted paddr [0x%x..0x%x]\n"
+		    "\txtalk_dmatrans_addr returned 0x%x\n",
+		    pconn_vhdl, xconn_vhdl,
+		    paddr, paddr + req_size - 1,
+		    xio_addr);
+#endif
+	    return 0;
+	}
+	xio_port = XIO_PORT(xio_addr);
+	xio_addr = XIO_ADDR(xio_addr);
+
+    } else
+	xio_port = pcibr_soft->bs_mxid;
+
+    /*
+     * If this DMA comes back to us,
+     * return the PCI MEM address on
+     * which it would land, or NULL
+     * if the target is something
+     * on bridge other than PCI MEM.
+     */
+    if (xio_port == pcibr_soft->bs_xid) {
+	pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
+	return pci_addr;
+    }
+    /* If the caller can use A64, try to
+     * satisfy the request with the 64-bit
+     * direct map. This can fail if the
+     * configuration bits in Device(x)
+     * conflict with our flags.
+     */
+
+    if (flags & PCIIO_DMA_A64) {
+	pci_addr = slotp->bss_d64_base;
+	if (!(flags & PCIBR_VCHAN1))
+	    flags |= PCIBR_VCHAN0;
+	if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
+	    (flags == slotp->bss_d64_flags)) {
+
+	    pci_addr |= xio_addr
+		| ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
+
+#if DEBUG && PCIBR_DMA_DEBUG
+#if HWG_PERF_CHECK
+	    if (xio_addr != 0x20000000)
+#endif
+		printk("pcibr_dmatrans_addr: [reuse]\n"
+			"\tpciio connection point %v\n"
+			"\txtalk connection point %v\n"
+			"\twanted paddr [0x%x..0x%x]\n"
+			"\txtalk_dmatrans_addr returned 0x%x\n"
+			"\tdirect 64bit address is 0x%x\n",
+			pconn_vhdl, xconn_vhdl,
+			paddr, paddr + req_size - 1,
+			xio_addr, pci_addr);
+#endif
+	    return (pci_addr);
+	}
+	if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
+	    pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
+	    slotp->bss_d64_flags = flags;
+	    slotp->bss_d64_base = pci_addr;
+	    pci_addr |= xio_addr
+		| ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
+
+	    /* Make sure we have an RRB (or two).
+	     */
+	    if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
+		if (flags & PCIBR_VCHAN1)
+		    pciio_slot += PCIBR_RRB_SLOT_VIRTUAL;
+		have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot];
+		if (have_rrbs < 2) {
+		    if (pci_addr & PCI64_ATTR_PREF)
+			min_rrbs = 2;
+		    else
+			min_rrbs = 1;
+		    if (have_rrbs < min_rrbs)
+			do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, min_rrbs - have_rrbs);
+		}
+	    }
+#if PCIBR_DMA_DEBUG
+#if HWG_PERF_CHECK
+	    if (xio_addr != 0x20000000)
+#endif
+		printk("pcibr_dmatrans_addr:\n"
+			"\tpciio connection point %v\n"
+			"\txtalk connection point %v\n"
+			"\twanted paddr [0x%x..0x%x]\n"
+			"\txtalk_dmatrans_addr returned 0x%x\n"
+			"\tdirect 64bit address is 0x%x\n"
+			"\tnew flags: 0x%x\n",
+			pconn_vhdl, xconn_vhdl,
+			paddr, paddr + req_size - 1,
+			xio_addr, pci_addr, (uint64_t) flags);
+#endif
+	    return (pci_addr);
+	}
+	/* our flags conflict with Device(x).
+	 */
+	flags = flags
+	    & ~PCIIO_DMA_A64
+	    & ~PCIBR_VCHAN0
+	    ;
+
+#if PCIBR_DMA_DEBUG
+	printk("pcibr_dmatrans_addr:\n"
+		"\tpciio connection point %v\n"
+		"\txtalk connection point %v\n"
+		"\twanted paddr [0x%x..0x%x]\n"
+		"\txtalk_dmatrans_addr returned 0x%x\n"
+		"\tUnable to set Device(x) bits for Direct-64\n",
+		pconn_vhdl, xconn_vhdl,
+		paddr, paddr + req_size - 1,
+		xio_addr);
+#endif
+    }
+    /* Try to satisfy the request with the 32-bit direct
+     * map. This can fail if the configuration bits in
+     * Device(x) conflict with our flags, or if the
+     * target address is outside where DIR_OFF points.
+     */
+    {
+	size_t                  map_size = 1ULL << 31;
+	iopaddr_t               xio_base = pcibr_soft->bs_dir_xbase;
+	iopaddr_t               offset = xio_addr - xio_base;
+	iopaddr_t               endoff = req_size + offset;
+
+	if ((req_size > map_size) ||
+	    (xio_addr < xio_base) ||
+	    (xio_port != pcibr_soft->bs_dir_xport) ||
+	    (endoff > map_size)) {
+#if PCIBR_DMA_DEBUG
+	    printk("pcibr_dmatrans_addr:\n"
+		    "\tpciio connection point %v\n"
+		    "\txtalk connection point %v\n"
+		    "\twanted paddr [0x%x..0x%x]\n"
+		    "\txtalk_dmatrans_addr returned 0x%x\n"
+		    "\txio region outside direct32 target\n",
+		    pconn_vhdl, xconn_vhdl,
+		    paddr, paddr + req_size - 1,
+		    xio_addr);
+#endif
+	} else {
+	    pci_addr = slotp->bss_d32_base;
+	    if ((pci_addr != PCIBR_D32_BASE_UNSET) &&
+		(flags == slotp->bss_d32_flags)) {
+
+		pci_addr |= offset;
+
+#if DEBUG && PCIBR_DMA_DEBUG
+		printk("pcibr_dmatrans_addr: [reuse]\n"
+			"\tpciio connection point %v\n"
+			"\txtalk connection point %v\n"
+			"\twanted paddr [0x%x..0x%x]\n"
+			"\txtalk_dmatrans_addr returned 0x%x\n"
+			"\tmapped via direct32 offset 0x%x\n"
+			"\twill DMA via pci addr 0x%x\n",
+			pconn_vhdl, xconn_vhdl,
+			paddr, paddr + req_size - 1,
+			xio_addr, offset, pci_addr);
+#endif
+		return (pci_addr);
+	    }
+	    if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS)) {
+
+		pci_addr = PCI32_DIRECT_BASE;
+		slotp->bss_d32_flags = flags;
+		slotp->bss_d32_base = pci_addr;
+		pci_addr |= offset;
+
+		/* Make sure we have an RRB (or two).
+		 */
+		if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
+		    have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot];
+		    if (have_rrbs < 2) {
+			if (slotp->bss_device & BRIDGE_DEV_PREF)
+			    min_rrbs = 2;
+			else
+			    min_rrbs = 1;
+			if (have_rrbs < min_rrbs)
+			    do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, min_rrbs - have_rrbs);
+		    }
+		}
+#if PCIBR_DMA_DEBUG
+#if HWG_PERF_CHECK
+		if (xio_addr != 0x20000000)
+#endif
+		    printk("pcibr_dmatrans_addr:\n"
+			    "\tpciio connection point %v\n"
+			    "\txtalk connection point %v\n"
+			    "\twanted paddr [0x%x..0x%x]\n"
+			    "\txtalk_dmatrans_addr returned 0x%x\n"
+			    "\tmapped via direct32 offset 0x%x\n"
+			    "\twill DMA via pci addr 0x%x\n"
+			    "\tnew flags: 0x%x\n",
+			    pconn_vhdl, xconn_vhdl,
+			    paddr, paddr + req_size - 1,
+			    xio_addr, offset, pci_addr, (uint64_t) flags);
+#endif
+		return (pci_addr);
+	    }
+	    /* our flags conflict with Device(x).
+	     */
+#if PCIBR_DMA_DEBUG
+	    printk("pcibr_dmatrans_addr:\n"
+		    "\tpciio connection point %v\n"
+		    "\txtalk connection point %v\n"
+		    "\twanted paddr [0x%x..0x%x]\n"
+		    "\txtalk_dmatrans_addr returned 0x%x\n"
+		    "\tUnable to set Device(x) bits for Direct-32\n",
+		    pconn_vhdl, xconn_vhdl,
+		    paddr, paddr + req_size - 1,
+		    xio_addr);
+#endif
+	}
+    }
+
+#if PCIBR_DMA_DEBUG
+    printk("pcibr_dmatrans_addr:\n"
+	    "\tpciio connection point %v\n"
+	    "\txtalk connection point %v\n"
+	    "\twanted paddr [0x%x..0x%x]\n"
+	    "\txtalk_dmatrans_addr returned 0x%x\n"
+	    "\tno acceptable PCI address found or constructable\n",
+	    pconn_vhdl, xconn_vhdl,
+	    paddr, paddr + req_size - 1,
+	    xio_addr);
+#endif
+
+    return 0;
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_dmatrans_list: translate a physical address/length list
+ * (alenlist) into a list of PCI bus addresses, using either the
+ * direct-64 or the direct-32 window; no Bridge mapping resources
+ * are consumed.
+ *
+ * Returns the translated alenlist with its cursor reset -- the
+ * input list itself when PCIIO_INPLACE is set, otherwise a newly
+ * created list -- or 0 if any entry cannot be translated.  On
+ * failure, any Device(x) registration and any list allocated here
+ * are released.
+ */
+alenlist_t
+pcibr_dmatrans_list(devfs_handle_t pconn_vhdl,
+		    device_desc_t dev_desc,
+		    alenlist_t palenlist,
+		    unsigned flags)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    devfs_handle_t            xconn_vhdl = pcibr_soft->bs_conn;
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_slot_t       slotp = &pcibr_soft->bs_slot[pciio_slot];
+    xwidgetnum_t            xio_port;
+
+    alenlist_t              pciio_alenlist = 0;
+    alenlist_t              xtalk_alenlist = 0;
+
+    int                     inplace;
+    unsigned                direct64;
+    unsigned                al_flags;
+
+    iopaddr_t               xio_base;
+    alenaddr_t              xio_addr;
+    size_t                  xio_size;
+
+    size_t                  map_size;
+    iopaddr_t               pci_base;
+    alenaddr_t              pci_addr;
+
+    unsigned                relbits = 0;	/* Device(x) bits to release on failure */
+
+    /* merge in forced flags */
+    flags |= pcibr_soft->bs_dma_flags;
+
+    inplace = flags & PCIIO_INPLACE;
+    direct64 = flags & PCIIO_DMA_A64;
+    al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
+
+    /* Select the window.  Reuse the slot's cached base when our flags
+     * match what is already programmed; otherwise try to register our
+     * flags in Device(x), remembering (relbits) that we must release
+     * the registration if translation fails below.
+     */
+    if (direct64) {
+	map_size = 1ull << 48;
+	xio_base = 0;
+	pci_base = slotp->bss_d64_base;
+	if ((pci_base != PCIBR_D64_BASE_UNSET) &&
+	    (flags == slotp->bss_d64_flags)) {
+	    /* reuse previous base info */
+	} else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS) < 0) {
+	    /* DMA configuration conflict */
+	    goto fail;
+	} else {
+	    relbits = BRIDGE_DEV_D64_BITS;
+	    pci_base =
+		pcibr_flags_to_d64(flags, pcibr_soft);
+	}
+    } else {
+	xio_base = pcibr_soft->bs_dir_xbase;
+	map_size = 1ull << 31;
+	pci_base = slotp->bss_d32_base;
+	if ((pci_base != PCIBR_D32_BASE_UNSET) &&
+	    (flags == slotp->bss_d32_flags)) {
+	    /* reuse previous base info */
+	} else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS) < 0) {
+	    /* DMA configuration conflict */
+	    goto fail;
+	} else {
+	    relbits = BRIDGE_DEV_D32_BITS;
+	    pci_base = PCI32_DIRECT_BASE;
+	}
+    }
+
+    xtalk_alenlist = xtalk_dmatrans_list(xconn_vhdl, 0, palenlist,
+					 flags & DMAMAP_FLAGS);
+    if (!xtalk_alenlist)
+	goto fail;
+
+    alenlist_cursor_init(xtalk_alenlist, 0, NULL);
+
+    if (inplace) {
+	pciio_alenlist = xtalk_alenlist;
+    } else {
+	pciio_alenlist = alenlist_create(al_flags);
+	if (!pciio_alenlist)
+	    goto fail;
+    }
+
+    /* Walk the xtalk list and translate each (address, size) pair
+     * into a PCI bus address.
+     */
+    while (ALENLIST_SUCCESS ==
+	   alenlist_get(xtalk_alenlist, NULL, 0,
+			&xio_addr, &xio_size, al_flags)) {
+
+	/*
+	 * find which XIO port this goes to.
+	 */
+	if (XIO_PACKED(xio_addr)) {
+	    if (xio_addr == XIO_NOWHERE) {
+#if PCIBR_DMA_DEBUG
+		printk("pcibr_dmatrans_list:\n"
+			"\tpciio connection point %v\n"
+			"\txtalk connection point %v\n"
+			"\txtalk_dmatrans_list returned XIO_NOWHERE\n",
+			pconn_vhdl, xconn_vhdl);
+#endif
+		/* Was "return 0" here, which leaked the Device(x)
+		 * registration (relbits) and the alenlists built
+		 * above; the fail path releases them all.  (The old
+		 * debug printk also referenced paddr/req_size, which
+		 * do not exist in this function.)
+		 */
+		goto fail;
+	    }
+	    xio_port = XIO_PORT(xio_addr);
+	    xio_addr = XIO_ADDR(xio_addr);
+	} else
+	    xio_port = pcibr_soft->bs_mxid;
+
+	/*
+	 * If this DMA comes back to us,
+	 * return the PCI MEM address on
+	 * which it would land, or NULL
+	 * if the target is something
+	 * on bridge other than PCI MEM.
+	 */
+	if (xio_port == pcibr_soft->bs_xid) {
+	    pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, xio_size);
+#ifdef IRIX
+	    if (pci_addr == NULL)
+#else
+	    if ( (pci_addr == (alenaddr_t)NULL) )
+#endif
+		goto fail;
+	} else if (direct64) {
+	    ASSERT(xio_port != 0);
+	    pci_addr = pci_base | xio_addr
+		| ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
+	} else {
+	    iopaddr_t               offset = xio_addr - xio_base;
+	    iopaddr_t               endoff = xio_size + offset;
+
+	    /* the direct-32 window only covers [xio_base, xio_base+map_size)
+	     * on the single direct-mapped xtalk port.
+	     */
+	    if ((xio_size > map_size) ||
+		(xio_addr < xio_base) ||
+		(xio_port != pcibr_soft->bs_dir_xport) ||
+		(endoff > map_size))
+		goto fail;
+
+	    pci_addr = pci_base + (xio_addr - xio_base);
+	}
+
+	/* write the PCI DMA address
+	 * out to the scatter-gather list.
+	 */
+	if (inplace) {
+	    if (ALENLIST_SUCCESS !=
+		alenlist_replace(pciio_alenlist, NULL,
+				 &pci_addr, &xio_size, al_flags))
+		goto fail;
+	} else {
+	    if (ALENLIST_SUCCESS !=
+		alenlist_append(pciio_alenlist,
+				pci_addr, xio_size, al_flags))
+		goto fail;
+	}
+    }
+
+    /* Success: commit the window registration to the slot cache so
+     * later calls with the same flags can reuse it.
+     */
+#ifdef IRIX
+    if (relbits)
+#else
+    if (relbits) {
+#endif
+	if (direct64) {
+	    slotp->bss_d64_flags = flags;
+	    slotp->bss_d64_base = pci_base;
+	} else {
+	    slotp->bss_d32_flags = flags;
+	    slotp->bss_d32_base = pci_base;
+	}
+#ifndef IRIX
+    }
+#endif
+    if (!inplace)
+	alenlist_done(xtalk_alenlist);
+
+    /* Reset the internal cursor of the alenlist to be returned back
+     * to the caller.
+     */
+    alenlist_cursor_init(pciio_alenlist, 0, NULL);
+    return pciio_alenlist;
+
+  fail:
+    if (relbits)
+	pcibr_release_device(pcibr_soft, pciio_slot, relbits);
+    if (pciio_alenlist && !inplace)
+	alenlist_destroy(pciio_alenlist);
+    return 0;
+}
+
+/* pcibr_dmamap_drain: drain DMA in flight through the given map by
+ * delegating to the xtalk provider underneath this bridge.
+ */
+void
+pcibr_dmamap_drain(pcibr_dmamap_t map)
+{
+    xtalk_dmamap_drain(map->bd_xtalk);
+}
+
+/* pcibr_dmaaddr_drain: drain DMA in flight for the physical range
+ * [paddr, paddr + bytes) by delegating to our xtalk provider.
+ */
+void
+pcibr_dmaaddr_drain(devfs_handle_t pconn_vhdl,
+		    paddr_t paddr,
+		    size_t bytes)
+{
+    pcibr_soft_t            soft;
+
+    soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info_get(pconn_vhdl));
+    xtalk_dmaaddr_drain(soft->bs_conn, paddr, bytes);
+}
+
+/* pcibr_dmalist_drain: drain DMA in flight for every range in the
+ * given address/length list by delegating to our xtalk provider.
+ */
+void
+pcibr_dmalist_drain(devfs_handle_t pconn_vhdl,
+		    alenlist_t list)
+{
+    pcibr_soft_t            soft;
+
+    soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info_get(pconn_vhdl));
+    xtalk_dmalist_drain(soft->bs_conn, list);
+}
+
+/*
+ * Get the starting PCIbus address out of the given DMA map.
+ * This function is supposed to be used by a close friend of PCI bridge
+ * since it relies on the fact that the starting address of the map is fixed at
+ * the allocation time in the current implementation of PCI bridge.
+ */
+iopaddr_t
+pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
+{
+    /* bd_pci_addr is set once when the map is allocated and
+     * never moves afterwards.
+     */
+    return (pcibr_dmamap->bd_pci_addr);
+}
+
+/* =====================================================================
+ *    INTERRUPT MANAGEMENT
+ */
+
+/*
+ * pcibr_intr_bits: convert a set of PCI INTA..INTD lines for the
+ * device in the given slot into a mask of Bridge PCI interrupt
+ * bit numbers.
+ *
+ * Currently favored mapping from PCI
+ * slot number and INTA/B/C/D to Bridge
+ * PCI Interrupt Bit Number:
+ *
+ *     SLOT     A B C D
+ *      0       0 4 0 4
+ *      1       1 5 1 5
+ *      2       2 6 2 6
+ *      3       3 7 3 7
+ *      4       4 0 4 0
+ *      5       5 1 5 1
+ *      6       6 2 6 2
+ *      7       7 3 7 3
+ */
+static unsigned
+pcibr_intr_bits(pciio_info_t info,
+		pciio_intr_line_t lines)
+{
+    pciio_slot_t            slot = pciio_info_slot_get(info);
+    unsigned		    bbits = 0;
+
+    if (slot >= 8)
+	return 0;		/* out-of-range slot maps to no bits */
+
+    /* INTA/INTC land on bit == slot; INTB/INTD on bit == slot ^ 4 */
+    if (lines & (PCIIO_INTR_LINE_A | PCIIO_INTR_LINE_C))
+	bbits |= 1 << slot;
+    if (lines & (PCIIO_INTR_LINE_B | PCIIO_INTR_LINE_D))
+	bbits |= 1 << (slot ^ 4);
+
+    return bbits;
+}
+
+#ifdef IRIX
+/* Wrapper for pcibr interrupt threads. */
+static void
+pcibr_intrd(pcibr_intr_t intr)
+{
+	/* Called on each restart */
+	ASSERT(cpuid() == intr->bi_mustruncpu);
+
+#ifdef ITHREAD_LATENCY
+	xthread_update_latstats(intr->bi_tinfo->thd_latstats);
+#endif /* ITHREAD_LATENCY */
+
+	ASSERT(intr->bi_func != NULL);
+	intr->bi_func(intr->bi_arg);		/* Invoke the interrupt handler */
+
+	ipsema(&intr->bi_tinfo.thd_isync);	/* Sleep 'till next interrupt */
+	/* NOTREACHED */
+}
+
+
+/* First entry of the interrupt thread: bind the thread to its
+ * designated CPU, then restart into pcibr_intrd for each interrupt.
+ */
+static void
+pcibr_intrd_start(pcibr_intr_t intr)
+{
+	ASSERT(intr->bi_mustruncpu >= 0);
+	setmustrun(intr->bi_mustruncpu);
+
+	xthread_set_func(KT_TO_XT(curthreadp), (xt_func_t *)pcibr_intrd, (void *)intr);
+	atomicSetInt(&intr->bi_tinfo.thd_flags, THD_INIT);
+	ipsema(&intr->bi_tinfo.thd_isync);  /* Comes out in pcibr_intrd */
+	/* NOTREACHED */
+}
+
+
+/* Create and register the interrupt service thread for this
+ * interrupt at the given software level (IRIX only).
+ */
+static void
+pcibr_thread_setup(pcibr_intr_t intr, int bridge_levels, ilvl_t intr_swlevel)
+{
+	char thread_name[32];
+
+	sprintf(thread_name, "pcibr_intrd[0x%x]", bridge_levels);
+
+	/* XXX need to adjust priority whenever an interrupt is connected */
+	atomicSetInt(&intr->bi_tinfo.thd_flags, THD_ISTHREAD | THD_REG);
+	xthread_setup(thread_name, intr_swlevel, &intr->bi_tinfo,
+			(xt_func_t *)pcibr_intrd_start,
+			(void *)intr);
+}
+#endif	/* IRIX */
+
+
+
+/*ARGSUSED */
+/*
+ * pcibr_intr_alloc: allocate the resources needed to deliver the
+ * requested PCI interrupt lines (INTA..INTD mask in `lines') from
+ * the device at pconn_vhdl.  For each Bridge interrupt bit the
+ * lines map to, an xtalk interrupt is allocated (or an existing one
+ * shared) and this pcibr_intr is linked onto the bit's handler
+ * list.  All shared state is installed with compare_and_swap_ptr so
+ * concurrent allocators race safely without a lock.
+ * Returns the new pcibr_intr, or NULL on allocation failure.
+ */
+pcibr_intr_t
+pcibr_intr_alloc(devfs_handle_t pconn_vhdl,
+		 device_desc_t dev_desc,
+		 pciio_intr_line_t lines,
+		 devfs_handle_t owner_dev)
+{
+    pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
+    pciio_slot_t            pciio_slot = pcibr_info->f_slot;
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
+    devfs_handle_t            xconn_vhdl = pcibr_soft->bs_conn;
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    int                     is_threaded;
+    int                     thread_swlevel;
+
+    xtalk_intr_t           *xtalk_intr_p;
+    pcibr_intr_t           *pcibr_intr_p;
+    pcibr_intr_list_t      *intr_list_p;
+    pcibr_intr_wrap_t      *intr_wrap_p;
+
+    unsigned                pcibr_int_bits;
+    unsigned                pcibr_int_bit;
+    xtalk_intr_t            xtalk_intr = (xtalk_intr_t)0;
+    hub_intr_t		    hub_intr;
+    pcibr_intr_t            pcibr_intr;
+    pcibr_intr_list_t       intr_entry;
+    pcibr_intr_list_t       intr_list;
+    pcibr_intr_wrap_t       intr_wrap;
+    bridgereg_t             int_dev;
+
+#if DEBUG && INTR_DEBUG
+    printk("%v: pcibr_intr_alloc\n"
+	    "%v:%s%s%s%s%s\n",
+	    owner_dev, pconn_vhdl,
+	    !(lines & 15) ? " No INTs?" : "",
+	    lines & 1 ? " INTA" : "",
+	    lines & 2 ? " INTB" : "",
+	    lines & 4 ? " INTC" : "",
+	    lines & 8 ? " INTD" : "");
+#endif
+
+    NEW(pcibr_intr);
+    if (!pcibr_intr)
+	return NULL;
+
+    /* Decide threaded vs. direct delivery from the device
+     * descriptor; default is threaded at default_intr_pri.
+     */
+    if (dev_desc) {
+	is_threaded = !(device_desc_flags_get(dev_desc) & D_INTR_NOTHREAD);
+	if (is_threaded)
+		thread_swlevel = device_desc_intr_swlevel_get(dev_desc);
+    } else {
+	extern int default_intr_pri;
+
+	is_threaded = 1; /* PCI interrupts are threaded, by default */
+	thread_swlevel = default_intr_pri;
+    }
+
+    pcibr_intr->bi_dev = pconn_vhdl;
+    pcibr_intr->bi_lines = lines;
+    pcibr_intr->bi_soft = pcibr_soft;
+    pcibr_intr->bi_ibits = 0;		/* bits will be added below */
+    pcibr_intr->bi_func = 0;		/* unset until connect */
+    pcibr_intr->bi_arg = 0;		/* unset until connect */
+    pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
+    pcibr_intr->bi_mustruncpu = CPU_NONE;
+
+    pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines);
+
+
+    /*
+     * For each PCI interrupt line requested, figure
+     * out which Bridge PCI Interrupt Line it maps
+     * to, and make sure there are xtalk resources
+     * allocated for it.
+     */
+#if DEBUG && INTR_DEBUG
+    printk("pcibr_int_bits: 0x%X\n", pcibr_int_bits);
+#endif
+    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit ++) {
+	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
+	    xtalk_intr_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
+
+	    xtalk_intr = *xtalk_intr_p;
+
+	    if (xtalk_intr == NULL) {
+		/*
+		 * This xtalk_intr_alloc is constrained for two reasons:
+		 * 1) Normal interrupts and error interrupts need to be delivered
+		 *    through a single xtalk target widget so that there aren't any
+		 *    ordering problems with DMA, completion interrupts, and error
+		 *    interrupts. (Use of xconn_vhdl forces this.)
+		 *
+		 * 2) On IP35, addressing constraints on IP35 and Bridge force
+		 *    us to use a single PI number for all interrupts from a
+		 *    single Bridge. (IP35-specific code forces this, and we
+		 *    verify in pcibr_setwidint.)
+		 */
+		xtalk_intr = xtalk_intr_alloc(xconn_vhdl, dev_desc, owner_dev);
+#if DEBUG && INTR_DEBUG
+		printk("%v: xtalk_intr=0x%X\n", xconn_vhdl, xtalk_intr);
+#endif
+
+		/* both an assert and a runtime check on this:
+		 * we need to check in non-DEBUG kernels, and
+		 * the ASSERT gets us more information when
+		 * we use DEBUG kernels.
+		 */
+		ASSERT(xtalk_intr != NULL);
+		if (xtalk_intr == NULL) {
+		    /* it is quite possible that our
+		     * xtalk_intr_alloc failed because
+		     * someone else got there first,
+		     * and we can find their results
+		     * in xtalk_intr_p.
+		     */
+		    if (!*xtalk_intr_p) {
+#ifdef SUPPORT_PRINTING_V_FORMAT
+			PRINT_ALERT(
+				"pcibr_intr_alloc %v: unable to get xtalk interrupt resources",
+				xconn_vhdl);
+#endif
+			/* yes, we leak resources here. */
+			return 0;
+		    }
+		} else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
+		    /*
+		     * now tell the bridge which slot is
+		     * using this interrupt line.
+		     */
+		    int_dev = bridge->b_int_device;
+		    int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
+		    int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
+		    bridge->b_int_device = int_dev;	/* XXXMP */
+
+#if DEBUG && INTR_DEBUG
+		    printk("%v: bridge intr bit %d clears my wrb\n",
+			    pconn_vhdl, pcibr_int_bit);
+#endif
+		} else {
+		    /* someone else got one allocated first;
+		     * free the one we just created, and
+		     * retrieve the one they allocated.
+		     */
+		    xtalk_intr_free(xtalk_intr);
+		    xtalk_intr = *xtalk_intr_p;
+#if PARANOID
+		    /* once xtalk_intr is set, we never clear it,
+		     * so if the CAS fails above, this condition
+		     * can "never happen" ...
+		     */
+		    if (!xtalk_intr) {
+			PRINT_ALERT(
+				"pcibr_intr_alloc %v: unable to set xtalk interrupt resources",
+				xconn_vhdl);
+			/* yes, we leak resources here. */
+			return 0;
+		    }
+#endif
+		}
+	    }
+
+	    /*
+	     * For threaded drivers, set the interrupt thread to run wherever
+	     * the interrupt is targeted.
+	     */
+#ifdef notyet
+	    if (is_threaded) {
+		cpuid_t old_mustrun = pcibr_intr->bi_mustruncpu;
+		pcibr_intr->bi_mustruncpu = cpuvertex_to_cpuid(xtalk_intr_cpu_get(xtalk_intr));
+		ASSERT(pcibr_intr->bi_mustruncpu >= 0);
+
+		/*
+		 * This is possible, but very unlikely: It means that 2 (or more) interrupts
+		 * originating on a single Bridge and used by a single device were unable to
+		 * find sufficient xtalk interrupt resources that would allow them all to be
+		 * handled by the same CPU.  If someone tries to target lots of interrupts to
+		 * a single CPU, we might hit this case.  Things should still operate correctly,
+		 * but it's a sub-optimal configuration.
+		 */
+		if ((old_mustrun != CPU_NONE) && (old_mustrun != pcibr_intr->bi_mustruncpu)) {
+#ifdef SUPPORT_PRINTING_V_FORMAT
+			PRINT_WARNING( "Conflict on where to schedule interrupts for %v\n", pconn_vhdl);
+#endif
+			PRINT_WARNING( "(on cpu %d or on cpu %d)\n", old_mustrun, pcibr_intr->bi_mustruncpu);
+		}
+	    }
+#endif
+
+	    pcibr_intr->bi_ibits |= 1 << pcibr_int_bit;
+
+	    /* Link this interrupt onto the bit's handler list.  The
+	     * list is only ever grown (entries are "erased" by
+	     * NULLing il_intr, never unlinked), and each insertion
+	     * point is claimed with a CAS so racing allocators
+	     * cannot lose entries.
+	     */
+	    NEW(intr_entry);
+	    intr_entry->il_next = NULL;
+	    intr_entry->il_intr = pcibr_intr;
+	    intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
+
+	    intr_list_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_list;
+	    if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
+		/* we are the first interrupt on this bridge bit.
+		 */
+#if DEBUG && INTR_DEBUG
+		printk("%v INT 0x%x (bridge bit %d) allocated [FIRST]\n",
+			pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
+#endif
+		continue;
+	    }
+	    intr_list = *intr_list_p;
+	    pcibr_intr_p = &intr_list->il_intr;
+	    if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
+		/* first entry on list was erased,
+		 * and we replaced it, so we
+		 * don't need our intr_entry.
+		 */
+		DEL(intr_entry);
+#if DEBUG && INTR_DEBUG
+		printk("%v INT 0x%x (bridge bit %d) replaces erased first\n",
+			pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
+#endif
+		continue;
+	    }
+	    intr_list_p = &intr_list->il_next;
+	    if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
+		/* we are the new second interrupt on this bit.
+		 * switch to local wrapper.
+		 */
+#if DEBUG && INTR_DEBUG
+		printk("%v INT 0x%x (bridge bit %d) is new SECOND\n",
+			pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
+#endif
+		NEW(intr_wrap);
+		intr_wrap->iw_soft = pcibr_soft;
+		intr_wrap->iw_stat = &(bridge->b_int_status);
+		intr_wrap->iw_intr = 1 << pcibr_int_bit;
+		intr_wrap->iw_list = intr_list;
+		intr_wrap_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
+		if (!compare_and_swap_ptr((void **) intr_wrap_p, NULL, intr_wrap)) {
+		    /* someone else set up the wrapper.
+		     */
+		    DEL(intr_wrap);
+		    continue;
+#if DEBUG && INTR_DEBUG
+		} else {
+		    printk("%v bridge bit %d wrapper state created\n",
+			    pconn_vhdl, pcibr_int_bit);
+#endif
+		}
+		continue;
+	    }
+	    while (1) {
+		pcibr_intr_p = &intr_list->il_intr;
+		if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
+		    /* an entry on list was erased,
+		     * and we replaced it, so we
+		     * don't need our intr_entry.
+		     */
+		    DEL(intr_entry);
+#if DEBUG && INTR_DEBUG
+		    printk("%v INT 0x%x (bridge bit %d) replaces erased Nth\n",
+			    pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
+#endif
+		    break;
+		}
+		intr_list_p = &intr_list->il_next;
+		if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
+		    /* entry appended to share list
+		     */
+#if DEBUG && INTR_DEBUG
+		    printk("%v INT 0x%x (bridge bit %d) is new Nth\n",
+			    pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
+#endif
+		    break;
+		}
+		/* step to next record in chain
+		 */
+		intr_list = *intr_list_p;
+	    }
+	}
+    }
+
+#ifdef IRIX
+    if (is_threaded) {
+	/* Set pcibr_intr->bi_tinfo */
+	pcibr_thread_setup(pcibr_intr, pcibr_int_bits, thread_swlevel);
+	ASSERT(!(pcibr_intr->bi_flags & PCIIO_INTR_CONNECTED));
+    }
+#endif
+
+#if DEBUG && INTR_DEBUG
+    printk("%v pcibr_intr_alloc complete\n", pconn_vhdl);
+#endif
+    /* NOTE(review): xtalk_intr is only set inside the loop above, so
+     * if pcibr_int_bits is 0 (no lines requested/mapped) it is still
+     * (xtalk_intr_t)0 here and this dereference would fault --
+     * presumably callers always request at least one line; confirm.
+     */
+    hub_intr = (hub_intr_t)xtalk_intr;
+    pcibr_intr->bi_irq = hub_intr->i_bit;
+    pcibr_intr->bi_cpu = hub_intr->i_cpuid;
+    return pcibr_intr;
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_intr_free: remove this interrupt from every Bridge interrupt
+ * bit it registered on, and release the bit's xtalk interrupt
+ * resources when the line is not shared.  List entries are "erased"
+ * (il_intr CASed to NULL) rather than unlinked, matching the
+ * lock-free list discipline used by pcibr_intr_alloc.
+ */
+void
+pcibr_intr_free(pcibr_intr_t pcibr_intr)
+{
+    unsigned                pcibr_int_bits = pcibr_intr->bi_ibits;
+    pcibr_soft_t            pcibr_soft = pcibr_intr->bi_soft;
+    unsigned                pcibr_int_bit;
+    pcibr_intr_list_t       intr_list;
+    pcibr_intr_wrap_t	    intr_wrap;
+    xtalk_intr_t	    *xtalk_intrp;
+
+    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
+	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
+	    /* erase every list entry that points at us */
+	    for (intr_list =
+		     pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_list;
+		 intr_list != NULL;
+		 intr_list = intr_list->il_next)
+		if (compare_and_swap_ptr((void **) &intr_list->il_intr,
+					 pcibr_intr,
+					 NULL)) {
+#if DEBUG && INTR_DEBUG
+		    printk("%s: cleared a handler from bit %d\n",
+			    pcibr_soft->bs_name, pcibr_int_bit);
+#endif
+		}
+	    /* If this interrupt line is not being shared between multiple
+	     * devices release the xtalk interrupt resources.
+	     */
+	    intr_wrap =
+		pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
+	    xtalk_intrp = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
+	    if ((intr_wrap == NULL) && (*xtalk_intrp)) {
+
+		bridge_t 	*bridge = pcibr_soft->bs_base;
+		bridgereg_t	int_dev;
+
+		xtalk_intr_free(*xtalk_intrp);
+		*xtalk_intrp = 0;
+
+		/* Clear the PCI device interrupt to bridge interrupt pin
+		 * mapping.
+		 */
+		int_dev = bridge->b_int_device;
+		int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
+		bridge->b_int_device = int_dev;
+
+	    }
+	}
+    }
+    DEL(pcibr_intr);
+}
+
+/* pcibr_setpciint: xtalk interrupt "setfunc" callback.  The sfarg is
+ * the address of the Bridge b_int_addr register for this interrupt
+ * bit; program it with the interrupt host (addr >> 30, masked) and
+ * the interrupt vector field.
+ */
+LOCAL void
+pcibr_setpciint(xtalk_intr_t xtalk_intr)
+{
+    iopaddr_t               addr = xtalk_intr_addr_get(xtalk_intr);
+    xtalk_intr_vector_t     vect = xtalk_intr_vector_get(xtalk_intr);
+    bridgereg_t            *int_addr = (bridgereg_t *)
+    xtalk_intr_sfarg_get(xtalk_intr);
+
+    *int_addr = ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
+		 (BRIDGE_INT_ADDR_FLD & vect));
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_intr_connect: install intr_func/intr_arg as the handler for
+ * this interrupt and enable its Bridge interrupt bits.  Bits with no
+ * share wrapper are connected straight through xtalk to the client;
+ * shared bits are connected (once) to the pcibr_intr_list_func
+ * dispatch wrapper, which fans out to all registered handlers.
+ * Returns 0 on success, -1 if pcibr_intr is NULL.
+ */
+int
+pcibr_intr_connect(pcibr_intr_t pcibr_intr,
+		   intr_func_t intr_func,
+		   intr_arg_t intr_arg,
+		   void *thread)
+{
+    pcibr_soft_t            pcibr_soft;
+    bridge_t               *bridge;
+    unsigned                pcibr_int_bits;
+    unsigned                pcibr_int_bit;
+    bridgereg_t             b_int_enable;
+    unsigned                s;
+
+    /* Validate the handle before touching it: the original code
+     * dereferenced pcibr_intr in its declaration initializers
+     * *before* performing this NULL check, making the check dead.
+     */
+    if (pcibr_intr == NULL)
+	return -1;
+
+    pcibr_soft = pcibr_intr->bi_soft;
+    bridge = pcibr_soft->bs_base;
+    pcibr_int_bits = pcibr_intr->bi_ibits;
+
+#if DEBUG && INTR_DEBUG
+    printk("%v: pcibr_intr_connect 0x%X(0x%X)\n",
+	    pcibr_intr->bi_dev, intr_func, intr_arg);
+#endif
+
+    pcibr_intr->bi_func = intr_func;
+    pcibr_intr->bi_arg = intr_arg;
+    *((volatile unsigned *)&pcibr_intr->bi_flags) |= PCIIO_INTR_CONNECTED;
+
+    /*
+     * For each PCI interrupt line requested, figure
+     * out which Bridge PCI Interrupt Line it maps
+     * to, and make sure there are xtalk resources
+     * allocated for it.
+     */
+    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
+	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
+	    pcibr_intr_wrap_t       intr_wrap;
+	    xtalk_intr_t            xtalk_intr;
+	    int                    *setptr;
+
+	    xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
+
+	    /* if we have no wrap structure,
+	     * tell xtalk to deliver the interrupt
+	     * directly to the client.
+	     */
+	    intr_wrap = pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
+	    if (intr_wrap == NULL) {
+		xtalk_intr_connect(xtalk_intr,
+				   (intr_func_t) intr_func,
+				   (intr_arg_t) intr_arg,
+				   (xtalk_intr_setfunc_t) pcibr_setpciint,
+				   (void *) &(bridge->b_int_addr[pcibr_int_bit].addr),
+				   thread);
+#if DEBUG && INTR_DEBUG
+		printk("%v bridge bit %d routed by xtalk\n",
+			pcibr_intr->bi_dev, pcibr_int_bit);
+#endif
+		continue;
+	    }
+
+	    /* the wrapper only needs to be connected once per bit */
+	    setptr = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_wrap_set;
+	    if (*setptr)
+		continue;
+
+
+	    /* We have a wrap structure, so we're sharing a Bridge interrupt level */
+
+	    xtalk_intr_disconnect(xtalk_intr); /* Disconnect old interrupt */
+
+	    /*
+		If the existing xtalk_intr was allocated without the NOTHREAD flag,
+		we need to allocate a new one that's NOTHREAD, and connect to the
+		new one.   pcibr_intr_list_func expects to run at interrupt level
+		rather than in a thread.  With today's devices, this can't happen,
+		so let's punt on writing the code till we need it (probably never).
+		Instead, just ASSERT that we're a NOTHREAD xtalk_intr.
+	    */
+#ifdef IRIX
+	    ASSERT_ALWAYS(!(pcibr_intr->bi_flags & PCIIO_INTR_NOTHREAD) ||
+			xtalk_intr_flags_get(xtalk_intr) & XTALK_INTR_NOTHREAD);
+#endif
+
+	    /* Use the wrapper dispatch function to handle shared Bridge interrupts */
+	    xtalk_intr_connect(xtalk_intr,
+			       pcibr_intr_list_func,
+			       (intr_arg_t) intr_wrap,
+			       (xtalk_intr_setfunc_t) pcibr_setpciint,
+			       (void *) &(bridge->b_int_addr[pcibr_int_bit].addr),
+			       0);
+	    *setptr = 1;
+
+#if DEBUG && INTR_DEBUG
+	    printk("%v bridge bit %d wrapper connected\n",
+		    pcibr_intr->bi_dev, pcibr_int_bit);
+#endif
+	}
+    /* enable all of this interrupt's bits in the Bridge */
+    s = pcibr_lock(pcibr_soft);
+    b_int_enable = bridge->b_int_enable;
+    b_int_enable |= pcibr_int_bits;
+    bridge->b_int_enable = b_int_enable;
+    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+    pcibr_unlock(pcibr_soft, s);
+
+    return 0;
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_intr_disconnect: stop delivery of this interrupt.  Clears
+ * the handler, disables the Bridge enable bits for lines this
+ * interrupt owns exclusively (shared lines stay enabled for the
+ * other users), and disconnects/reconnects the xtalk side to close
+ * the race against a concurrent wrapper setup.
+ */
+void
+pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
+{
+    pcibr_soft_t            pcibr_soft = pcibr_intr->bi_soft;
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    unsigned                pcibr_int_bits = pcibr_intr->bi_ibits;
+    unsigned                pcibr_int_bit;
+    pcibr_intr_wrap_t       intr_wrap;
+    bridgereg_t             b_int_enable;
+    unsigned                s;
+
+    /* Stop calling the function. Now.
+     */
+    *((volatile unsigned *)&pcibr_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
+    pcibr_intr->bi_func = 0;
+    pcibr_intr->bi_arg = 0;
+    /*
+     * For each PCI interrupt line requested, figure
+     * out which Bridge PCI Interrupt Line it maps
+     * to, and disconnect the interrupt.
+     */
+
+    /* don't disable interrupts for lines that
+     * are shared between devices.
+     */
+    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
+	if ((pcibr_int_bits & (1 << pcibr_int_bit)) &&
+	    (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_wrap_set))
+	    pcibr_int_bits &= ~(1 << pcibr_int_bit);
+    if (!pcibr_int_bits)
+	return;
+
+    /* mask off the remaining (exclusively owned) bits in the Bridge */
+    s = pcibr_lock(pcibr_soft);
+    b_int_enable = bridge->b_int_enable;
+    b_int_enable &= ~pcibr_int_bits;
+    bridge->b_int_enable = b_int_enable;
+    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+    pcibr_unlock(pcibr_soft, s);
+
+    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
+	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
+	    /* if we have set up the share wrapper,
+	     * do not disconnect it.
+	     */
+	    if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_wrap_set)
+		continue;
+
+	    xtalk_intr_disconnect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
+
+	    /* if we have a share wrapper state,
+	     * connect us up; this closes the hole
+	     * where the connection of the wrapper
+	     * was in progress as we disconnected.
+	     */
+	    intr_wrap = pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
+	    if (intr_wrap == NULL)
+		continue;
+
+
+	    xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
+			       pcibr_intr_list_func,
+			       (intr_arg_t) intr_wrap,
+			       (xtalk_intr_setfunc_t) pcibr_setpciint,
+			       (void *) &(bridge->b_int_addr[pcibr_int_bit].addr),
+			       0);
+	}
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_intr_cpu_get: return the CPU vertex servicing this
+ * interrupt -- the xtalk target of the first Bridge interrupt bit
+ * in use -- or 0 if no bit is set.
+ */
+devfs_handle_t
+pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
+{
+    pcibr_soft_t            soft = pcibr_intr->bi_soft;
+    unsigned                ibits = pcibr_intr->bi_ibits;
+    unsigned                bit;
+
+    for (bit = 0; bit < 8; bit++) {
+	if (!(ibits & (1 << bit)))
+	    continue;
+	return xtalk_intr_cpu_get(soft->bs_intr[bit].bsi_xtalk_intr);
+    }
+    return 0;
+}
+
+/* =====================================================================
+ *    INTERRUPT HANDLING
+ */
+/* pcibr_clearwidint: zero the Bridge widget interrupt destination
+ * registers, leaving no interrupt target programmed.
+ */
+LOCAL void
+pcibr_clearwidint(bridge_t *bridge)
+{
+    bridge->b_wid_int_upper = 0;
+    bridge->b_wid_int_lower = 0;
+}
+
+
+/* pcibr_setwidint: xtalk interrupt "setfunc" callback for the Bridge
+ * widget error interrupt.  Programs the widget interrupt destination
+ * registers (target widget + xtalk address) and the host error
+ * vector.  On IP35-class configurations it additionally verifies
+ * that the destination never changes once set, since all interrupts
+ * from one Bridge must use a single PI (see pcibr_intr_alloc).
+ */
+LOCAL void
+pcibr_setwidint(xtalk_intr_t intr)
+{
+    xwidgetnum_t            targ = xtalk_intr_target_get(intr);
+    iopaddr_t               addr = xtalk_intr_addr_get(intr);
+    xtalk_intr_vector_t     vect = xtalk_intr_vector_get(intr);
+    widgetreg_t		    NEW_b_wid_int_upper, NEW_b_wid_int_lower;
+    widgetreg_t		    OLD_b_wid_int_upper, OLD_b_wid_int_lower;
+
+    bridge_t               *bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);
+
+    /* upper register carries the target widget number and the high
+     * xtalk address bits; lower carries the low address bits.
+     */
+    NEW_b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
+			       XTALK_ADDR_TO_UPPER(addr));
+    NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
+
+    OLD_b_wid_int_upper = bridge->b_wid_int_upper;
+    OLD_b_wid_int_lower = bridge->b_wid_int_lower;
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+    /* Verify that all interrupts from this Bridge are using a single PI */
+    if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
+	/*
+	 * Once set, these registers shouldn't change; they should
+	 * be set multiple times with the same values.
+	 *
+	 * If we're attempting to change these registers, it means
+	 * that our heuristics for allocating interrupts in a way
+	 * appropriate for IP35 have failed, and the admin needs to
+	 * explicitly direct some interrupts (or we need to make the
+	 * heuristics more clever).
+	 *
+	 * In practice, we hope this doesn't happen very often, if
+	 * at all.
+	 */
+	if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
+	    (OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
+		PRINT_WARNING("Interrupt allocation is too complex.\n");
+		PRINT_WARNING("Use explicit administrative interrupt targetting.\n");
+		PRINT_WARNING("bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
+		PRINT_WARNING("NEW=0x%x/0x%x  OLD=0x%x/0x%x\n",
+			NEW_b_wid_int_upper, NEW_b_wid_int_lower,
+			OLD_b_wid_int_upper, OLD_b_wid_int_lower);
+		PRINT_PANIC("PCI Bridge interrupt targetting error\n");
+	}
+    }
+#endif /* CONFIG_SGI_IP35 */
+
+    bridge->b_wid_int_upper = NEW_b_wid_int_upper;
+    bridge->b_wid_int_lower = NEW_b_wid_int_lower;
+    bridge->b_int_host_err = vect;
+}
+
+/*
+ * pcibr_xintr_preset: called during mlreset time
+ * if the platform specific code needs to route
+ * one of the Bridge's xtalk interrupts before the
+ * xtalk infrastructure is available.
+ */
+void
+pcibr_xintr_preset(void *which_widget,
+		   int which_widget_intr,
+		   xwidgetnum_t targ,
+		   iopaddr_t addr,
+		   xtalk_intr_vector_t vect)
+{
+    bridge_t               *bridge = (bridge_t *) which_widget;
+
+    if (which_widget_intr == -1) {
+	/* bridge widget error interrupt:
+	 * route it to targ/addr (target widget in bits 19:16)
+	 * and record the vector.
+	 */
+	bridge->b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
+				   XTALK_ADDR_TO_UPPER(addr));
+	bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
+	bridge->b_int_host_err = vect;
+
+	/* turn on all interrupts except
+	 * the PCI interrupt requests,
+	 * at least at heart.
+	 */
+	bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;
+
+    } else {
+	/* routing a pci device interrupt.
+	 * targ and low 38 bits of addr must
+	 * be the same as the already set
+	 * value for the widget error interrupt.
+	 */
+	bridge->b_int_addr[which_widget_intr].addr =
+	    ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
+	     (BRIDGE_INT_ADDR_FLD & vect));
+	/*
+	 * now bridge can let it through;
+	 * NB: still should be blocked at
+	 * xtalk provider end, until the service
+	 * function is set.
+	 */
+	bridge->b_int_enable |= 1 << vect;
+    }
+    /* read-back forces the preceding PIO writes out to the Bridge */
+    bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+}
+
+/*
+ * pcibr_intr_list_func: dispatch a shared Bridge PCI interrupt line.
+ * 'arg' is a pcibr_intr_wrap_t describing the status register, the
+ * interrupt mask for this line, and the list of connected handlers.
+ * Non-threaded handlers are called directly at interrupt level;
+ * threaded handlers are woken (IRIX only in this code).  If no handler
+ * is connected, the line is disabled to avoid spinning forever.
+ */
+void
+pcibr_intr_list_func(intr_arg_t arg)
+{
+    pcibr_intr_wrap_t       wrap = (pcibr_intr_wrap_t) arg;
+    reg_p                   statp = wrap->iw_stat;
+    bridgereg_t             mask = wrap->iw_intr;
+    reg_p                   wrbf;
+    pcibr_intr_list_t       list;
+    pcibr_intr_t            intr;
+    intr_func_t             func;
+    int                     clearit;
+    int                     thread_count = 0;
+
+    /*
+     * Loop until either
+     * 1) All interrupts have been removed by direct-called interrupt handlers OR
+     * 2) We've woken up at least one interrupt thread that will presumably clear
+     *    Bridge interrupt bits
+     */
+	
+    while ((!thread_count) && (mask & *statp)) {
+	clearit = 1;
+	for (list = wrap->iw_list;
+	     list != NULL;
+	     list = list->il_next) {
+	    if ((intr = list->il_intr) &&
+		(intr->bi_flags & PCIIO_INTR_CONNECTED)) {
+    		int is_threaded;
+
+		ASSERT(intr->bi_func);
+
+		/*
+		 * This device may have initiated write
+		 * requests since the bridge last saw
+		 * an edge on this interrupt input; flushing
+		 * the buffer here should help but may not
+		 * be sufficient if we get more requests after
+		 * the flush, followed by the card deciding
+		 * it wants service, before the interrupt
+		 * handler checks to see if things need
+		 * to be done.
+		 *
+		 * There is a similar race condition if
+		 * an interrupt handler loops around and
+		 * notices further service is required.
+		 * Perhaps we need to have an explicit
+		 * call that interrupt handlers need to
+		 * do between noticing that DMA to memory
+		 * has completed, but before observing the
+		 * contents of memory?
+		 */
+#ifdef IRIX
+		if (wrbf = list->il_wrbf)
+#else
+		if ((wrbf = list->il_wrbf))
+#endif
+		    (void) *wrbf;	/* write request buffer flush */
+
+		is_threaded = !(intr->bi_flags & PCIIO_INTR_NOTHREAD);
+
+		if (is_threaded) {
+			thread_count++;
+#ifdef IRIX
+			icvsema(&intr->bi_tinfo.thd_isync, intr->bi_tinfo.thd_pri,
+				NULL, NULL, NULL);
+#endif
+		} else {
+			/* Non-threaded.  Call the interrupt handler at interrupt level */
+			func = intr->bi_func;
+			func(intr->bi_arg);
+		}
+
+		clearit = 0;
+	    }
+	}
+
+	/* If there were no handlers,
+	 * disable the interrupt and return.
+	 * It will get enabled again after
+	 * a handler is connected.
+	 * If we don't do this, we would
+	 * sit here and spin through the
+	 * list forever.
+	 */
+	if (clearit) {
+	    pcibr_soft_t            pcibr_soft = wrap->iw_soft;
+	    bridge_t               *bridge = pcibr_soft->bs_base;
+	    bridgereg_t             b_int_enable;
+	    unsigned                s;
+
+	    s = pcibr_lock(pcibr_soft);
+	    b_int_enable = bridge->b_int_enable;
+	    b_int_enable &= ~mask;
+	    bridge->b_int_enable = b_int_enable;
+	    bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
+	    pcibr_unlock(pcibr_soft, s);
+	    return;
+	}
+    }
+}
+
+/* =====================================================================
+ *    ERROR HANDLING
+ */
+
+/*
+ * BRIDGE_PIOERR_TIMEOUT: timeout used while handling Bridge PIO
+ * errors; debug builds use longer values to ease analysis.
+ * NOTE(review): units presumed to be scheduler ticks -- confirm
+ * against the consumers of this constant (outside this hunk).
+ */
+#ifdef	DEBUG
+#ifdef	ERROR_DEBUG
+#define BRIDGE_PIOERR_TIMEOUT	100	/* Timeout with ERROR_DEBUG defined */
+#else
+#define BRIDGE_PIOERR_TIMEOUT	40	/* Timeout in debug mode  */
+#endif
+#else
+#define BRIDGE_PIOERR_TIMEOUT	1	/* Timeout in non-debug mode                            */
+#endif
+
+/*
+ * print_bridge_errcmd: log a Bridge error command word register.
+ * 'errtype' is a short prefix (e.g. "" or "Aux") identifying which
+ * command word is being printed.  The %R form decodes the word via
+ * xio_cmd_bits when that printk extension is supported.
+ */
+LOCAL void
+print_bridge_errcmd(uint32_t cmdword, char *errtype)
+{
+#ifdef SUPPORT_PRINTING_R_FORMAT
+    PRINT_WARNING(
+	    "	Bridge %s error command word register %R",
+	    errtype, cmdword, xio_cmd_bits);
+#else
+    PRINT_WARNING(
+	    "	Bridge %s error command word register 0x%x",
+	    errtype, cmdword);
+#endif
+}
+
+/*
+ * pcibr_isr_errs: human-readable descriptions of Bridge error
+ * interrupt status bits, indexed by bit number in b_int_status
+ * (see the PCIBR_ISR_ERR_START..PCIBR_ISR_MAX_ERRS loops below).
+ * Bits 0-7 are the PCI device interrupts, hence the empty strings.
+ */
+LOCAL char             *pcibr_isr_errs[] =
+{
+    "", "", "", "", "", "", "", "",
+    "08: GIO non-contiguous byte enable in crosstalk packet",
+    "09: PCI to Crosstalk read request timeout",
+    "10: PCI retry operation count exhausted.",
+    "11: PCI bus device select timeout",
+    "12: PCI device reported parity error",
+    "13: PCI Address/Cmd parity error ",
+    "14: PCI Bridge detected parity error",
+    "15: PCI abort condition",
+    "16: SSRAM parity error",
+    "17: LLP Transmitter Retry count wrapped",
+    "18: LLP Transmitter side required Retry",
+    "19: LLP Receiver retry count wrapped",
+    "20: LLP Receiver check bit error",
+    "21: LLP Receiver sequence number error",
+    "22: Request packet overflow",
+    "23: Request operation not supported by bridge",
+    "24: Request packet has invalid address for bridge widget",
+    "25: Incoming request xtalk command word error bit set or invalid sideband",
+    "26: Incoming response xtalk command word error bit set or invalid sideband",
+    "27: Framing error, request cmd data size does not match actual",
+    "28: Framing error, response cmd data size does not match actual",
+    "29: Unexpected response arrived",
+    "30: Access to SSRAM beyond device limits",
+    "31: Multiple errors occurred",
+};
+
+/*
+ * PCI Bridge Error interrupt handling.
+ * This routine gets invoked from system interrupt dispatcher
+ * and is responsible for invoking appropriate error handler,
+ * depending on the type of error.
+ * This IS a duplicate of bridge_errintr defined specific to IP30.
+ * There are some minor differences in terms of the return value and
+ * parameters passed. One of these two should be removed at some point
+ * of time.
+ */
+/*ARGSUSED */
+/*
+ * pcibr_error_dump: print the Bridge's error state (status bits,
+ * error address/command registers, response buffer, SSRAM and PCI
+ * error registers) and panic if a fatal error bit is set.
+ */
+void
+pcibr_error_dump(pcibr_soft_t pcibr_soft)
+{
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    bridgereg_t             int_status;
+    int                     i;
+
+    /* mask off the eight PCI device interrupt bits; keep error bits */
+    int_status = (bridge->b_int_status & ~BRIDGE_ISR_INT_MSK);
+
+    PRINT_ALERT( "%s PCI BRIDGE ERROR: int_status is 0x%X",
+	    pcibr_soft->bs_name, int_status);
+
+    for (i = PCIBR_ISR_ERR_START; i < PCIBR_ISR_MAX_ERRS; i++) {
+	if (int_status & (1 << i)) {
+	    PRINT_WARNING( "%s", pcibr_isr_errs[i]);
+	}
+    }
+
+    if (int_status & BRIDGE_ISR_XTALK_ERROR) {
+	print_bridge_errcmd(bridge->b_wid_err_cmdword, "");
+
+	PRINT_WARNING("   Bridge error address 0x%lx",
+		(((uint64_t) bridge->b_wid_err_upper << 32) |
+		 bridge->b_wid_err_lower));
+
+	print_bridge_errcmd(bridge->b_wid_aux_err, "Aux");
+
+	if (int_status & (BRIDGE_ISR_BAD_XRESP_PKT | BRIDGE_ISR_RESP_XTLK_ERR)) {
+	    PRINT_WARNING("	Bridge response buffer: dev-num %d buff-num %d addr 0x%lx\n",
+		    ((bridge->b_wid_resp_upper >> 20) & 0x3),
+		    ((bridge->b_wid_resp_upper >> 16) & 0xF),
+		    (((uint64_t) (bridge->b_wid_resp_upper & 0xFFFF) << 32) |
+		     bridge->b_wid_resp_lower));
+	}
+    }
+    if (int_status & BRIDGE_ISR_SSRAM_PERR)
+	PRINT_WARNING("   Bridge SSRAM parity error register 0x%x",
+		bridge->b_ram_perr);
+
+    if (int_status & BRIDGE_ISR_PCIBUS_ERROR) {
+	PRINT_WARNING("   PCI/GIO error upper address register 0x%x",
+		bridge->b_pci_err_upper);
+
+	PRINT_WARNING("   PCI/GIO error lower address register 0x%x",
+		bridge->b_pci_err_lower);
+    }
+    if (int_status & BRIDGE_ISR_ERROR_FATAL) {
+	cmn_err_tag(14, (int)CE_PANIC, "PCI Bridge Error interrupt killed the system");
+	/*NOTREACHED */
+    } else {
+	PRINT_ALERT( "Non-fatal Error in Bridge..");
+    }
+}
+
+/*
+ * PCIBR_ERRINTR_GROUP(error): nonzero if the error word contains any
+ * PCI- or GIO-group error interrupt bits.
+ * (The previous expansion had unbalanced parentheses -- three opens,
+ * one close -- and could not compile if ever used.)
+ */
+#define PCIBR_ERRINTR_GROUP(error)	\
+		((error) & (BRIDGE_IRR_PCI_GRP|BRIDGE_IRR_GIO_GRP))
+
+/*
+ * pcibr_errintr_group: translate a Bridge error interrupt status
+ * word into the matching group-clear mask for the b_int_rst_stat
+ * register.  BRIDGE_IRR_MULTI_CLR is always included in the result;
+ * each group-clear bit is added when any error bit of that group
+ * is present in 'error'.
+ */
+uint32_t
+pcibr_errintr_group(uint32_t error)
+{
+    uint32_t              clear_mask = BRIDGE_IRR_MULTI_CLR;
+
+    if (error & BRIDGE_IRR_CRP_GRP)
+	clear_mask |= BRIDGE_IRR_CRP_GRP_CLR;
+    if (error & BRIDGE_IRR_RESP_BUF_GRP)
+	clear_mask |= BRIDGE_IRR_RESP_BUF_GRP_CLR;
+    if (error & BRIDGE_IRR_REQ_DSP_GRP)
+	clear_mask |= BRIDGE_IRR_REQ_DSP_GRP_CLR;
+    if (error & BRIDGE_IRR_LLP_GRP)
+	clear_mask |= BRIDGE_IRR_LLP_GRP_CLR;
+    if (error & BRIDGE_IRR_SSRAM_GRP)
+	clear_mask |= BRIDGE_IRR_SSRAM_GRP_CLR;
+    if (error & BRIDGE_IRR_PCI_GRP)
+	clear_mask |= BRIDGE_IRR_PCI_GRP_CLR;
+
+    return clear_mask;
+}
+
+
+/* pcibr_pioerr_check():
+ *	Check to see if this pcibr has a PCI PIO
+ *	TIMEOUT error; if so, clear it and bump
+ *	the timeout-count on any piomaps that
+ *	could cover the address.
+ */
+static void
+pcibr_pioerr_check(pcibr_soft_t soft)
+{
+    bridge_t		   *bridge;
+    bridgereg_t		    b_int_status;
+    bridgereg_t		    b_pci_err_lower;
+    bridgereg_t		    b_pci_err_upper;
+    iopaddr_t		    pci_addr;
+    pciio_slot_t	    slot;
+    pcibr_piomap_t	    map;
+    iopaddr_t		    base;
+    size_t		    size;
+    unsigned		    win;
+    int			    func;
+
+    bridge = soft->bs_base;
+    b_int_status = bridge->b_int_status;
+    if (b_int_status & BRIDGE_ISR_PCIBUS_PIOERR) {
+	/* latch the error address registers, then re-read the status
+	 * to confirm the PIO error bit is still set before clearing it.
+	 */
+	b_pci_err_lower = bridge->b_pci_err_lower;
+	b_pci_err_upper = bridge->b_pci_err_upper;
+	b_int_status = bridge->b_int_status;
+	if (b_int_status & BRIDGE_ISR_PCIBUS_PIOERR) {
+	    bridge->b_int_rst_stat = (BRIDGE_IRR_PCI_GRP_CLR|
+				      BRIDGE_IRR_MULTI_CLR);
+
+	    /* reassemble the faulting 64-bit PCI address */
+	    pci_addr = b_pci_err_upper & BRIDGE_ERRUPPR_ADDRMASK;
+	    pci_addr = (pci_addr << 32) | b_pci_err_lower;
+
+	    /* scan all slots/functions/piomaps for a map covering it */
+	    slot = 8;
+	    while (slot-- > 0) {
+		int 		nfunc = soft->bs_slot[slot].bss_ninfo;
+		pcibr_info_h	pcibr_infoh = soft->bs_slot[slot].bss_infos;
+
+		for (func = 0; func < nfunc; func++) {
+		    pcibr_info_t 	pcibr_info = pcibr_infoh[func];
+
+		    if (!pcibr_info)
+			continue;
+
+		    for (map = pcibr_info->f_piomap;
+			 map != NULL; map = map->bp_next) {
+			base = map->bp_pciaddr;
+			size = map->bp_mapsz;
+			win = map->bp_space - PCIIO_SPACE_WIN(0);
+			if (win < 6)
+			    base += 
+				soft->bs_slot[slot].bss_window[win].bssw_base;
+			else if (map->bp_space == PCIIO_SPACE_ROM)
+			    base += pcibr_info->f_rbase;
+/* NOTE(review): the timeout counter is only bumped on IRIX; the Linux
+ * build currently just clears the error without recording it. */
+#ifdef IRIX
+			if ((pci_addr >= base) && (pci_addr < (base + size)))
+			    atomicAddInt(map->bp_toc, 1);
+#endif
+		    }
+		}
+	    }
+	}
+    }
+}
+
+/*
+ * PCI Bridge Error interrupt handler.
+ *      This gets invoked, whenever a PCI bridge sends an error interrupt.
+ *      Primarily this serves two purposes.
+ *              - If an error can be handled (typically a PIO read/write
+ *                error, we try to do it silently.
+ *              - If an error cannot be handled, we die violently.
+ *      Interrupt due to PIO errors:
+ *              - Bridge sends an interrupt, whenever a PCI operation
+ *                done by the bridge as the master fails. Operations could
+ *                be either a PIO read or a PIO write.
+ *                A PIO read error also triggers a bus error, and is
+ *                primarily handled there; we mostly ignore this
+ *                interrupt in that context.
+ *                For PIO write errors, this interrupt is the only
+ *                indication, and we have to handle them with the
+ *                info available here.
+ *
+ *                So, there is no way to distinguish if an interrupt is
+ *                due to read or write error!.
+ */
+
+
+LOCAL void
+pcibr_error_intr_handler(intr_arg_t arg)
+{
+    pcibr_soft_t            pcibr_soft;
+    bridge_t               *bridge;
+    bridgereg_t             int_status;
+    bridgereg_t             err_status;
+    int                     i;
+
+#if defined(SN0_HWDEBUG)
+    extern int		    la_trigger_nasid1;
+    extern int		    la_trigger_nasid2;
+    extern long		    la_trigger_val;
+#endif
+
+    /* REFERENCED */
+    bridgereg_t             disable_errintr_mask = 0;
+#ifdef IRIX
+    int 		    rv;
+#else
+    int                     rv = 0;
+#endif
+    int 		    error_code = IOECODE_DMA | IOECODE_READ;
+    ioerror_mode_t 	    mode = MODE_DEVERROR;
+    ioerror_t 	            ioe;
+
+#if defined(SN0_HWDEBUG)
+   /*
+    * trigger points for logic analyzer. Used to debug the DMA timeout
+    * note that 0xcafe is added to the trigger values to avoid false
+    * triggers when la_trigger_val shows up in a cacheline as data
+    */
+   if (la_trigger_nasid1 != -1) 
+	REMOTE_HUB_PI_S(la_trigger_nasid1, 0, PI_CPU_NUM, la_trigger_val + 0xcafe);
+   if (la_trigger_nasid2 != -1) 
+	REMOTE_HUB_PI_S(la_trigger_nasid2, 0, PI_CPU_NUM, la_trigger_val + 0xcafe);
+#endif
+
+#if PCIBR_SOFT_LIST
+    /* IP27 seems to be handing us junk.
+     * Sanity-check 'arg' against the global list of known pcibr_softs
+     * before trusting it; panic if it is not a registered soft.
+     */
+    {
+	pcibr_list_p            entry;
+
+	entry = pcibr_list;
+	while (1) {
+	    if (entry == NULL) {
+		printk("pcibr_error_intr_handler:\n"
+			"\tparameter (0x%p) is not a pcibr_soft!",
+			arg);
+		PRINT_PANIC("Invalid parameter to pcibr_error_intr_handler");
+	    }
+	    if ((intr_arg_t) entry->bl_soft == arg)
+		break;
+	    entry = entry->bl_next;
+	}
+    }
+#endif
+    pcibr_soft = (pcibr_soft_t) arg;
+    bridge = pcibr_soft->bs_base;
+
+    /*
+     * pcibr_error_intr_handler gets invoked whenever bridge encounters
+     * an error situation, and the interrupt for that error is enabled.
+     * This routine decides if the error is fatal or not, and takes
+     * action accordingly.
+     *
+     * In one case there is a need for special action.
+     * In case of PIO read/write timeouts due to user level, we do
+     * get an error interrupt. In this case, way to handle would
+     * be to start a timeout. If the error was due to "read", bus
+     * error handling code takes care of it. If error is due to write,
+     * it's handled at timeout
+     */
+
+    /* int_status is which bits we have to clear;
+     * err_status is the bits we haven't handled yet.
+     */
+
+    int_status = bridge->b_int_status &  ~BRIDGE_ISR_INT_MSK;
+    err_status = int_status & ~BRIDGE_ISR_MULTI_ERR;
+
+    if (!(int_status & ~BRIDGE_ISR_INT_MSK)) {
+	/*
+	 * No error bit set!!.
+	 */
+	return;
+    }
+    /* If we have a PCIBUS_PIOERR,
+     * hand it to the logger but otherwise
+     * ignore the event.
+     */
+    if (int_status & BRIDGE_ISR_PCIBUS_PIOERR) {
+	pcibr_pioerr_check(pcibr_soft);
+	err_status &= ~BRIDGE_ISR_PCIBUS_PIOERR;
+	int_status &= ~BRIDGE_ISR_PCIBUS_PIOERR;
+    }
+
+
+    /* Account per-error-bit statistics, panic on runaway LLP transmit
+     * retries, and build a mask of noisy interrupts to disable.
+     */
+    if (err_status) {
+	struct bs_errintr_stat_s *bs_estat = pcibr_soft->bs_errintr_stat;
+
+	for (i = PCIBR_ISR_ERR_START; i < PCIBR_ISR_MAX_ERRS; i++, bs_estat++) {
+	    if (err_status & (1 << i)) {
+		uint32_t              errrate = 0;
+		uint32_t              errcount = 0;
+		uint32_t              errinterval = 0, current_tick = 0;
+		int                     panic_on_llp_tx_retry = 0;
+		int                     is_llp_tx_retry_intr = 0;
+
+		bs_estat->bs_errcount_total++;
+
+/* NOTE(review): on Linux current_tick is hard-wired to 0, so
+ * errinterval underflows relative to the last timestamp; the rate
+ * heuristics below are effectively IRIX-only. */
+#ifdef IRIX
+		current_tick = lbolt;
+#else
+		current_tick = 0;
+#endif
+		errinterval = (current_tick - bs_estat->bs_lasterr_timestamp);
+		errcount = (bs_estat->bs_errcount_total -
+			    bs_estat->bs_lasterr_snapshot);
+
+		is_llp_tx_retry_intr = (BRIDGE_ISR_LLP_TX_RETRY == (1 << i));
+
+		/* On a non-zero error rate (which is equivalent to
+		 * to 100 errors /sec at least) for the LLP transmitter
+		 * retry interrupt we need to panic the system
+		 * to prevent potential data corruption .
+		 * NOTE : errcount is being compared to PCIBR_ERRTIME_THRESHOLD
+		 * to make sure that we are not seeing cases like x error
+		 * interrupts per y ticks for very low x ,y (x > y ) which
+		 * makes error rate be > 100 /sec.
+		 */
+
+		/* Check for the divide by zero condition while
+		 * calculating the error rates.
+		 */
+
+		if (errinterval) {
+		    errrate = errcount / errinterval;
+		    /* If able to calculate error rate
+		     * on a LLP transmitter retry interrupt check
+		     * if the error rate is nonzero and we have seen
+		     * a certain minimum number of errors.
+		     */
+		    if (is_llp_tx_retry_intr &&
+			errrate &&
+			(errcount >= PCIBR_ERRTIME_THRESHOLD)) {
+			panic_on_llp_tx_retry = 1;
+		    }
+		} else {
+		    errrate = 0;
+		    /* Since we are not able to calculate the
+		     * error rate check if we exceeded a certain
+		     * minimum number of errors for LLP transmitter
+		     * retries. Note that this can only happen
+		     * within the first tick after the last snapshot.
+		     */
+		    if (is_llp_tx_retry_intr &&
+			(errcount >= PCIBR_ERRINTR_DISABLE_LEVEL)) {
+			panic_on_llp_tx_retry = 1;
+		    }
+		}
+		if (panic_on_llp_tx_retry) {
+		    static uint32_t       last_printed_rate;
+
+		    if (errrate > last_printed_rate) {
+			last_printed_rate = errrate;
+			/* Print the warning only if the error rate
+			 * for the transmitter retry interrupt
+			 * exceeded the previously printed rate.
+			 */
+			PRINT_WARNING(
+				"%s: %s, Excessive error interrupts : %d/tick\n",
+				pcibr_soft->bs_name,
+				pcibr_isr_errs[i],
+				errrate);
+
+		    }
+		    /*
+		     * Update snapshot, and time
+		     */
+		    bs_estat->bs_lasterr_timestamp = current_tick;
+		    bs_estat->bs_lasterr_snapshot =
+			bs_estat->bs_errcount_total;
+
+		}
+		/*
+		 * If the error rate is high enough, print the error rate.
+		 */
+		if (errinterval > PCIBR_ERRTIME_THRESHOLD) {
+
+		    if (errrate > PCIBR_ERRRATE_THRESHOLD) {
+			PRINT_NOTICE( "%s: %s, Error rate %d/tick",
+				pcibr_soft->bs_name,
+				pcibr_isr_errs[i],
+				errrate);
+			/*
+			 * Update snapshot, and time
+			 */
+			bs_estat->bs_lasterr_timestamp = current_tick;
+			bs_estat->bs_lasterr_snapshot =
+			    bs_estat->bs_errcount_total;
+		    }
+		}
+		if (bs_estat->bs_errcount_total > PCIBR_ERRINTR_DISABLE_LEVEL) {
+		    /*
+		     * We have seen a fairly large number of errors of
+		     * this type. Let's disable the interrupt. But flash
+		     * a message about the interrupt being disabled.
+		     */
+		    PRINT_NOTICE(
+			    "%s Disabling error interrupt type %s. Error count %d",
+			    pcibr_soft->bs_name,
+			    pcibr_isr_errs[i],
+			    bs_estat->bs_errcount_total);
+		    disable_errintr_mask |= (1 << i);
+		}
+	    }
+	}
+    }
+
+    if (disable_errintr_mask) {
+	/*
+	 * Disable some high frequency errors as they
+	 * could eat up too much cpu time.
+	 */
+	bridge->b_int_enable &= ~disable_errintr_mask;
+    }
+    /*
+     * If we leave the PROM cacheable, T5 might
+     * try to do a cache line sized writeback to it,
+     * which will cause a BRIDGE_ISR_INVLD_ADDR.
+     */
+    if ((err_status & BRIDGE_ISR_INVLD_ADDR) &&
+	(0x00000000 == bridge->b_wid_err_upper) &&
+	(0x00C00000 == (0xFFC00000 & bridge->b_wid_err_lower)) &&
+	(0x00402000 == (0x00F07F00 & bridge->b_wid_err_cmdword))) {
+	err_status &= ~BRIDGE_ISR_INVLD_ADDR;
+    }
+#if defined (PCIBR_LLP_CONTROL_WAR)
+    /*
+     * The bridge bug, where the llp_config or control registers
+     * need to be read back after being written, affects an MP
+     * system since there could be small windows between writing
+     * the register and reading it back on one cpu while another
+     * cpu is fielding an interrupt. If we run into this scenario,
+     * workaround the problem by ignoring the error. (bug 454474)
+     * pcibr_llp_control_war_cnt keeps an approximate number of
+     * times we saw this problem on a system.
+     */
+
+    if ((err_status & BRIDGE_ISR_INVLD_ADDR) &&
+	((((uint64_t) bridge->b_wid_err_upper << 32) | (bridge->b_wid_err_lower))
+	 == (BRIDGE_INT_RST_STAT & 0xff0))) {
+#ifdef IRIX
+	if (kdebug)
+	    PRINT_NOTICE( "%s bridge: ignoring llp/control address interrupt",
+		    pcibr_soft->bs_name);
+#endif
+	pcibr_llp_control_war_cnt++;
+	err_status &= ~BRIDGE_ISR_INVLD_ADDR;
+    }
+#endif				/* PCIBR_LLP_CONTROL_WAR */
+
+    /* Check if this is the RESP_XTALK_ERROR interrupt. 
+     * This can happen due to a failed DMA READ operation.
+     */
+    if (err_status & BRIDGE_ISR_RESP_XTLK_ERR) {
+	/* Phase 1 : Look at the error state in the bridge and further
+	 * down in the device layers.
+	 */
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+	(void)error_state_set(pcibr_soft->bs_conn, ERROR_STATE_LOOKUP);
+#endif
+	IOERROR_SETVALUE(&ioe, widgetnum, pcibr_soft->bs_xid);
+	(void)pcibr_error_handler((error_handler_arg_t)pcibr_soft,
+				  error_code,
+				  mode,
+				  &ioe);
+	/* Phase 2 : Perform the action agreed upon in phase 1.
+	 */
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+	(void)error_state_set(pcibr_soft->bs_conn, ERROR_STATE_ACTION);
+#endif
+	rv = pcibr_error_handler((error_handler_arg_t)pcibr_soft,
+				 error_code,
+				 mode,
+				 &ioe);
+    }
+    if (rv != IOERROR_HANDLED) {
+#ifdef	DEBUG
+	if (err_status & BRIDGE_ISR_ERROR_DUMP)
+	    pcibr_error_dump(pcibr_soft);
+#else	
+	if (err_status & BRIDGE_ISR_ERROR_FATAL) {
+	    printk("BRIDGE ERR STATUS 0x%x\n", err_status);
+	    pcibr_error_dump(pcibr_soft);
+	}
+#endif
+    }
+    /*
+     * We can't return without re-enabling the interrupt, since
+     * it would cause problems for devices like IOC3 (Lost
+     * interrupts ?.). So, just cleanup the interrupt, and
+     * use saved values later..
+     */
+    bridge->b_int_rst_stat = pcibr_errintr_group(int_status);
+}
+
+/*
+ * pcibr_addr_toslot
+ *      Given the 'pciaddr' find out which slot this address is
+ *      allocated to, and return the slot number.
+ *      While we have the info handy, construct the
+ *      function number, space code and offset as well.
+ *
+ * NOTE: if this routine is called, we don't know whether
+ * the address is in CFG, MEM, or I/O space. We have to guess.
+ * This will be the case on PIO stores, where the only way
+ * we have of getting the address is to check the Bridge, which
+ * stores the PCI address but not the space and not the xtalk
+ * address (from which we could get it).
+ */
+LOCAL int
+pcibr_addr_toslot(pcibr_soft_t pcibr_soft,
+		  iopaddr_t pciaddr,
+		  pciio_space_t *spacep,
+		  iopaddr_t *offsetp,
+		  pciio_function_t *funcp)
+{
+#ifdef IRIX
+    int                     s, f, w;
+#else
+    int                     s, f=0, w;
+#endif
+    iopaddr_t               base;
+    size_t                  size;
+    pciio_piospace_t        piosp;
+
+    /*
+     * Check if the address is in config space
+     */
+
+    if ((pciaddr >= BRIDGE_CONFIG_BASE) && (pciaddr < BRIDGE_CONFIG_END)) {
+
+	if (pciaddr >= BRIDGE_CONFIG1_BASE)
+	    pciaddr -= BRIDGE_CONFIG1_BASE;
+	else
+	    pciaddr -= BRIDGE_CONFIG_BASE;
+
+	/* slot is the config-slot-sized chunk; remainder locates the
+	 * function (0x100 bytes each) and register offset within it.
+	 */
+	s = pciaddr / BRIDGE_CONFIG_SLOT_SIZE;
+	pciaddr %= BRIDGE_CONFIG_SLOT_SIZE;
+
+	if (funcp) {
+	    f = pciaddr / 0x100;
+	    pciaddr %= 0x100;
+	}
+	if (spacep)
+	    *spacep = PCIIO_SPACE_CFG;
+	if (offsetp)
+	    *offsetp = pciaddr;
+	if (funcp)
+	    *funcp = f;
+
+	return s;
+    }
+    /* Next, try to match the address against each function's
+     * six PCI base-address windows on every slot.
+     */
+    for (s = 0; s < 8; s++) {
+	int                     nf = pcibr_soft->bs_slot[s].bss_ninfo;
+	pcibr_info_h            pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
+
+	for (f = 0; f < nf; f++) {
+	    pcibr_info_t            pcibr_info = pcibr_infoh[f];
+
+	    if (!pcibr_info)
+		continue;
+	    for (w = 0; w < 6; w++) {
+		if (pcibr_info->f_window[w].w_space
+		    == PCIIO_SPACE_NONE) {
+		    continue;
+		}
+		base = pcibr_info->f_window[w].w_base;
+		size = pcibr_info->f_window[w].w_size;
+
+		if ((pciaddr >= base) && (pciaddr < (base + size))) {
+		    if (spacep)
+			*spacep = PCIIO_SPACE_WIN(w);
+		    if (offsetp)
+			*offsetp = pciaddr - base;
+		    if (funcp)
+			*funcp = f;
+		    return s;
+		}			/* endif match */
+	    }				/* next window */
+	}				/* next func */
+    }					/* next slot */
+
+    /*
+     * Check if the address was allocated as part of the
+     * pcibr_piospace_alloc calls.
+     */
+    for (s = 0; s < 8; s++) {
+	int                     nf = pcibr_soft->bs_slot[s].bss_ninfo;
+	pcibr_info_h            pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
+
+	for (f = 0; f < nf; f++) {
+	    pcibr_info_t            pcibr_info = pcibr_infoh[f];
+
+	    if (!pcibr_info)
+		continue;
+	    piosp = pcibr_info->f_piospace;
+	    while (piosp) {
+		if ((piosp->start <= pciaddr) &&
+		    ((piosp->count + piosp->start) > pciaddr)) {
+		    if (spacep)
+			*spacep = piosp->space;
+		    if (offsetp)
+			*offsetp = pciaddr - piosp->start;
+		    return s;
+		}			/* endif match */
+		piosp = piosp->next;
+	    }				/* next piosp */
+	}				/* next func */
+    }					/* next slot */
+
+    /*
+     * Some other random address on the PCI bus ...
+     * we have no way of knowing whether this was
+     * a MEM or I/O access; so, for now, we just
+     * assume that the low 1G is MEM, the next
+     * 3G is I/O, and anything above the 4G limit
+     * is obviously MEM.
+     */
+
+    if (spacep)
+	*spacep = ((pciaddr < (1ul << 30)) ? PCIIO_SPACE_MEM :
+		   (pciaddr < (4ul << 30)) ? PCIIO_SPACE_IO :
+		   PCIIO_SPACE_MEM);
+    if (offsetp)
+	*offsetp = pciaddr;
+
+    return PCIIO_SLOT_NONE;
+
+}
+
+/*
+ * pcibr_error_cleanup: clear latched PCI-group error state in the
+ * Bridge after a PIO error has been handled.  Only meaningful for
+ * PIO errors (asserted below).
+ */
+LOCAL void
+pcibr_error_cleanup(pcibr_soft_t pcibr_soft, int error_code)
+{
+    bridge_t               *bridge = pcibr_soft->bs_base;
+
+    ASSERT(error_code & IOECODE_PIO);
+    /* self-assignment: presumably quiets an unused-argument warning
+     * in non-DEBUG builds -- NOTE(review): confirm before removing.
+     */
+    error_code = error_code;
+
+    bridge->b_int_rst_stat =
+	(BRIDGE_IRR_PCI_GRP_CLR | BRIDGE_IRR_MULTI_CLR);
+    (void) bridge->b_wid_tflush;	/* flushbus */
+}
+
+/*
+ * pcibr_error_extract
+ *      Given the 'pcibr vertex handle' find out which slot
+ *      the bridge status error address (from pcibr_soft info
+ *      hanging off the vertex)
+ *      allocated to, and return the slot number.
+ *      While we have the info handy, construct the
+ *      space code and offset as well.
+ *
+ * NOTE: if this routine is called, we don't know whether
+ * the address is in CFG, MEM, or I/O space. We have to guess.
+ * This will be the case on PIO stores, where the only way
+ * we have of getting the address is to check the Bridge, which
+ * stores the PCI address but not the space and not the xtalk
+ * address (from which we could get it).
+ *
+ * XXX- this interface has no way to return the function
+ * number on a multifunction card, even though that data
+ * is available.
+ */
+
+pciio_slot_t
+pcibr_error_extract(devfs_handle_t pcibr_vhdl,
+		    pciio_space_t *spacep,
+		    iopaddr_t *offsetp)
+{
+    pcibr_soft_t            pcibr_soft = 0;
+    iopaddr_t               bserr_addr;
+    bridge_t               *bridge;
+    pciio_slot_t            slot = PCIIO_SLOT_NONE;
+    arbitrary_info_t	    rev;
+
+    /* Do a sanity check as to whether we really got a 
+     * bridge vertex handle: the ASIC-rev label is only present
+     * on bridge vertices.
+     */
+    if (hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &rev) !=
+	GRAPH_SUCCESS) 
+	return(slot);
+
+    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+    if (pcibr_soft) {
+	bridge = pcibr_soft->bs_base;
+	/* reassemble the 64-bit faulting PCI address from the
+	 * Bridge error address registers.
+	 */
+	bserr_addr =
+	    bridge->b_pci_err_lower |
+	    ((uint64_t) (bridge->b_pci_err_upper &
+			   BRIDGE_ERRUPPR_ADDRMASK) << 32);
+
+	slot = pcibr_addr_toslot(pcibr_soft, bserr_addr,
+				 spacep, offsetp, NULL);
+    }
+    return slot;
+}
+
+/*ARGSUSED */
+/*
+ * pcibr_device_disable: intended to quiesce a device that failed to
+ * handle an error.  Currently an unimplemented placeholder -- it has
+ * no effect.
+ */
+void
+pcibr_device_disable(pcibr_soft_t pcibr_soft, int devnum)
+{
+    /*
+     * XXX
+     * Device failed to handle error. Take steps to
+     * disable this device ? HOW TO DO IT ?
+     *
+     * If there are any Read response buffers associated
+     * with this device, it's time to get them back!!
+     *
+     * We can disassociate any interrupt level associated
+     * with this device, and disable that interrupt level
+     *
+     * For now it's just a place holder
+     */
+}
+
+/*
+ * pcibr_pioerror
+ *      Handle PIO error that happened at the bridge pointed by pcibr_soft.
+ *
+ *      Queries the Bus interface attached to see if the device driver
+ *      mapping the device-number that caused error can handle the
+ *      situation. If so, it will clean up any error, and return
+ *      indicating the error was handled. If the device driver is unable
+ *      to handle the error, it expects the bus-interface to disable that
+ *      device, and takes any steps needed here to take away any resources
+ *      associated with this device.
+ */
+
+/* BEM_* helper macros: build up a "bridge error message" by printing
+ * strings, variables, decoded registers, and address spaces.  The
+ * register/space decoders depend on the non-standard %R printk format
+ * and compile to nothing when SUPPORT_SGI_CMN_ERR_STUFF is unset.
+ */
+#define BEM_ADD_STR(s)	printk("%s", (s))
+#ifdef SUPPORT_SGI_CMN_ERR_STUFF
+#define BEM_ADD_VAR(v)	printk("\t%20s: 0x%x\n", #v, (v))
+#define BEM_ADD_REG(r)	printk("\t%20s: %R\n", #r, (r), r ## _desc)
+
+#define BEM_ADD_NSPC(n,s)	printk("\t%20s: %R\n", n, s, space_desc)
+#else
+#define BEM_ADD_VAR(v)	
+#define BEM_ADD_REG(r)	
+#define BEM_ADD_NSPC(n,s)
+#endif
+#define BEM_ADD_SPC(s)		BEM_ADD_NSPC(#s, s)
+
+/* BEM_ADD_IOE doesn't dump the whole ioerror, it just
+ * decodes the PCI specific portions -- we count on our
+ * callers to dump the raw IOE data.
+ * (Compiled out unless 'colin' is defined.)
+ */
+#ifdef colin
+#define BEM_ADD_IOE(ioe)						\
+	do {								\
+	    if (IOERROR_FIELDVALID(ioe, busspace)) {			\
+		unsigned		spc;				\
+		unsigned		win;				\
+									\
+		spc = IOERROR_GETVALUE(ioe, busspace);			\
+		win = spc - PCIIO_SPACE_WIN(0);				\
+									\
+		switch (spc) {						\
+		case PCIIO_SPACE_CFG:					\
+		    printk("\tPCI Slot %d Func %d CFG space Offset 0x%x\n",	\
+	    pciio_widgetdev_slot_get(IOERROR_GETVALUE(ioe, widgetdev)),	\
+	    pciio_widgetdev_func_get(IOERROR_GETVALUE(ioe, widgetdev)),	\
+			    IOERROR_GETVALUE(ioe, busaddr));		\
+		    break;						\
+		case PCIIO_SPACE_IO:					\
+		    printk("\tPCI I/O space  Offset 0x%x\n",		\
+			    IOERROR_GETVALUE(ioe, busaddr));		\
+		    break;						\
+		case PCIIO_SPACE_MEM:					\
+		case PCIIO_SPACE_MEM32:					\
+		case PCIIO_SPACE_MEM64:					\
+		    printk("\tPCI MEM space Offset 0x%x\n",		\
+			    IOERROR_GETVALUE(ioe, busaddr));		\
+		    break;						\
+		default:						\
+		    if (win < 6) {					\
+		    printk("\tPCI Slot %d Func %d Window %d Offset 0x%x\n",\
+	    pciio_widgetdev_slot_get(IOERROR_GETVALUE(ioe, widgetdev)),	\
+	    pciio_widgetdev_func_get(IOERROR_GETVALUE(ioe, widgetdev)),	\
+			    win,					\
+			    IOERROR_GETVALUE(ioe, busaddr));		\
+		    }							\
+		    break;						\
+		}							\
+	    }								\
+	} while (0)
+#else
+#define BEM_ADD_IOE(ioe)
+#endif
+
+/*ARGSUSED */
+LOCAL int
+pcibr_pioerror(
+		  pcibr_soft_t pcibr_soft,
+		  int error_code,
+		  ioerror_mode_t mode,
+		  ioerror_t *ioe)
+{
+    int                     retval = IOERROR_HANDLED;
+
+    devfs_handle_t            pcibr_vhdl = pcibr_soft->bs_vhdl;
+    bridge_t               *bridge = pcibr_soft->bs_base;
+
+    bridgereg_t             bridge_int_status;
+    bridgereg_t             bridge_pci_err_lower;
+    bridgereg_t             bridge_pci_err_upper;
+    bridgereg_t             bridge_pci_err_addr;
+
+    iopaddr_t               bad_xaddr;
+
+    pciio_space_t           raw_space;	/* raw PCI space */
+    iopaddr_t               raw_paddr;	/* raw PCI address */
+
+    pciio_space_t           space;	/* final PCI space */
+    pciio_slot_t            slot;	/* final PCI slot, if appropriate */
+    pciio_function_t        func;	/* final PCI func, if appropriate */
+    iopaddr_t               offset;	/* final PCI offset */
+    
+    int                     cs, cw, cf;
+    pciio_space_t           wx;
+    iopaddr_t               wb;
+    size_t                  ws;
+    iopaddr_t               wl;
+
+
+    /*
+     * We expect to have an "xtalkaddr" coming in,
+     * and need to construct the slot/space/offset.
+     */
+
+#ifdef colin
+    bad_xaddr = IOERROR_GETVALUE(ioe, xtalkaddr);
+#else
+    bad_xaddr = -1;
+#endif
+
+    slot = PCIIO_SLOT_NONE;
+    func = PCIIO_FUNC_NONE;
+    raw_space = PCIIO_SPACE_NONE;
+    raw_paddr = 0;
+
+    if ((bad_xaddr >= BRIDGE_TYPE0_CFG_DEV0) &&
+	(bad_xaddr < BRIDGE_TYPE1_CFG)) {
+	raw_paddr = bad_xaddr - BRIDGE_TYPE0_CFG_DEV0;
+	slot = raw_paddr / BRIDGE_TYPE0_CFG_SLOT_OFF;
+	raw_paddr = raw_paddr % BRIDGE_TYPE0_CFG_SLOT_OFF;
+	raw_space = PCIIO_SPACE_CFG;
+    }
+    if ((bad_xaddr >= BRIDGE_TYPE1_CFG) &&
+	(bad_xaddr < (BRIDGE_TYPE1_CFG + 0x1000))) {
+	/* Type 1 config space:
+	 * slot and function numbers not known.
+	 * Perhaps we can read them back?
+	 */
+	raw_paddr = bad_xaddr - BRIDGE_TYPE1_CFG;
+	raw_space = PCIIO_SPACE_CFG;
+    }
+    if ((bad_xaddr >= BRIDGE_DEVIO0) &&
+	(bad_xaddr < BRIDGE_DEVIO(BRIDGE_DEV_CNT))) {
+	int                     x;
+
+	raw_paddr = bad_xaddr - BRIDGE_DEVIO0;
+	x = raw_paddr / BRIDGE_DEVIO_OFF;
+	raw_paddr %= BRIDGE_DEVIO_OFF;
+	/* first two devio windows are double-sized */
+	if ((x == 1) || (x == 3))
+	    raw_paddr += BRIDGE_DEVIO_OFF;
+	if (x > 0)
+	    x--;
+	if (x > 1)
+	    x--;
+	/* x is which devio reg; no guarantee
+	 * pci slot x will be responding.
+	 * still need to figure out who decodes
+	 * space/offset on the bus.
+	 */
+	raw_space = pcibr_soft->bs_slot[x].bss_devio.bssd_space;
+	if (raw_space == PCIIO_SPACE_NONE) {
+	    /* Someone got an error because they
+	     * accessed the PCI bus via a DevIO(x)
+	     * window that pcibr has not yet assigned
+	     * to any specific PCI address. It is
+	     * quite possible that the Device(x)
+	     * register has been changed since they
+	     * made their access, but we will give it
+	     * our best decode shot.
+	     */
+	    raw_space = pcibr_soft->bs_slot[x].bss_device
+		& BRIDGE_DEV_DEV_IO_MEM
+		? PCIIO_SPACE_MEM
+		: PCIIO_SPACE_IO;
+	    raw_paddr +=
+		(pcibr_soft->bs_slot[x].bss_device &
+		 BRIDGE_DEV_OFF_MASK) <<
+		BRIDGE_DEV_OFF_ADDR_SHFT;
+	} else
+	    raw_paddr += pcibr_soft->bs_slot[x].bss_devio.bssd_base;
+    }
+    if ((bad_xaddr >= BRIDGE_PCI_MEM32_BASE) &&
+	(bad_xaddr <= BRIDGE_PCI_MEM32_LIMIT)) {
+	raw_space = PCIIO_SPACE_MEM32;
+	raw_paddr = bad_xaddr - BRIDGE_PCI_MEM32_BASE;
+    }
+    if ((bad_xaddr >= BRIDGE_PCI_MEM64_BASE) &&
+	(bad_xaddr <= BRIDGE_PCI_MEM64_LIMIT)) {
+	raw_space = PCIIO_SPACE_MEM64;
+	raw_paddr = bad_xaddr - BRIDGE_PCI_MEM64_BASE;
+    }
+    if ((bad_xaddr >= BRIDGE_PCI_IO_BASE) &&
+	(bad_xaddr <= BRIDGE_PCI_IO_LIMIT)) {
+	raw_space = PCIIO_SPACE_IO;
+	raw_paddr = bad_xaddr - BRIDGE_PCI_IO_BASE;
+    }
+    space = raw_space;
+    offset = raw_paddr;
+
+    if ((slot == PCIIO_SLOT_NONE) && (space != PCIIO_SPACE_NONE)) {
+	/* we've got a space/offset but not which
+	 * pci slot decodes it. Check through our
+	 * notions of which devices decode where.
+	 *
+	 * Yes, this "duplicates" some logic in
+	 * pcibr_addr_toslot; the difference is,
+	 * this code knows which space we are in,
+	 * and can really really tell what is
+	 * going on (no guessing).
+	 */
+
+	for (cs = 0; (cs < 8) && (slot == PCIIO_SLOT_NONE); cs++) {
+	    int                     nf = pcibr_soft->bs_slot[cs].bss_ninfo;
+	    pcibr_info_h            pcibr_infoh = pcibr_soft->bs_slot[cs].bss_infos;
+
+	    for (cf = 0; (cf < nf) && (slot == PCIIO_SLOT_NONE); cf++) {
+		pcibr_info_t            pcibr_info = pcibr_infoh[cf];
+
+		if (!pcibr_info)
+		    continue;
+		for (cw = 0; (cw < 6) && (slot == PCIIO_SLOT_NONE); ++cw) {
+		    if (((wx = pcibr_info->f_window[cw].w_space) != PCIIO_SPACE_NONE) &&
+			((wb = pcibr_info->f_window[cw].w_base) != 0) &&
+			((ws = pcibr_info->f_window[cw].w_size) != 0) &&
+			((wl = wb + ws) > wb) &&
+			((wb <= offset) && (wl > offset))) {
+			/* MEM, MEM32 and MEM64 need to
+			 * compare as equal ...
+			 */
+			if ((wx == space) ||
+			    (((wx == PCIIO_SPACE_MEM) ||
+			      (wx == PCIIO_SPACE_MEM32) ||
+			      (wx == PCIIO_SPACE_MEM64)) &&
+			     ((space == PCIIO_SPACE_MEM) ||
+			      (space == PCIIO_SPACE_MEM32) ||
+			      (space == PCIIO_SPACE_MEM64)))) {
+			    slot = cs;
+			    func = cf;
+			    space = PCIIO_SPACE_WIN(cw);
+			    offset -= wb;
+			}		/* endif window space match */
+		    }			/* endif window valid and addr match */
+		}			/* next window unless slot set */
+	    }				/* next func unless slot set */
+	}				/* next slot unless slot set */
+	/* XXX- if slot is still -1, no PCI devices are
+	 * decoding here using their standard PCI BASE
+	 * registers. This would be a really good place
+	 * to cross-coordinate with the pciio PCI
+	 * address space allocation routines, to find
+	 * out if this address is "allocated" by any of
+	 * our subsidiary devices.
+	 */
+    }
+    /* Scan all piomap records on this PCI bus to update
+     * the TimeOut Counters on all matching maps. If we
+     * don't already know the slot number, take it from
+     * the first matching piomap. Note that we have to
+     * compare maps against raw_space and raw_paddr
+     * since space and offset could already be
+     * window-relative.
+     *
+     * There is a chance that one CPU could update
+     * through this path, and another CPU could also
+     * update due to an interrupt. Closing this hole
+     * would only result in the possibility of some
+     * errors never getting logged at all, and since the
+     * use for bp_toc is as a logical test rather than a
+     * strict count, the excess counts are not a
+     * problem.
+     */
+    for (cs = 0; cs < 8; ++cs) {
+	int 		nf = pcibr_soft->bs_slot[cs].bss_ninfo;
+	pcibr_info_h	pcibr_infoh = pcibr_soft->bs_slot[cs].bss_infos;
+
+	for (cf = 0; cf < nf; cf++) {
+	    pcibr_info_t 	pcibr_info = pcibr_infoh[cf];
+	    pcibr_piomap_t	map;    
+
+	    if (!pcibr_info)
+		continue;
+
+	    for (map = pcibr_info->f_piomap;
+	     map != NULL; map = map->bp_next) {
+	    wx = map->bp_space;
+	    wb = map->bp_pciaddr;
+	    ws = map->bp_mapsz;
+	    cw = wx - PCIIO_SPACE_WIN(0);
+	    if (cw < 6) {
+		wb += pcibr_soft->bs_slot[cs].bss_window[cw].bssw_base;
+		wx = pcibr_soft->bs_slot[cs].bss_window[cw].bssw_space;
+	    }
+	    if (wx == PCIIO_SPACE_ROM) {
+		wb += pcibr_info->f_rbase;
+		wx = PCIIO_SPACE_MEM;
+	    }
+	    if ((wx == PCIIO_SPACE_MEM32) ||
+		(wx == PCIIO_SPACE_MEM64))
+		wx = PCIIO_SPACE_MEM;
+	    wl = wb + ws;
+	    if ((wx == raw_space) && (raw_paddr >= wb) && (raw_paddr < wl)) {
+#ifdef IRIX
+		atomicAddInt(map->bp_toc, 1);
+#endif
+		if (slot == PCIIO_SLOT_NONE) {
+		    slot = cs;
+		    space = map->bp_space;
+		    if (cw < 6)
+			offset -= pcibr_soft->bs_slot[cs].bss_window[cw].bssw_base;
+		}
+	    }
+	    }
+	}
+    }
+
+    if (space != PCIIO_SPACE_NONE) {
+	if (slot != PCIIO_SLOT_NONE) {
+#ifdef IRIX
+	    if (func != PCIIO_FUNC_NONE)
+		IOERROR_SETVALUE(ioe, widgetdev, 
+				 pciio_widgetdev_create(slot,func));
+	    else
+    		IOERROR_SETVALUE(ioe, widgetdev, 
+				 pciio_widgetdev_create(slot,0));
+#else
+            if (func != PCIIO_FUNC_NONE) {
+                IOERROR_SETVALUE(ioe, widgetdev,
+                                 pciio_widgetdev_create(slot,func));
+            } else {
+                IOERROR_SETVALUE(ioe, widgetdev,
+                                 pciio_widgetdev_create(slot,0));
+	    }
+#endif
+	}
+
+	IOERROR_SETVALUE(ioe, busspace, space);
+	IOERROR_SETVALUE(ioe, busaddr, offset);
+    }
+    if (mode == MODE_DEVPROBE) {
+	/*
+	 * During probing, we don't really care what the
+	 * error is. Clean up the error in Bridge, notify
+	 * subsidiary devices, and return success.
+	 */
+	pcibr_error_cleanup(pcibr_soft, error_code);
+
+	/* if appropriate, give the error handler for this slot
+	 * a shot at this probe access as well.
+	 */
+	return (slot == PCIIO_SLOT_NONE) ? IOERROR_HANDLED :
+	    pciio_error_handler(pcibr_vhdl, error_code, mode, ioe);
+    }
+    /*
+     * If we don't know what "PCI SPACE" the access
+     * was targeting, we may have problems at the
+     * Bridge itself. Don't touch any bridge registers,
+     * and do complain loudly.
+     */
+
+    if (space == PCIIO_SPACE_NONE) {
+	printk("XIO Bus Error at %s\n"
+		"\taccess to XIO bus offset 0x%lx\n"
+		"\tdoes not correspond to any PCI address\n",
+		pcibr_soft->bs_name, bad_xaddr);
+
+	/* caller will dump contents of ioe struct */
+	return IOERROR_XTALKLEVEL;
+    }
+    /*
+     * Read the PCI Bridge error log registers.
+     */
+    bridge_int_status = bridge->b_int_status;
+    bridge_pci_err_upper = bridge->b_pci_err_upper;
+    bridge_pci_err_lower = bridge->b_pci_err_lower;
+
+    bridge_pci_err_addr =
+	bridge_pci_err_lower
+	| (((iopaddr_t) bridge_pci_err_upper
+	    & BRIDGE_ERRUPPR_ADDRMASK) << 32);
+
+    /*
+     * Actual PCI Error handling situation.
+     * Typically happens when a user level process accesses
+     * PCI space, and it causes some error.
+     *
+     * Due to PCI Bridge implementation, we get two indication
+     * for a read error: an interrupt and a Bus error.
+     * We like to handle read error in the bus error context.
+     * But the interrupt comes and goes before bus error
+     * could make much progress. (NOTE: the interrupt does
+     * come in _after_ bus error processing starts, but it's
+     * completed by the time bus error code reaches PCI PIO
+     * error handling.)
+     * Similarly write error results in just an interrupt,
+     * and error handling has to be done at interrupt level.
+     * There is no way to distinguish at interrupt time, if an
+     * error interrupt is due to read/write error..
+     */
+
+    /* We know the xtalk addr, the raw pci bus space,
+     * the raw pci bus address, the decoded pci bus
+     * space, the offset within that space, and the
+     * decoded pci slot (which may be "PCIIO_SLOT_NONE" if no slot
+     * is known to be involved).
+     */
+
+    /*
+     * Hand the error off to the handler registered
+     * for the slot that should have decoded the error,
+     * or to generic PCI handling (if pciio decides that
+     * such is appropriate).
+     */
+    retval = pciio_error_handler(pcibr_vhdl, error_code, mode, ioe);
+
+    if (retval != IOERROR_HANDLED) {
+
+	/* Generate a generic message for IOERROR_UNHANDLED
+	 * since the subsidiary handlers were silent, and
+	 * did no recovery.
+	 */
+	if (retval == IOERROR_UNHANDLED) {
+	    retval = IOERROR_PANIC;
+
+	    /* we may or may not want to print some of this,
+	     * depending on debug level and which error code.
+	     */
+
+	    PRINT_ALERT(
+		    "PIO Error on PCI Bus %s",
+		    pcibr_soft->bs_name);
+	    /* this decodes part of the ioe; our caller
+	     * will dump the raw details in DEBUG and
+	     * kdebug kernels.
+	     */
+	    BEM_ADD_IOE(ioe);
+	}
+#if defined(FORCE_ERRORS)
+	if (0) {
+#elif !DEBUG
+	if (kdebug) {
+#endif
+	    /*
+	       * dump raw data from bridge
+	     */
+
+	    BEM_ADD_STR("DEBUG DATA -- raw info from Bridge ASIC:\n");
+	    BEM_ADD_REG(bridge_int_status);
+	    BEM_ADD_VAR(bridge_pci_err_upper);
+	    BEM_ADD_VAR(bridge_pci_err_lower);
+	    BEM_ADD_VAR(bridge_pci_err_addr);
+	    BEM_ADD_SPC(raw_space);
+	    BEM_ADD_VAR(raw_paddr);
+	    if (IOERROR_FIELDVALID(ioe, widgetdev)) {
+
+#ifdef colin
+		slot = pciio_widgetdev_slot_get(IOERROR_GETVALUE(ioe, 
+								 widgetdev));
+		func = pciio_widgetdev_func_get(IOERROR_GETVALUE(ioe, 
+								 widgetdev));
+#else
+		slot = -1;
+		func = -1;
+#endif
+		if (slot < 8) {
+#ifdef SUPPORT_SGI_CMN_ERR_STUFF
+		    bridgereg_t             device = bridge->b_device[slot].reg;
+#endif
+
+		    BEM_ADD_VAR(slot);
+		    BEM_ADD_VAR(func);
+		    BEM_ADD_REG(device);
+		}
+	    }
+#if !DEBUG || defined(FORCE_ERRORS)
+	}
+#endif
+
+	/*
+	 * Since error could not be handled at lower level,
+	 * error data logged has not been cleared.
+	 * Clean up errors, and
+	 * re-enable bridge to interrupt on error conditions.
+	 * NOTE: Whether we get the interrupt on PCI_ABORT or not is
+	 * dependent on INT_ENABLE register. This write just makes sure
+	 * that if the interrupt was enabled, we do get the interrupt.
+	 *
+	 * CAUTION: Resetting bit BRIDGE_IRR_PCI_GRP_CLR, acknowledges
+	 *      a group of interrupts. If while handling this error,
+	 *      some other error has occurred, that would be
+	 *      implicitly cleared by this write.
+	 *      Need a way to ensure we don't inadvertently clear some
+	 *      other errors.
+	 */
+#ifdef IRIX
+	if (IOERROR_FIELDVALID(ioe, widgetdev))
+	    pcibr_device_disable(pcibr_soft, 
+				 pciio_widgetdev_slot_get(
+					  IOERROR_GETVALUE(ioe, widgetdev)));
+#endif
+
+	if (mode == MODE_DEVUSERERROR)
+	    pcibr_error_cleanup(pcibr_soft, error_code);
+    }
+    return retval;
+}
+
+/*
+ * bridge_dmaerror
+ *      Some error was identified in a DMA transaction.
+ *      This routine will identify the <device, address> that caused the error,
+ *      and try to invoke the appropriate bus service to handle this.
+ */
+
+#define BRIDGE_DMA_READ_ERROR (BRIDGE_ISR_RESP_XTLK_ERR|BRIDGE_ISR_XREAD_REQ_TIMEOUT)
+
+int
+pcibr_dmard_error(
+		     pcibr_soft_t pcibr_soft,
+		     int error_code,
+		     ioerror_mode_t mode,
+		     ioerror_t *ioe)
+{
+    devfs_handle_t            pcibr_vhdl = pcibr_soft->bs_vhdl;
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    bridgereg_t             bus_lowaddr, bus_uppraddr;
+    int                     retval = 0;
+    int                     bufnum;
+
+    /*
+     * In case of DMA errors, bridge should have logged the
+     * address that caused the error.
+     * Look up the address, in the bridge error registers, and
+     * take appropriate action
+     */
+#ifdef colin
+    ASSERT(IOERROR_GETVALUE(ioe, widgetnum) == pcibr_soft->bs_xid);
+    ASSERT(bridge);
+#endif
+
+    /*
+     * read error log registers
+     */
+    bus_lowaddr = bridge->b_wid_resp_lower;
+    bus_uppraddr = bridge->b_wid_resp_upper;
+
+    bufnum = BRIDGE_RESP_ERRUPPR_BUFNUM(bus_uppraddr);
+    IOERROR_SETVALUE(ioe, widgetdev, 
+		     pciio_widgetdev_create(
+				    BRIDGE_RESP_ERRUPPR_DEVICE(bus_uppraddr),
+				    0));
+    IOERROR_SETVALUE(ioe, busaddr,
+		     (bus_lowaddr |
+		      ((iopaddr_t)
+		       (bus_uppraddr &
+			BRIDGE_ERRUPPR_ADDRMASK) << 32)));
+
+    /*
+     * need to ensure that the xtalk address in ioe
+     * maps to PCI error address read from bridge.
+     * How to convert PCI address back to Xtalk address ?
+     * (better idea: convert XTalk address to PCI address
+     * and then do the compare!)
+     */
+
+    retval = pciio_error_handler(pcibr_vhdl, error_code, mode, ioe);
+    if (retval != IOERROR_HANDLED)
+#ifdef colin
+	pcibr_device_disable(pcibr_soft, 
+			     pciio_widgetdev_slot_get(
+				      IOERROR_GETVALUE(ioe,widgetdev)));
+#else
+	pcibr_device_disable(pcibr_soft,
+			     pciio_widgetdev_slot_get(-1));
+#endif
+
+    /*
+     * Re-enable bridge to interrupt on BRIDGE_IRR_RESP_BUF_GRP_CLR
+     * NOTE: Whether we get the interrupt on BRIDGE_IRR_RESP_BUF_GRP_CLR or
+     * not is dependent on INT_ENABLE register. This write just makes sure
+     * that if the interrupt was enabled, we do get the interrupt.
+     */
+    bridge->b_int_rst_stat = BRIDGE_IRR_RESP_BUF_GRP_CLR;
+
+    /*
+     * Also, release the "bufnum" back to buffer pool that could be re-used.
+     * This is done by "disabling" the buffer for a moment, then restoring
+     * the original assignment.
+     */
+
+    {
+	reg_p                   regp;
+	bridgereg_t             regv;
+	bridgereg_t             mask;
+
+	regp = (bufnum & 1)
+	    ? &bridge->b_odd_resp
+	    : &bridge->b_even_resp;
+
+	mask = 0xF << ((bufnum >> 1) * 4);
+
+	regv = *regp;
+	*regp = regv & ~mask;
+	*regp = regv;
+    }
+
+    return retval;
+}
+
+/*
+ * pcibr_dmawr_error:
+ *      Handle a dma write error caused by a device attached to this bridge.
+ *
+ *      ioe has the widgetnum, widgetdev, and memaddr fields updated
+ *      But we don't know the PCI address that corresponds to "memaddr"
+ *      nor do we know which device driver is generating this address.
+ *
+ *      There is no easy way to find out the PCI address(es) that map
+ *      to a specific system memory address. Bus handling code is also
+ *      not of much help, since it doesn't keep track of the DMA mappings
+ *      that have been handed out.
+ *      So it's a dead-end at this time.
+ *
+ *      If translation is available, we could invoke the error handling
+ *      interface of the device driver.
+ */
+/*ARGSUSED */
+int
+pcibr_dmawr_error(
+		     pcibr_soft_t pcibr_soft,
+		     int error_code,
+		     ioerror_mode_t mode,
+		     ioerror_t *ioe)
+{
+    devfs_handle_t            pcibr_vhdl = pcibr_soft->bs_vhdl;
+    int                     retval;
+
+    retval = pciio_error_handler(pcibr_vhdl, error_code, mode, ioe);
+
+#ifdef IRIX
+    if (retval != IOERROR_HANDLED) {
+	pcibr_device_disable(pcibr_soft, 
+			     pciio_widgetdev_slot_get(
+				      IOERROR_GETVALUE(ioe, widgetdev)));
+
+    }
+#endif
+    return retval;
+}
+
+/*
+ * Bridge error handler.
+ *      Interface to handle all errors that involve bridge in some way.
+ *
+ *      This normally gets called from xtalk error handler.
+ *      ioe has different set of fields set depending on the error that
+ *      was encountered. So, we have a bit field indicating which of the
+ *      fields are valid.
+ *
+ * NOTE: This routine could be operating in interrupt context. So,
+ *      don't try to sleep here (till interrupt threads work!!)
+ */
+LOCAL int
+pcibr_error_handler(
+		       error_handler_arg_t einfo,
+		       int error_code,
+		       ioerror_mode_t mode,
+		       ioerror_t *ioe)
+{
+    pcibr_soft_t            pcibr_soft;
+    int                     retval = IOERROR_BADERRORCODE;
+    devfs_handle_t	    xconn_vhdl,pcibr_vhdl;
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+    error_state_t	    e_state;
+#endif
+    pcibr_soft = (pcibr_soft_t) einfo;
+
+    xconn_vhdl = pcibr_soft->bs_conn;
+    pcibr_vhdl = pcibr_soft->bs_vhdl;
+
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+    e_state = error_state_get(xconn_vhdl);
+    
+    if (error_state_set(pcibr_vhdl, e_state) == 
+	ERROR_RETURN_CODE_CANNOT_SET_STATE)
+	return(IOERROR_UNHANDLED);
+#endif
+
+    /* If we are in the action handling phase clean out the error state
+     * on the xswitch.
+     */
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+    if (e_state == ERROR_STATE_ACTION)
+	(void)error_state_set(xconn_vhdl, ERROR_STATE_NONE);
+#endif
+
+#if DEBUG && ERROR_DEBUG
+    printk("%s: pcibr_error_handler\n", pcibr_soft->bs_name);
+#endif
+
+    ASSERT(pcibr_soft != NULL);
+
+    if (error_code & IOECODE_PIO)
+	retval = pcibr_pioerror(pcibr_soft, error_code, mode, ioe);
+
+    if (error_code & IOECODE_DMA) {
+	if (error_code & IOECODE_READ) {
+	    /*
+	     * DMA read error occurs when a device attached to the bridge
+	     * tries to read some data from system memory, and this
+	     * either results in a timeout or access error.
+	     * First case is indicated by the bit "XREAD_REQ_TOUT"
+	     * and second case by "RESP_XTALK_ERROR" bit in bridge error
+	     * interrupt status register.
+	     *
+	     * pcibr_error_intr_handler would get invoked first, and it has
+	     * the responsibility of calling pcibr_error_handler with
+	     * suitable parameters.
+	     */
+
+	    retval = pcibr_dmard_error(pcibr_soft, error_code, MODE_DEVERROR, ioe);
+	}
+	if (error_code & IOECODE_WRITE) {
+	    /*
+	     * A device attached to this bridge has been generating
+	     * bad DMA writes. Find out the device attached, and
+	     * slap on it's wrist.
+	     */
+
+	    retval = pcibr_dmawr_error(pcibr_soft, error_code, MODE_DEVERROR, ioe);
+	}
+    }
+    return retval;
+
+}
+
+/*
+ * Reenable a device after handling the error.
+ * This is called by the lower layers when they wish to be reenabled
+ * after an error.
+ * Note that each layer would be calling the previous layer to reenable
+ * first, before going ahead with their own re-enabling.
+ */
+
+int
+pcibr_error_devenable(devfs_handle_t pconn_vhdl, int error_code)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+
+    ASSERT(error_code & IOECODE_PIO);
+
+    /* If the error is not known to be a write,
+     * we have to call devenable.
+     * write errors are isolated to the bridge.
+     */
+    if (!(error_code & IOECODE_WRITE)) {
+	devfs_handle_t            xconn_vhdl = pcibr_soft->bs_conn;
+	int                     rc;
+
+	rc = xtalk_error_devenable(xconn_vhdl, pciio_slot, error_code);
+	if (rc != IOERROR_HANDLED)
+	    return rc;
+    }
+    pcibr_error_cleanup(pcibr_soft, error_code);
+    return IOERROR_HANDLED;
+}
+
+/* =====================================================================
+ *    CONFIGURATION MANAGEMENT
+ */
+/*ARGSUSED */
+void
+pcibr_provider_startup(devfs_handle_t pcibr)
+{
+}
+
+/*ARGSUSED */
+void
+pcibr_provider_shutdown(devfs_handle_t pcibr)
+{
+}
+
+int
+pcibr_reset(devfs_handle_t conn)
+{
+    pciio_info_t            pciio_info = pciio_info_get(conn);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    bridge_t               *bridge = pcibr_soft->bs_base;
+    bridgereg_t             ctlreg;
+    unsigned                cfgctl[8];
+    unsigned                s;
+    int                     f, nf;
+    pcibr_info_h            pcibr_infoh;
+    pcibr_info_t            pcibr_info;
+    int                     win;
+
+    if (pcibr_soft->bs_slot[pciio_slot].has_host) {
+	pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
+	pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
+    }
+    if (pciio_slot < 4) {
+	s = pcibr_lock(pcibr_soft);
+	nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
+	pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
+	for (f = 0; f < nf; ++f)
+	    if (pcibr_infoh[f])
+		cfgctl[f] = bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_COMMAND / 4];
+
+	ctlreg = bridge->b_wid_control;
+	bridge->b_wid_control = ctlreg | BRIDGE_CTRL_RST(pciio_slot);
+	/* XXX delay? */
+	bridge->b_wid_control = ctlreg;
+	/* XXX delay? */
+
+	for (f = 0; f < nf; ++f)
+#ifdef IRIX
+	    if (pcibr_info = pcibr_infoh[f])
+#else
+	    if ((pcibr_info = pcibr_infoh[f]))
+#endif
+		for (win = 0; win < 6; ++win)
+		    if (pcibr_info->f_window[win].w_base != 0)
+			bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_BASE_ADDR(win) / 4] =
+			    pcibr_info->f_window[win].w_base;
+	for (f = 0; f < nf; ++f)
+	    if (pcibr_infoh[f])
+		bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_COMMAND / 4] = cfgctl[f];
+	pcibr_unlock(pcibr_soft, s);
+
+	return 0;
+    }
+#ifdef SUPPORT_PRINTING_V_FORMAT
+    PRINT_WARNING( "%v: pcibr_reset unimplemented for slot %d\n",
+	    conn, pciio_slot);
+#endif
+    return -1;
+}
+
+pciio_endian_t
+pcibr_endian_set(devfs_handle_t pconn_vhdl,
+		 pciio_endian_t device_end,
+		 pciio_endian_t desired_end)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    bridgereg_t             devreg;
+    unsigned                s;
+
+    /*
+     * Bridge supports hardware swapping; so we can always
+     * arrange for the caller's desired endianness.
+     */
+
+    s = pcibr_lock(pcibr_soft);
+    devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
+    if (device_end != desired_end)
+	devreg |= BRIDGE_DEV_SWAP_BITS;
+    else
+	devreg &= ~BRIDGE_DEV_SWAP_BITS;
+
+    /* NOTE- if we ever put SWAP bits
+     * onto the disabled list, we will
+     * have to change the logic here.
+     */
+    if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
+	bridge_t               *bridge = pcibr_soft->bs_base;
+
+	bridge->b_device[pciio_slot].reg = devreg;
+	pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
+	bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+    }
+    pcibr_unlock(pcibr_soft, s);
+
+#if DEBUG && PCIBR_DEV_DEBUG
+    printk("pcibr Device(%d): 0x%p\n", pciio_slot, bridge->b_device[pciio_slot].reg);
+#endif
+
+    return desired_end;
+}
+
+/* This (re)sets the GBR and REALTIME bits and also keeps track of how
+ * many sets are outstanding. Reset succeeds only if the number of outstanding
+ * sets == 1.
+ */
+int
+pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
+			pciio_slot_t pciio_slot,
+			pciio_priority_t device_prio)
+{
+    int                     s;
+    int                    *counter;
+    bridgereg_t             rtbits = 0;
+    bridgereg_t             devreg;
+    int                     rc = PRIO_SUCCESS;
+
+    /* in dual-slot configurations, the host and the
+     * guest have separate DMA resources, so they
+     * have separate requirements for priority bits.
+     */
+
+    counter = &(pcibr_soft->bs_slot[pciio_slot].bss_pri_uctr);
+
+    /*
+     * Bridge supports PCI notions of LOW and HIGH priority
+     * arbitration rings via a "REAL_TIME" bit in the per-device
+     * Bridge register. The "GBR" bit controls access to the GBR
+     * ring on the xbow. These two bits are (re)set together.
+     *
+     * XXX- Bug in Rev B Bridge Si:
+     * Symptom: Prefetcher starts operating incorrectly. This happens
+     * due to corruption of the address storage ram in the prefetcher
+     * when a non-real time pci request is pulled and a real-time one is
+     * put in its place. Workaround: Use only a single arbitration ring
+     * on pci bus. GBR and RR can still be uniquely used per
+     * device. NETLIST MERGE DONE, WILL BE FIXED IN REV C.
+     */
+
+    if (pcibr_soft->bs_rev_num != BRIDGE_PART_REV_B)
+	rtbits |= BRIDGE_DEV_RT;
+
+    /* NOTE- if we ever put DEV_RT or DEV_GBR on
+     * the disabled list, we will have to take
+     * it into account here.
+     */
+
+    s = pcibr_lock(pcibr_soft);
+    devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
+    if (device_prio == PCI_PRIO_HIGH) {
+#ifdef IRIX
+	if (++*counter == 1)
+#else
+	if ((++*counter == 1)) {
+#endif
+	    if (rtbits)
+		devreg |= rtbits;
+	    else
+		rc = PRIO_FAIL;
+#ifndef IRIX
+	}
+#endif
+    } else if (device_prio == PCI_PRIO_LOW) {
+	if (*counter <= 0)
+	    rc = PRIO_FAIL;
+	else if (--*counter == 0)
+	    if (rtbits)
+		devreg &= ~rtbits;
+    }
+    if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
+	bridge_t               *bridge = pcibr_soft->bs_base;
+
+	bridge->b_device[pciio_slot].reg = devreg;
+	pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
+	bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
+    }
+    pcibr_unlock(pcibr_soft, s);
+
+    return rc;
+}
+
+pciio_priority_t
+pcibr_priority_set(devfs_handle_t pconn_vhdl,
+		   pciio_priority_t device_prio)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+
+    (void) pcibr_priority_bits_set(pcibr_soft, pciio_slot, device_prio);
+
+    return device_prio;
+}
+
+/*
+ * Interfaces to allow special (e.g. SGI) drivers to set/clear
+ * Bridge-specific device flags.  Many flags are modified through
+ * PCI-generic interfaces; we don't allow them to be directly
+ * manipulated here.  Only flags that at this point seem pretty
+ * Bridge-specific can be set through these special interfaces.
+ * We may add more flags as the need arises, or remove flags and
+ * create PCI-generic interfaces as the need arises.
+ *
+ * Returns 0 on failure, 1 on success
+ */
+int
+pcibr_device_flags_set(devfs_handle_t pconn_vhdl,
+		       pcibr_device_flags_t flags)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pciio_slot_t            pciio_slot = pciio_info_slot_get(pciio_info);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+    bridgereg_t             set = 0;
+    bridgereg_t             clr = 0;
+
+    ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
+
+    if (flags & PCIBR_WRITE_GATHER)
+	set |= BRIDGE_DEV_PMU_WRGA_EN;
+    if (flags & PCIBR_NOWRITE_GATHER)
+	clr |= BRIDGE_DEV_PMU_WRGA_EN;
+
+    if (flags & PCIBR_WRITE_GATHER)
+	set |= BRIDGE_DEV_DIR_WRGA_EN;
+    if (flags & PCIBR_NOWRITE_GATHER)
+	clr |= BRIDGE_DEV_DIR_WRGA_EN;
+
+    if (flags & PCIBR_PREFETCH)
+	set |= BRIDGE_DEV_PREF;
+    if (flags & PCIBR_NOPREFETCH)
+	clr |= BRIDGE_DEV_PREF;
+
+    if (flags & PCIBR_PRECISE)
+	set |= BRIDGE_DEV_PRECISE;
+    if (flags & PCIBR_NOPRECISE)
+	clr |= BRIDGE_DEV_PRECISE;
+
+    if (flags & PCIBR_BARRIER)
+	set |= BRIDGE_DEV_BARRIER;
+    if (flags & PCIBR_NOBARRIER)
+	clr |= BRIDGE_DEV_BARRIER;
+
+    if (flags & PCIBR_64BIT)
+	set |= BRIDGE_DEV_DEV_SIZE;
+    if (flags & PCIBR_NO64BIT)
+	clr |= BRIDGE_DEV_DEV_SIZE;
+
+    if (set || clr) {
+	bridgereg_t             devreg;
+	unsigned                s;
+
+	s = pcibr_lock(pcibr_soft);
+	devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
+#ifdef IRIX
+	devreg = devreg & ~clr | set;
+#else
+	devreg = (devreg & ~clr) | set;
+#endif
+	if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
+	    bridge_t               *bridge = pcibr_soft->bs_base;
+
+	    bridge->b_device[pciio_slot].reg = devreg;
+	    pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
+	    bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
+	}
+	pcibr_unlock(pcibr_soft, s);
+#if DEBUG && PCIBR_DEV_DEBUG
+	printk("pcibr Device(%d): %R\n", pciio_slot, bridge->b_device[pciio_slot].regbridge->b_device[pciio_slot].reg, device_bits);
+#endif
+    }
+    return (1);
+}
+
+#ifdef LITTLE_ENDIAN
+/*
+ * on sn-ia we need to twiddle the addresses going out
+ * the pci bus because we use the unswizzled synergy space
+ * (the alternative is to use the swizzled synergy space
+ * and byte swap the data)
+ */
+#define	CB(b,r)	(((volatile uint8_t *) b)[((r)^4)])
+#define	CS(b,r)	(((volatile uint16_t *) b)[((r^4)/2)])
+#define	CW(b,r)	(((volatile uint32_t *) b)[((r^4)/4)])
+#else
+#define	CB(b,r)	(((volatile uint8_t *) cfgbase)[(r)^3])
+#define	CS(b,r)	(((volatile uint16_t *) cfgbase)[((r)/2)^1])
+#define	CW(b,r)	(((volatile uint32_t *) cfgbase)[(r)/4])
+#endif /* LITTLE_ENDIAN */
+
+
+LOCAL                   cfg_p
+pcibr_config_addr(devfs_handle_t conn,
+		  unsigned reg)
+{
+    pcibr_info_t            pcibr_info;
+    pciio_slot_t            pciio_slot;
+    pciio_function_t        pciio_func;
+    pcibr_soft_t            pcibr_soft;
+    bridge_t               *bridge;
+    cfg_p                   cfgbase = (cfg_p)0;
+
+    pcibr_info = pcibr_info_get(conn);
+
+    pciio_slot = pcibr_info->f_slot;
+    if (pciio_slot == PCIIO_SLOT_NONE)
+	pciio_slot = PCI_TYPE1_SLOT(reg);
+
+    pciio_func = pcibr_info->f_func;
+    if (pciio_func == PCIIO_FUNC_NONE)
+	pciio_func = PCI_TYPE1_FUNC(reg);
+
+    pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
+
+    if ( (pcibr_soft_t)0 != pcibr_soft ) {
+	bridge = pcibr_soft->bs_base;
+	if ( (bridge_t *)0 != bridge ) {
+		cfgbase = bridge->b_type0_cfg_dev[pciio_slot].f[pciio_func].l;
+	}
+    }
+
+
+    return cfgbase;
+}
+
+uint64_t
+pcibr_config_get(devfs_handle_t conn,
+		 unsigned reg,
+		 unsigned size)
+{
+    return do_pcibr_config_get(pcibr_config_addr(conn, reg),
+			       PCI_TYPE1_REG(reg), size);
+}
+
+LOCAL uint64_t
+do_pcibr_config_get(
+		       cfg_p cfgbase,
+		       unsigned reg,
+		       unsigned size)
+{
+    unsigned                value;
+
+   
+    value = CW(cfgbase, reg);
+
+    if (reg & 3)
+	value >>= 8 * (reg & 3);
+    if (size < 4)
+	value &= (1 << (8 * size)) - 1;
+
+    return value;
+}
+
+void
+pcibr_config_set(devfs_handle_t conn,
+		 unsigned reg,
+		 unsigned size,
+		 uint64_t value)
+{
+    do_pcibr_config_set(pcibr_config_addr(conn, reg),
+			PCI_TYPE1_REG(reg), size, value);
+}
+
+LOCAL void
+do_pcibr_config_set(cfg_p cfgbase,
+		    unsigned reg,
+		    unsigned size,
+		    uint64_t value)
+{
+    switch (size) {
+    case 1:
+	CB(cfgbase, reg) = value;
+	break;
+    case 2:
+	if (reg & 1) {
+	    CB(cfgbase, reg) = value;
+	    CB(cfgbase, reg + 1) = value >> 8;
+	} else
+	    CS(cfgbase, reg) = value;
+	break;
+    case 3:
+	if (reg & 1) {
+	    CB(cfgbase, reg) = value;
+	    CS(cfgbase, reg + 1) = value >> 8;
+	} else {
+	    CS(cfgbase, reg) = value;
+	    CB(cfgbase, reg + 2) = value >> 16;
+	}
+	break;
+
+    case 4:
+	CW(cfgbase, reg) = value;
+	break;
+    }
+}
+
+pciio_provider_t        pcibr_provider =
+{
+    (pciio_piomap_alloc_f *) pcibr_piomap_alloc,
+    (pciio_piomap_free_f *) pcibr_piomap_free,
+    (pciio_piomap_addr_f *) pcibr_piomap_addr,
+    (pciio_piomap_done_f *) pcibr_piomap_done,
+    (pciio_piotrans_addr_f *) pcibr_piotrans_addr,
+    (pciio_piospace_alloc_f *) pcibr_piospace_alloc,
+    (pciio_piospace_free_f *) pcibr_piospace_free,
+
+    (pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
+    (pciio_dmamap_free_f *) pcibr_dmamap_free,
+    (pciio_dmamap_addr_f *) pcibr_dmamap_addr,
+    (pciio_dmamap_list_f *) pcibr_dmamap_list,
+    (pciio_dmamap_done_f *) pcibr_dmamap_done,
+    (pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
+    (pciio_dmatrans_list_f *) pcibr_dmatrans_list,
+    (pciio_dmamap_drain_f *) pcibr_dmamap_drain,
+    (pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
+    (pciio_dmalist_drain_f *) pcibr_dmalist_drain,
+
+    (pciio_intr_alloc_f *) pcibr_intr_alloc,
+    (pciio_intr_free_f *) pcibr_intr_free,
+    (pciio_intr_connect_f *) pcibr_intr_connect,
+    (pciio_intr_disconnect_f *) pcibr_intr_disconnect,
+    (pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
+
+    (pciio_provider_startup_f *) pcibr_provider_startup,
+    (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
+    (pciio_reset_f *) pcibr_reset,
+    (pciio_write_gather_flush_f *) pcibr_write_gather_flush,
+    (pciio_endian_set_f *) pcibr_endian_set,
+    (pciio_priority_set_f *) pcibr_priority_set,
+    (pciio_config_get_f *) pcibr_config_get,
+    (pciio_config_set_f *) pcibr_config_set,
+
+    (pciio_error_devenable_f *) pcibr_error_devenable,
+    (pciio_error_extract_f *) pcibr_error_extract,
+};
+
+LOCAL                   pcibr_hints_t
+pcibr_hints_get(devfs_handle_t xconn_vhdl, int alloc)
+{
+    arbitrary_info_t        ainfo = 0;
+    graph_error_t	    rv;
+    pcibr_hints_t           hint;
+
+    rv = hwgraph_info_get_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS, &ainfo);
+
+    if (alloc && (rv != GRAPH_SUCCESS)) {
+
+	NEW(hint);
+	hint->rrb_alloc_funct = NULL;
+	hint->ph_intr_bits = NULL;
+	rv = hwgraph_info_add_LBL(xconn_vhdl, 
+				  INFO_LBL_PCIBR_HINTS, 	
+				  (arbitrary_info_t) hint);
+	if (rv != GRAPH_SUCCESS)
+	    goto abnormal_exit;
+
+	rv = hwgraph_info_get_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS, &ainfo);
+	
+	if (rv != GRAPH_SUCCESS)
+	    goto abnormal_exit;
+
+	if (ainfo != (arbitrary_info_t) hint)
+	    goto abnormal_exit;
+    }
+    return (pcibr_hints_t) ainfo;
+
+abnormal_exit:
+#ifdef IRIX
+    printf("SHOULD NOT BE HERE\n");
+#endif
+    DEL(hint);
+    return(NULL);
+
+}
+
+void
+pcibr_hints_fix_some_rrbs(devfs_handle_t xconn_vhdl, unsigned mask)
+{
+    pcibr_hints_t           hint = pcibr_hints_get(xconn_vhdl, 1);
+
+    if (hint)
+	hint->ph_rrb_fixed = mask;
+#if DEBUG
+    else
+	printk("pcibr_hints_fix_rrbs: pcibr_hints_get failed at\n"
+		"\t%p\n", xconn_vhdl);
+#endif
+}
+
+void
+pcibr_hints_fix_rrbs(devfs_handle_t xconn_vhdl)
+{
+    pcibr_hints_fix_some_rrbs(xconn_vhdl, 0xFF);
+}
+
+void
+pcibr_hints_dualslot(devfs_handle_t xconn_vhdl,
+		     pciio_slot_t host,
+		     pciio_slot_t guest)
+{
+    pcibr_hints_t           hint = pcibr_hints_get(xconn_vhdl, 1);
+
+    if (hint)
+	hint->ph_host_slot[guest] = host + 1;
+#if DEBUG
+    else
+	printk("pcibr_hints_dualslot: pcibr_hints_get failed at\n"
+		"\t%p\n", xconn_vhdl);
+#endif
+}
+
+void
+pcibr_hints_intr_bits(devfs_handle_t xconn_vhdl,
+		      pcibr_intr_bits_f *xxx_intr_bits)
+{
+    pcibr_hints_t           hint = pcibr_hints_get(xconn_vhdl, 1);
+
+    if (hint)
+	hint->ph_intr_bits = xxx_intr_bits;
+#if DEBUG
+    else
+	printk("pcibr_hints_intr_bits: pcibr_hints_get failed at\n"
+	       "\t%p\n", xconn_vhdl);
+#endif
+}
+
+void
+pcibr_set_rrb_callback(devfs_handle_t xconn_vhdl, rrb_alloc_funct_t rrb_alloc_funct)
+{
+    pcibr_hints_t           hint = pcibr_hints_get(xconn_vhdl, 1);
+
+    if (hint)
+	hint->rrb_alloc_funct = rrb_alloc_funct;
+}
+
+void
+pcibr_hints_handsoff(devfs_handle_t xconn_vhdl)
+{
+    pcibr_hints_t           hint = pcibr_hints_get(xconn_vhdl, 1);
+
+    if (hint)
+	hint->ph_hands_off = 1;
+#if DEBUG
+    else
+	printk("pcibr_hints_handsoff: pcibr_hints_get failed at\n"
+		"\t%p\n", xconn_vhdl);
+#endif
+}
+
+void
+pcibr_hints_subdevs(devfs_handle_t xconn_vhdl,
+		    pciio_slot_t slot,
+		    uint64_t subdevs)
+{
+    arbitrary_info_t        ainfo = 0;
+    char                    sdname[16];
+    devfs_handle_t            pconn_vhdl = GRAPH_VERTEX_NONE;
+
+    sprintf(sdname, "pci/%d", slot);
+    (void) hwgraph_path_add(xconn_vhdl, sdname, &pconn_vhdl);
+    if (pconn_vhdl == GRAPH_VERTEX_NONE) {
+#if DEBUG
+	printk("pcibr_hints_subdevs: hwgraph_path_create failed at\n"
+		"\t%p (seeking %s)\n", xconn_vhdl, sdname);
+#endif
+	return;
+    }
+    hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
+    if (ainfo == 0) {
+	uint64_t                *subdevp;
+
+	NEW(subdevp);
+	if (!subdevp) {
+#if DEBUG
+	    printk("pcibr_hints_subdevs: subdev ptr alloc failed at\n"
+		    "\t%p\n", pconn_vhdl);
+#endif
+	    return;
+	}
+	*subdevp = subdevs;
+	hwgraph_info_add_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, (arbitrary_info_t) subdevp);
+	hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
+	if (ainfo == (arbitrary_info_t) subdevp)
+	    return;
+	DEL(subdevp);
+#ifdef IRIX
+	if (ainfo == NULL)
+#else
+	if (ainfo == (arbitrary_info_t) NULL)
+#endif
+	{
+#if DEBUG
+	    printk("pcibr_hints_subdevs: null subdevs ptr at\n"
+		    "\t%p\n", pconn_vhdl);
+#endif
+	    return;
+	}
+#if DEBUG
+	printk("pcibr_subdevs_get: dup subdev add_LBL at\n"
+		"\t%p\n", pconn_vhdl);
+#endif
+    }
+    *(uint64_t *) ainfo = subdevs;
+}
+
+
+#ifdef colin
+
+#include <sys/idbg.h>
+#include <sys/idbgentry.h>
+
+char *pci_space[] = {"NONE", 
+		     "ROM",
+		     "IO",
+		     "",
+		     "MEM",
+		     "MEM32",
+		     "MEM64",
+		     "CFG",
+		     "WIN0",
+		     "WIN1",
+		     "WIN2",
+		     "WIN3",
+		     "WIN4",
+		     "WIN5",
+		     "",
+		     "BAD"};
+
+void
+idbg_pss_func(pcibr_info_h pcibr_infoh, int func)
+{
+    pcibr_info_t	pcibr_info = pcibr_infoh[func];
+    char		name[MAXDEVNAME];
+    int			win;
+    
+    if (!pcibr_info)
+	return;
+    qprintf("Per-slot Function Info\n");
+#ifdef SUPPORT_PRINTING_V_FORMAT
+    sprintf(name, "%v", pcibr_info->f_vertex);
+#endif
+    qprintf("\tSlot Name : %s\n",name);
+    qprintf("\tPCI Bus : %d ",pcibr_info->f_bus);
+    qprintf("Slot : %d ", pcibr_info->f_slot);
+    qprintf("Function : %d ", pcibr_info->f_func);
+    qprintf("VendorId : 0x%x " , pcibr_info->f_vendor);
+    qprintf("DeviceId : 0x%x\n", pcibr_info->f_device);
+#ifdef SUPPORT_PRINTING_V_FORMAT
+    sprintf(name, "%v", pcibr_info->f_master);
+#endif
+    qprintf("\tBus provider : %s\n",name);
+    qprintf("\tProvider Fns : 0x%x ", pcibr_info->f_pops);
+    qprintf("Error Handler : 0x%x Arg 0x%x\n", 
+	    pcibr_info->f_efunc,pcibr_info->f_einfo);
+    for(win = 0 ; win < 6 ; win++) 
+	qprintf("\tBase Reg #%d space %s base 0x%x size 0x%x\n",
+		win,pci_space[pcibr_info->f_window[win].w_space],
+		pcibr_info->f_window[win].w_base,
+		pcibr_info->f_window[win].w_size);
+
+    qprintf("\tRom base 0x%x size 0x%x\n", 
+	    pcibr_info->f_rbase,pcibr_info->f_rsize);
+
+    qprintf("\tInterrupt Bit Map\n");
+    qprintf("\t\tPCI Int#\tBridge Pin#\n");
+    for (win = 0 ; win < 4; win++)
+	qprintf("\t\tINT%c\t\t%d\n",win+'A',pcibr_info->f_ibit[win]);
+    qprintf("\n");
+}
+
+
+void
+idbg_pss_info(pcibr_soft_t pcibr_soft, pciio_slot_t slot)
+{
+    pcibr_soft_slot_t	pss;
+    char		slot_conn_name[MAXDEVNAME];
+    int			func;
+
+    pss = &pcibr_soft->bs_slot[slot];
+    qprintf("PCI INFRASTRUCTURAL INFO FOR SLOT %d\n", slot);
+    qprintf("\tHost Present ? %s ", pss->has_host ? "yes" : "no");
+    qprintf("\tHost Slot : %d\n",pss->host_slot);
+    sprintf(slot_conn_name, "%v", pss->slot_conn);
+    qprintf("\tSlot Conn : %s\n",slot_conn_name);	
+    qprintf("\t#Functions : %d\n",pss->bss_ninfo);
+    for (func = 0; func < pss->bss_ninfo; func++)
+	idbg_pss_func(pss->bss_infos,func);
+    qprintf("\tSpace : %s ",pci_space[pss->bss_devio.bssd_space]);
+    qprintf("\tBase : 0x%x ", pss->bss_devio.bssd_base);
+    qprintf("\tShadow Devreg : 0x%x\n", pss->bss_device);
+    qprintf("\tUsage counts : pmu %d d32 %d d64 %d\n",
+	    pss->bss_pmu_uctr,pss->bss_d32_uctr,pss->bss_d64_uctr);
+    
+    qprintf("\tDirect Trans Info : d64_base 0x%x d64_flags 0x%x"
+	    "d32_base 0x%x d32_flags 0x%x\n",
+	    pss->bss_d64_base, pss->bss_d64_flags,
+	    pss->bss_d32_base, pss->bss_d32_flags);
+    
+    qprintf("\tExt ATEs active ? %s", 
+	    pss->bss_ext_ates_active ? "yes" : "no");
+    qprintf(" Command register : 0x%x ", pss->bss_cmd_pointer);
+    qprintf(" Shadow command val : 0x%x\n", pss->bss_cmd_shadow);
+
+    qprintf("\tRRB Info : Valid %d+%d Reserved %d\n",
+	    pcibr_soft->bs_rrb_valid[slot],
+	    pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
+	    pcibr_soft->bs_rrb_res[slot]);
+		
+}
+
+int	ips = 0;
+
+void
+idbg_pss(pcibr_soft_t pcibr_soft)
+{
+    pciio_slot_t	slot;
+
+    
+    if (ips >= 0 && ips < 8)
+	idbg_pss_info(pcibr_soft,ips);
+    else if (ips < 0)
+	for (slot = 0; slot < 8; slot++) 
+	    idbg_pss_info(pcibr_soft,slot);
+    else
+	qprintf("Invalid ips %d\n",ips);
+}
+
+#endif /* colin */
+
+int
+pcibr_dma_enabled(devfs_handle_t pconn_vhdl)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+	
+
+    return xtalk_dma_enabled(pcibr_soft->bs_conn);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/pciio.c linux/arch/ia64/sn/io/pciio.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/pciio.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/pciio.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1562 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#define	USRPCI	0
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/ioerror_handling.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pciio_private.h>
+
+#define DEBUG_PCIIO
+#undef DEBUG_PCIIO	/* turn this on for yet more console output */
+
+
+#define NEW(ptr)	(ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
+#define DEL(ptr)	(kfree(ptr))
+
+char                    pciio_info_fingerprint[] = "pciio_info";
+
+cdl_p                   pciio_registry = NULL;
+
+int
+badaddr_val(volatile void *addr, int len, volatile void *ptr)
+{
+	switch (len) {
+		case 4: *(volatile u32*)ptr = *(((volatile u32*)(((u64) addr)^4)));
+		default: printk("FIXME: argh fix badaddr_val\n");
+	}
+	/* no such thing as a bad addr .... */
+	return(0);
+}
+
+
+void
+cmn_err_tag(int seqnumber, register int level, char *fmt, ...)
+{
+}
+
+nasid_t
+get_console_nasid(void)
+{
+#ifdef IRIX
+	return console_nasid;
+#else
+	return 0;
+#endif
+}
+
+int
+hub_dma_enabled(devfs_handle_t xconn_vhdl)
+{
+	return(0);
+}
+
+int
+hub_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
+{
+	return(0);
+}
+
+void
+ioerror_dump(char *name, int error_code, int error_mode, ioerror_t *ioerror)
+{
+}
+
+/******
+ ****** end hack defines ......
+ ******/
+
+
+
+
+/* =====================================================================
+ *    PCI Generic Bus Provider
+ * Implement PCI provider operations.  The pciio* layer provides a
+ * platform-independent interface for PCI devices.  This layer
+ * switches among the possible implementations of a PCI adapter.
+ */
+
+/* =====================================================================
+ *    Provider Function Location SHORTCUT
+ *
+ * On platforms with only one possible PCI provider, macros can be
+ * set up at the top that cause the table lookups and indirections to
+ * completely disappear.
+ */
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+/*
+ *    For the moment, we will assume that IP27
+ *      only use Bridge ASICs to provide PCI support.
+ */
+#include <asm/sn/pci/pcibr.h>
+#define DEV_FUNC(dev,func)	pcibr_##func
+#define CAST_PIOMAP(x)		((pcibr_piomap_t)(x))
+#define CAST_DMAMAP(x)		((pcibr_dmamap_t)(x))
+#define CAST_INTR(x)		((pcibr_intr_t)(x))
+#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+
+/* =====================================================================
+ *    Function Table of Contents
+ */
+
+#if !defined(DEV_FUNC)
+static pciio_provider_t *pciio_to_provider_fns(devfs_handle_t dev);
+#endif
+
+pciio_piomap_t          pciio_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
+void                    pciio_piomap_free(pciio_piomap_t);
+caddr_t                 pciio_piomap_addr(pciio_piomap_t, iopaddr_t, size_t);
+
+void                    pciio_piomap_done(pciio_piomap_t);
+caddr_t                 pciio_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+caddr_t			pciio_pio_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
+
+iopaddr_t               pciio_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
+void                    pciio_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
+
+pciio_dmamap_t          pciio_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+void                    pciio_dmamap_free(pciio_dmamap_t);
+iopaddr_t               pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
+alenlist_t              pciio_dmamap_list(pciio_dmamap_t, alenlist_t, unsigned);
+void                    pciio_dmamap_done(pciio_dmamap_t);
+iopaddr_t               pciio_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
+alenlist_t              pciio_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+void			pciio_dmamap_drain(pciio_dmamap_t);
+void			pciio_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
+void			pciio_dmalist_drain(devfs_handle_t, alenlist_t);
+iopaddr_t               pciio_dma_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
+
+pciio_intr_t            pciio_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+void                    pciio_intr_free(pciio_intr_t);
+int                     pciio_intr_connect(pciio_intr_t, intr_func_t, intr_arg_t, void *thread);
+void                    pciio_intr_disconnect(pciio_intr_t);
+devfs_handle_t            pciio_intr_cpu_get(pciio_intr_t);
+
+void			pciio_slot_func_to_name(char *, pciio_slot_t, pciio_function_t);
+static pciio_info_t     pciio_cardinfo_get(devfs_handle_t, pciio_slot_t);
+int                     pciio_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
+int                     pciio_error_devenable(devfs_handle_t, int);
+
+void                    pciio_provider_startup(devfs_handle_t);
+void                    pciio_provider_shutdown(devfs_handle_t);
+
+pciio_endian_t          pciio_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
+pciio_priority_t        pciio_priority_set(devfs_handle_t, pciio_priority_t);
+devfs_handle_t            pciio_intr_dev_get(pciio_intr_t);
+
+devfs_handle_t            pciio_pio_dev_get(pciio_piomap_t);
+pciio_slot_t            pciio_pio_slot_get(pciio_piomap_t);
+pciio_space_t           pciio_pio_space_get(pciio_piomap_t);
+iopaddr_t               pciio_pio_pciaddr_get(pciio_piomap_t);
+ulong                   pciio_pio_mapsz_get(pciio_piomap_t);
+caddr_t                 pciio_pio_kvaddr_get(pciio_piomap_t);
+
+devfs_handle_t            pciio_dma_dev_get(pciio_dmamap_t);
+pciio_slot_t            pciio_dma_slot_get(pciio_dmamap_t);
+
+pciio_info_t            pciio_info_chk(devfs_handle_t);
+pciio_info_t            pciio_info_get(devfs_handle_t);
+void                    pciio_info_set(devfs_handle_t, pciio_info_t);
+devfs_handle_t            pciio_info_dev_get(pciio_info_t);
+pciio_slot_t            pciio_info_slot_get(pciio_info_t);
+pciio_function_t        pciio_info_function_get(pciio_info_t);
+pciio_vendor_id_t       pciio_info_vendor_id_get(pciio_info_t);
+pciio_device_id_t       pciio_info_device_id_get(pciio_info_t);
+devfs_handle_t            pciio_info_master_get(pciio_info_t);
+arbitrary_info_t        pciio_info_mfast_get(pciio_info_t);
+pciio_provider_t       *pciio_info_pops_get(pciio_info_t);
+error_handler_f	       *pciio_info_efunc_get(pciio_info_t);
+error_handler_arg_t    *pciio_info_einfo_get(pciio_info_t);
+pciio_space_t		pciio_info_bar_space_get(pciio_info_t, int);
+iopaddr_t		pciio_info_bar_base_get(pciio_info_t, int);
+size_t			pciio_info_bar_size_get(pciio_info_t, int);
+iopaddr_t		pciio_info_rom_base_get(pciio_info_t);
+size_t			pciio_info_rom_size_get(pciio_info_t);
+
+void                    pciio_init(void);
+int                     pciio_attach(devfs_handle_t);
+
+void                    pciio_provider_register(devfs_handle_t, pciio_provider_t *pciio_fns);
+void                    pciio_provider_unregister(devfs_handle_t);
+pciio_provider_t       *pciio_provider_fns_get(devfs_handle_t);
+
+int                     pciio_driver_register(pciio_vendor_id_t, pciio_device_id_t, char *driver_prefix, unsigned);
+void                    pciio_driver_unregister(char *driver_prefix);
+
+devfs_handle_t            pciio_device_register(devfs_handle_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
+
+void			pciio_device_unregister(devfs_handle_t);
+pciio_info_t		pciio_device_info_new(pciio_info_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
+void			pciio_device_info_free(pciio_info_t);
+devfs_handle_t		pciio_device_info_register(devfs_handle_t, pciio_info_t);
+void			pciio_device_info_unregister(devfs_handle_t, pciio_info_t);
+int                     pciio_device_attach(devfs_handle_t);
+int			pciio_device_detach(devfs_handle_t);
+void                    pciio_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
+
+int                     pciio_reset(devfs_handle_t);
+int                     pciio_write_gather_flush(devfs_handle_t);
+int                     pciio_slot_inuse(devfs_handle_t);
+
+/* =====================================================================
+ *    Provider Function Location
+ *
+ *      If there is more than one possible provider for
+ *      this platform, we need to examine the master
+ *      vertex of the current vertex for a provider
+ *      function structure, and indirect through the
+ *      appropriately named member.
+ */
+
+#if !defined(DEV_FUNC)
+
+static pciio_provider_t *
+pciio_to_provider_fns(devfs_handle_t dev)
+{
+    pciio_info_t            card_info;
+    pciio_provider_t       *provider_fns;
+
+    card_info = pciio_info_get(dev);
+    ASSERT(card_info != NULL);
+
+    provider_fns = pciio_info_pops_get(card_info);
+    ASSERT(provider_fns != NULL);
+
+    return (provider_fns);
+}
+
+#define DEV_FUNC(dev,func)	pciio_to_provider_fns(dev)->func
+#define CAST_PIOMAP(x)		((pciio_piomap_t)(x))
+#define CAST_DMAMAP(x)		((pciio_dmamap_t)(x))
+#define CAST_INTR(x)		((pciio_intr_t)(x))
+#endif
+
+/*
+ * Many functions are not passed their vertex
+ * information directly; rather, they must
+ * dive through a resource map. These macros
+ * are available to coordinate this detail.
+ */
+#define PIOMAP_FUNC(map,func)		DEV_FUNC((map)->pp_dev,func)
+#define DMAMAP_FUNC(map,func)		DEV_FUNC((map)->pd_dev,func)
+#define INTR_FUNC(intr_hdl,func)	DEV_FUNC((intr_hdl)->pi_dev,func)
+
+/* =====================================================================
+ *          PIO MANAGEMENT
+ *
+ *      For mapping system virtual address space to
+ *      pciio space on a specified card
+ */
+
+pciio_piomap_t
+pciio_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
+		   device_desc_t dev_desc,	/* device descriptor */
+		   pciio_space_t space,	/* CFG, MEM, IO, or a device-decoded window */
+		   iopaddr_t addr,	/* lowest address (or offset in window) */
+		   size_t byte_count,	/* size of region containing our mappings */
+		   size_t byte_count_max,	/* maximum size of a mapping */
+		   unsigned flags)
+{					/* defined in sys/pio.h */
+    return (pciio_piomap_t) DEV_FUNC(dev, piomap_alloc)
+	(dev, dev_desc, space, addr, byte_count, byte_count_max, flags);
+}
+
+void
+pciio_piomap_free(pciio_piomap_t pciio_piomap)
+{
+    PIOMAP_FUNC(pciio_piomap, piomap_free)
+	(CAST_PIOMAP(pciio_piomap));
+}
+
+caddr_t
+pciio_piomap_addr(pciio_piomap_t pciio_piomap,	/* mapping resources */
+		  iopaddr_t pciio_addr,	/* map for this pciio address */
+		  size_t byte_count)
+{					/* map this many bytes */
+    pciio_piomap->pp_kvaddr = PIOMAP_FUNC(pciio_piomap, piomap_addr)
+	(CAST_PIOMAP(pciio_piomap), pciio_addr, byte_count);
+
+    return pciio_piomap->pp_kvaddr;
+}
+
+void
+pciio_piomap_done(pciio_piomap_t pciio_piomap)
+{
+    PIOMAP_FUNC(pciio_piomap, piomap_done)
+	(CAST_PIOMAP(pciio_piomap));
+}
+
+caddr_t
+pciio_piotrans_addr(devfs_handle_t dev,	/* translate for this device */
+		    device_desc_t dev_desc,	/* device descriptor */
+		    pciio_space_t space,	/* CFG, MEM, IO, or a device-decoded window */
+		    iopaddr_t addr,	/* starting address (or offset in window) */
+		    size_t byte_count,	/* map this many bytes */
+		    unsigned flags)
+{					/* (currently unused) */
+    return DEV_FUNC(dev, piotrans_addr)
+	(dev, dev_desc, space, addr, byte_count, flags);
+}
+
+caddr_t
+pciio_pio_addr(devfs_handle_t dev,	/* translate for this device */
+	       device_desc_t dev_desc,	/* device descriptor */
+	       pciio_space_t space,	/* CFG, MEM, IO, or a device-decoded window */
+	       iopaddr_t addr,		/* starting address (or offset in window) */
+	       size_t byte_count,	/* map this many bytes */
+	       pciio_piomap_t *mapp,	/* where to return the map pointer */
+	       unsigned flags)
+{					/* PIO flags */
+    pciio_piomap_t          map = 0;
+    int			    errfree = 0;
+    caddr_t                 res;
+
+    if (mapp) {
+	map = *mapp;			/* possible pre-allocated map */
+	*mapp = 0;			/* record "no map used" */
+    }
+
+    res = pciio_piotrans_addr
+	(dev, dev_desc, space, addr, byte_count, flags);
+    if (res)
+	return res;			/* pciio_piotrans worked */
+
+    if (!map) {
+	map = pciio_piomap_alloc
+	    (dev, dev_desc, space, addr, byte_count, byte_count, flags);
+	if (!map)
+	    return res;			/* pciio_piomap_alloc failed */
+	errfree = 1;
+    }
+
+    res = pciio_piomap_addr
+	(map, addr, byte_count);
+    if (!res) {
+	if (errfree)
+	    pciio_piomap_free(map);
+	return res;			/* pciio_piomap_addr failed */
+    }
+    if (mapp)
+	*mapp = map;			/* pass back map used */
+
+    return res;				/* pciio_piomap_addr succeeded */
+}
+
+iopaddr_t
+pciio_piospace_alloc(devfs_handle_t dev,	/* Device requiring space */
+		     device_desc_t dev_desc,	/* Device descriptor */
+		     pciio_space_t space,	/* MEM32/MEM64/IO */
+		     size_t byte_count,	/* Size of mapping */
+		     size_t align)
+{					/* Alignment needed */
+    if (align < NBPP)
+	align = NBPP;
+    return DEV_FUNC(dev, piospace_alloc)
+	(dev, dev_desc, space, byte_count, align);
+}
+
+void
+pciio_piospace_free(devfs_handle_t dev,	/* Device freeing space */
+		    pciio_space_t space,	/* Type of space        */
+		    iopaddr_t pciaddr,	/* starting address */
+		    size_t byte_count)
+{					/* Range of address   */
+    DEV_FUNC(dev, piospace_free)
+	(dev, space, pciaddr, byte_count);
+}
+
+/* =====================================================================
+ *          DMA MANAGEMENT
+ *
+ *      For mapping from pci space to system
+ *      physical space.
+ */
+
+pciio_dmamap_t
+pciio_dmamap_alloc(devfs_handle_t dev,	/* set up mappings for this device */
+		   device_desc_t dev_desc,	/* device descriptor */
+		   size_t byte_count_max,	/* max size of a mapping */
+		   unsigned flags)
+{					/* defined in dma.h */
+    return (pciio_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
+	(dev, dev_desc, byte_count_max, flags);
+}
+
+void
+pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
+{
+    DMAMAP_FUNC(pciio_dmamap, dmamap_free)
+	(CAST_DMAMAP(pciio_dmamap));
+}
+
+iopaddr_t
+pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap,	/* use these mapping resources */
+		  paddr_t paddr,	/* map for this address */
+		  size_t byte_count)
+{					/* map this many bytes */
+    return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
+	(CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
+}
+
+alenlist_t
+pciio_dmamap_list(pciio_dmamap_t pciio_dmamap,	/* use these mapping resources */
+		  alenlist_t alenlist,	/* map this Address/Length List */
+		  unsigned flags)
+{
+    return DMAMAP_FUNC(pciio_dmamap, dmamap_list)
+	(CAST_DMAMAP(pciio_dmamap), alenlist, flags);
+}
+
+void
+pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
+{
+    DMAMAP_FUNC(pciio_dmamap, dmamap_done)
+	(CAST_DMAMAP(pciio_dmamap));
+}
+
+iopaddr_t
+pciio_dmatrans_addr(devfs_handle_t dev,	/* translate for this device */
+		    device_desc_t dev_desc,	/* device descriptor */
+		    paddr_t paddr,	/* system physical address */
+		    size_t byte_count,	/* length */
+		    unsigned flags)
+{					/* defined in dma.h */
+    return DEV_FUNC(dev, dmatrans_addr)
+	(dev, dev_desc, paddr, byte_count, flags);
+}
+
+alenlist_t
+pciio_dmatrans_list(devfs_handle_t dev,	/* translate for this device */
+		    device_desc_t dev_desc,	/* device descriptor */
+		    alenlist_t palenlist,	/* system address/length list */
+		    unsigned flags)
+{					/* defined in dma.h */
+    return DEV_FUNC(dev, dmatrans_list)
+	(dev, dev_desc, palenlist, flags);
+}
+
+iopaddr_t
+pciio_dma_addr(devfs_handle_t dev,	/* translate for this device */
+	       device_desc_t dev_desc,	/* device descriptor */
+	       paddr_t paddr,		/* system physical address */
+	       size_t byte_count,	/* length */
+	       pciio_dmamap_t *mapp,	/* map to use, then map we used */
+	       unsigned flags)
+{					/* PIO flags */
+    pciio_dmamap_t          map = 0;
+    int			    errfree = 0;
+    iopaddr_t               res;
+
+    if (mapp) {
+	map = *mapp;			/* possible pre-allocated map */
+	*mapp = 0;			/* record "no map used" */
+    }
+
+    res = pciio_dmatrans_addr
+	(dev, dev_desc, paddr, byte_count, flags);
+    if (res)
+	return res;			/* pciio_dmatrans worked */
+
+    if (!map) {
+	map = pciio_dmamap_alloc
+	    (dev, dev_desc, byte_count, flags);
+	if (!map)
+	    return res;			/* pciio_dmamap_alloc failed */
+	errfree = 1;
+    }
+
+    res = pciio_dmamap_addr
+	(map, paddr, byte_count);
+    if (!res) {
+	if (errfree)
+	    pciio_dmamap_free(map);
+	return res;			/* pciio_dmamap_addr failed */
+    }
+    if (mapp)
+	*mapp = map;			/* pass back map used */
+
+    return res;				/* pciio_dmamap_addr succeeded */
+}
+
+void
+pciio_dmamap_drain(pciio_dmamap_t map)
+{
+    DMAMAP_FUNC(map, dmamap_drain)
+	(CAST_DMAMAP(map));
+}
+
+void
+pciio_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
+{
+    DEV_FUNC(dev, dmaaddr_drain)
+	(dev, addr, size);
+}
+
+void
+pciio_dmalist_drain(devfs_handle_t dev, alenlist_t list)
+{
+    DEV_FUNC(dev, dmalist_drain)
+	(dev, list);
+}
+
+/* =====================================================================
+ *          INTERRUPT MANAGEMENT
+ *
+ *      Allow crosstalk devices to establish interrupts
+ */
+
+/*
+ * Allocate resources required for an interrupt as specified in intr_desc.
+ * Return resource handle in intr_hdl.
+ */
+pciio_intr_t
+pciio_intr_alloc(devfs_handle_t dev,	/* which Crosstalk device */
+		 device_desc_t dev_desc,	/* device descriptor */
+		 pciio_intr_line_t lines,	/* INTR line(s) to attach */
+		 devfs_handle_t owner_dev)
+{					/* owner of this interrupt */
+    return (pciio_intr_t) DEV_FUNC(dev, intr_alloc)
+	(dev, dev_desc, lines, owner_dev);
+}
+
+/*
+ * Free resources consumed by intr_alloc.
+ */
+void
+pciio_intr_free(pciio_intr_t intr_hdl)
+{
+    INTR_FUNC(intr_hdl, intr_free)
+	(CAST_INTR(intr_hdl));
+}
+
+/*
+ * Associate resources allocated with a previous pciio_intr_alloc call with the
+ * described handler, arg, name, etc.
+ *
+ * Returns 0 on success, returns <0 on failure.
+ */
+int
+pciio_intr_connect(pciio_intr_t intr_hdl,	/* pciio intr resource handle */
+		   intr_func_t intr_func,	/* pciio intr handler */
+		   intr_arg_t intr_arg,	/* arg to intr handler */
+		   void *thread)
+{					/* intr thread to use */
+    return INTR_FUNC(intr_hdl, intr_connect)
+	(CAST_INTR(intr_hdl), intr_func, intr_arg, thread);
+}
+
+/*
+ * Disassociate handler with the specified interrupt.
+ */
+void
+pciio_intr_disconnect(pciio_intr_t intr_hdl)
+{
+    INTR_FUNC(intr_hdl, intr_disconnect)
+	(CAST_INTR(intr_hdl));
+}
+
+/*
+ * Return a hwgraph vertex that represents the CPU currently
+ * targeted by an interrupt.
+ */
+devfs_handle_t
+pciio_intr_cpu_get(pciio_intr_t intr_hdl)
+{
+    return INTR_FUNC(intr_hdl, intr_cpu_get)
+	(CAST_INTR(intr_hdl));
+}
+
+/* =====================================================================
+ *          ERROR MANAGEMENT
+ */
+
+void
+pciio_slot_func_to_name(char		       *name,
+			pciio_slot_t		slot,
+			pciio_function_t	func)
+{
+    /*
+     * standard connection points:
+     *
+     * PCIIO_SLOT_NONE:	.../pci/direct
+     * PCIIO_FUNC_NONE: .../pci/<SLOT>			ie. .../pci/3
+     * multifunction:   .../pci/<SLOT><FUNC>		ie. .../pci/3c
+     */
+
+    if (slot == PCIIO_SLOT_NONE)
+	sprintf(name, "direct");
+    else if (func == PCIIO_FUNC_NONE)
+	sprintf(name, "%d", slot);
+    else
+	sprintf(name, "%d%c", slot, 'a'+func);
+}
+
+/*
+ * pciio_cardinfo_get
+ *
+ * Get the pciio info structure corresponding to the
+ * specified PCI "slot" (we like it when the same index
+ * number is used for the PCI IDSEL, the REQ/GNT pair,
+ * and the interrupt line being used for INTA. We like
+ * it so much we call it the slot number).
+ */
+static pciio_info_t
+pciio_cardinfo_get(
+		      devfs_handle_t pciio_vhdl,
+		      pciio_slot_t pci_slot)
+{
+    char                    namebuf[16];
+    pciio_info_t	    info = 0;
+    devfs_handle_t	    conn;
+
+    pciio_slot_func_to_name(namebuf, pci_slot, PCIIO_FUNC_NONE);
+    if (GRAPH_SUCCESS ==
+	hwgraph_traverse(pciio_vhdl, namebuf, &conn)) {
+	info = pciio_info_chk(conn);
+	hwgraph_vertex_unref(conn);
+    }
+
+    return info;
+}
+
+/*
+ * pciio_error_handler:
+ * dispatch an error to the appropriate
+ * pciio connection point, or process
+ * it as a generic pci error.
+ * Yes, the first parameter is the
+ * provider vertex at the middle of
+ * the bus; we get to the pciio connect
+ * point using the ioerror widgetdev field.
+ *
+ * This function is called by the
+ * specific PCI provider, after it has figured
+ * out where on the PCI bus (including which slot,
+ * if it can tell) the error came from.
+ */
+/*ARGSUSED */
+int
+pciio_error_handler(
+		       devfs_handle_t pciio_vhdl,
+		       int error_code,
+		       ioerror_mode_t mode,
+		       ioerror_t *ioerror)
+{
+    pciio_info_t            pciio_info;
+    devfs_handle_t            pconn_vhdl;
+#if USRPCI
+    devfs_handle_t            usrpci_v;
+#endif
+    pciio_slot_t            slot;
+
+    int                     retval;
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+    error_state_t	    e_state;
+#endif
+
+#ifdef IRIX
+#if DEBUG && ERROR_DEBUG
+    cmn_err(CE_CONT, "%v: pciio_error_handler\n", pciio_vhdl);
+#endif
+#endif
+
+    IOERR_PRINTF(cmn_err(CE_NOTE,
+			 "%v: PCI Bus Error: Error code: %d Error mode: %d\n",
+			 pciio_vhdl, error_code, mode));
+
+    /* If there is an error handler sitting on
+     * the "no-slot" connection point, give it
+     * first crack at the error. NOTE: it is
+     * quite possible that this function may
+     * do further refining of the ioerror.
+     */
+    pciio_info = pciio_cardinfo_get(pciio_vhdl, PCIIO_SLOT_NONE);
+    if (pciio_info && pciio_info->c_efunc) {
+	pconn_vhdl = pciio_info_dev_get(pciio_info);
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+	e_state = error_state_get(pciio_vhdl);
+
+	if (e_state == ERROR_STATE_ACTION)
+	    (void)error_state_set(pciio_vhdl, ERROR_STATE_NONE);
+
+	if (error_state_set(pconn_vhdl,e_state) ==
+	    ERROR_RETURN_CODE_CANNOT_SET_STATE)
+	    return(IOERROR_UNHANDLED);
+#endif
+	retval = pciio_info->c_efunc
+	    (pciio_info->c_einfo, error_code, mode, ioerror);
+	if (retval != IOERROR_UNHANDLED)
+	    return retval;
+    }
+
+    /* Is the error associated with a particular slot?
+     */
+    if (IOERROR_FIELDVALID(ioerror, widgetdev)) {
+	/*
+	 * NOTE : 
+	 * widgetdev is a 4byte value encoded as slot in the higher order
+	 * 2 bytes and function in the lower order 2 bytes.
+	 */
+#ifdef IRIX
+	slot = pciio_widgetdev_slot_get(IOERROR_GETVALUE(ioerror, widgetdev));
+#else
+	slot = 0;
+#endif
+
+	/* If this slot has an error handler,
+	 * deliver the error to it.
+	 */
+	pciio_info = pciio_cardinfo_get(pciio_vhdl, slot);
+	if (pciio_info != NULL) {
+	    if (pciio_info->c_efunc != NULL) {
+
+		pconn_vhdl = pciio_info_dev_get(pciio_info);
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+		e_state = error_state_get(pciio_vhdl);
+
+		if (e_state == ERROR_STATE_ACTION)
+		    (void)error_state_set(pciio_vhdl, ERROR_STATE_NONE);
+
+		if (error_state_set(pconn_vhdl,e_state) ==
+		    ERROR_RETURN_CODE_CANNOT_SET_STATE)
+		    return(IOERROR_UNHANDLED);
+#endif
+		retval = pciio_info->c_efunc
+		    (pciio_info->c_einfo, error_code, mode, ioerror);
+		if (retval != IOERROR_UNHANDLED)
+		    return retval;
+	    }
+
+#if USRPCI
+	    /* If the USRPCI driver is available and
+	     * knows about this connection point,
+	     * deliver the error to it.
+	     *
+	     * OK to use pconn_vhdl here, even though we
+	     * have already UNREF'd it, since we know that
+	     * it is not going away.
+	     */
+	    pconn_vhdl = pciio_info_dev_get(pciio_info);
+	    if (GRAPH_SUCCESS ==
+		hwgraph_traverse(pconn_vhdl, EDGE_LBL_USRPCI, &usrpci_v)) {
+		retval = usrpci_error_handler
+		    (usrpci_v, error_code, IOERROR_GETVALUE(ioerror, busaddr));
+		hwgraph_vertex_unref(usrpci_v);
+		if (retval != IOERROR_UNHANDLED) {
+		    /*
+		     * This unref is not needed.  If this code is called often enough,
+		     * the system will crash, due to vertex reference count reaching 0,
+		     * causing vertex to be unallocated.  -jeremy
+		     * hwgraph_vertex_unref(pconn_vhdl);
+		     */
+		    return retval;
+		}
+	    }
+#endif
+	}
+    }
+
+    return (mode == MODE_DEVPROBE)
+	? IOERROR_HANDLED	/* probes are OK */
+	: IOERROR_UNHANDLED;	/* otherwise, foo! */
+}
+
+int
+pciio_error_devenable(devfs_handle_t pconn_vhdl, int error_code)
+{
+    return DEV_FUNC(pconn_vhdl, error_devenable)
+	(pconn_vhdl, error_code);
+    /* no cleanup specific to this layer. */
+}
+
+/* =====================================================================
+ *          CONFIGURATION MANAGEMENT
+ */
+
+/*
+ * Startup a crosstalk provider
+ */
+void
+pciio_provider_startup(devfs_handle_t pciio_provider)
+{
+    DEV_FUNC(pciio_provider, provider_startup)
+	(pciio_provider);
+}
+
+/*
+ * Shutdown a crosstalk provider
+ */
+void
+pciio_provider_shutdown(devfs_handle_t pciio_provider)
+{
+    DEV_FUNC(pciio_provider, provider_shutdown)
+	(pciio_provider);
+}
+
+/*
+ * Specify endianness constraints.  The driver tells us what the device
+ * does and how it would like to see things in memory.  We reply with
+ * how things will actually appear in memory.
+ */
+pciio_endian_t
+pciio_endian_set(devfs_handle_t dev,
+		 pciio_endian_t device_end,
+		 pciio_endian_t desired_end)
+{
+    ASSERT((device_end == PCIDMA_ENDIAN_BIG) || (device_end == PCIDMA_ENDIAN_LITTLE));
+    ASSERT((desired_end == PCIDMA_ENDIAN_BIG) || (desired_end == PCIDMA_ENDIAN_LITTLE));
+
+#if DEBUG
+    cmn_err(CE_ALERT,
+	    "%v: pciio_endian_set is going away.\n"
+	    "\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
+	    "\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
+	    dev);
+#endif
+
+    return DEV_FUNC(dev, endian_set)
+	(dev, device_end, desired_end);
+}
+
+/*
+ * Specify PCI arbitration priority.
+ */
+pciio_priority_t
+pciio_priority_set(devfs_handle_t dev,
+		   pciio_priority_t device_prio)
+{
+    ASSERT((device_prio == PCI_PRIO_HIGH) || (device_prio == PCI_PRIO_LOW));
+
+    return DEV_FUNC(dev, priority_set)
+	(dev, device_prio);
+}
+
+/*
+ * Read value of configuration register
+ */
+uint64_t
+pciio_config_get(devfs_handle_t	dev,
+		 unsigned	reg,
+		 unsigned	size)
+{
+    uint64_t	value = 0;
+    unsigned	shift = 0;
+
+    /* handle accesses that cross words here,
+     * since that's common code between all
+     * possible providers.
+     */
+    while (size > 0) {
+	unsigned	biw = 4 - (reg&3);
+	if (biw > size)
+	    biw = size;
+
+	value |= DEV_FUNC(dev, config_get)
+	    (dev, reg, biw) << shift;
+
+	shift += 8*biw;
+	reg += biw;
+	size -= biw;
+    }
+    return value;
+}
+
+/*
+ * Change value of configuration register
+ */
+void
+pciio_config_set(devfs_handle_t	dev,
+		 unsigned	reg,
+		 unsigned	size,
+		 uint64_t	value)
+{
+    /* handle accesses that cross words here,
+     * since that's common code between all
+     * possible providers.
+     */
+    while (size > 0) {
+	unsigned	biw = 4 - (reg&3);
+	if (biw > size)
+	    biw = size;
+	    
+	DEV_FUNC(dev, config_set)
+	    (dev, reg, biw, value);
+	reg += biw;
+	size -= biw;
+	value >>= biw * 8;
+    }
+}
+
+/* =====================================================================
+ *          GENERIC PCI SUPPORT FUNCTIONS
+ */
+pciio_slot_t
+pciio_error_extract(devfs_handle_t 	dev,
+		   pciio_space_t 	*space,
+		   iopaddr_t		*offset)
+{
+	ASSERT(dev != NODEV);
+	return DEV_FUNC(dev,error_extract)(dev,space,offset);
+}
+
+/*
+ * Issue a hardware reset to a card.
+ */
+int
+pciio_reset(devfs_handle_t dev)
+{
+    return DEV_FUNC(dev, reset) (dev);
+}
+
+/*
+ * flush write gather buffers
+ */
+int
+pciio_write_gather_flush(devfs_handle_t dev)
+{
+    return DEV_FUNC(dev, write_gather_flush) (dev);
+}
+
+devfs_handle_t
+pciio_intr_dev_get(pciio_intr_t pciio_intr)
+{
+    return (pciio_intr->pi_dev);
+}
+
+/****** Generic crosstalk pio interfaces ******/
+devfs_handle_t
+pciio_pio_dev_get(pciio_piomap_t pciio_piomap)
+{
+    return (pciio_piomap->pp_dev);
+}
+
+pciio_slot_t
+pciio_pio_slot_get(pciio_piomap_t pciio_piomap)
+{
+    return (pciio_piomap->pp_slot);
+}
+
+pciio_space_t
+pciio_pio_space_get(pciio_piomap_t pciio_piomap)
+{
+    return (pciio_piomap->pp_space);
+}
+
+iopaddr_t
+pciio_pio_pciaddr_get(pciio_piomap_t pciio_piomap)
+{
+    return (pciio_piomap->pp_pciaddr);
+}
+
+ulong
+pciio_pio_mapsz_get(pciio_piomap_t pciio_piomap)
+{
+    return (pciio_piomap->pp_mapsz);
+}
+
+caddr_t
+pciio_pio_kvaddr_get(pciio_piomap_t pciio_piomap)
+{
+    return (pciio_piomap->pp_kvaddr);
+}
+
+/****** Generic crosstalk dma interfaces ******/
+devfs_handle_t
+pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap)
+{
+    return (pciio_dmamap->pd_dev);
+}
+
+pciio_slot_t
+pciio_dma_slot_get(pciio_dmamap_t pciio_dmamap)
+{
+    return (pciio_dmamap->pd_slot);
+}
+
+/****** Generic pci slot information interfaces ******/
+
+pciio_info_t
+pciio_info_chk(devfs_handle_t pciio)
+{
+    arbitrary_info_t        ainfo = 0;
+
+    hwgraph_info_get_LBL(pciio, INFO_LBL_PCIIO, &ainfo);
+    return (pciio_info_t) ainfo;
+}
+
+pciio_info_t
+pciio_info_get(devfs_handle_t pciio)
+{
+    pciio_info_t            pciio_info;
+
+    pciio_info = (pciio_info_t) hwgraph_fastinfo_get(pciio);
+
+#ifdef DEBUG_PCIIO
+    {
+	int pos;
+	char dname[256];
+	pos = devfs_generate_path(pciio, dname, 256);
+	printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
+    }
+#endif /* DEBUG_PCIIO */
+
+#ifdef BRINGUP
+    if ((pciio_info != NULL) &&
+	(pciio_info->c_fingerprint != pciio_info_fingerprint)
+	&& (pciio_info->c_fingerprint != NULL)) {
+#else
+    if ((pciio_info != NULL) &&
+	(pciio_info->c_fingerprint != pciio_info_fingerprint)) {
+#endif /* BRINGUP */
+
+	printk("pciio_info_get: Found fastinfo 0x%p but wrong fingerprint %s\n", pciio_info,
+	pciio_info->c_fingerprint);
+	return((pciio_info_t)-1); /* Should panic .. */
+    }
+	
+
+    return pciio_info;
+}
+
+void
+pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
+{
+    if (pciio_info != NULL)
+	pciio_info->c_fingerprint = pciio_info_fingerprint;
+    hwgraph_fastinfo_set(pciio, (arbitrary_info_t) pciio_info);
+
+    /* Also, mark this vertex as a PCI slot
+     * and use the pciio_info, so pciio_info_chk
+     * can work (and be fairly efficient).
+     */
+    hwgraph_info_add_LBL(pciio, INFO_LBL_PCIIO,
+			 (arbitrary_info_t) pciio_info);
+}
+
+devfs_handle_t
+pciio_info_dev_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_vertex);
+}
+
+/*ARGSUSED*/
+pciio_bus_t
+pciio_info_bus_get(pciio_info_t pciio_info)
+{
+    /* XXX for now O2 always gets back bus 0 */
+    return (pciio_bus_t)0;
+}
+
+pciio_slot_t
+pciio_info_slot_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_slot);
+}
+
+pciio_function_t
+pciio_info_function_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_func);
+}
+
+pciio_vendor_id_t
+pciio_info_vendor_id_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_vendor);
+}
+
+pciio_device_id_t
+pciio_info_device_id_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_device);
+}
+
+devfs_handle_t
+pciio_info_master_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_master);
+}
+
+arbitrary_info_t
+pciio_info_mfast_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_mfast);
+}
+
+pciio_provider_t       *
+pciio_info_pops_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_pops);
+}
+
+error_handler_f	       *
+pciio_info_efunc_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_efunc);
+}
+
+error_handler_arg_t    *
+pciio_info_einfo_get(pciio_info_t pciio_info)
+{
+    return (pciio_info->c_einfo);
+}
+
+pciio_space_t
+pciio_info_bar_space_get(pciio_info_t info, int win)
+{
+    return info->c_window[win].w_space;
+}
+
+iopaddr_t
+pciio_info_bar_base_get(pciio_info_t info, int win)
+{
+    return info->c_window[win].w_base;
+}
+
+size_t
+pciio_info_bar_size_get(pciio_info_t info, int win)
+{
+    return info->c_window[win].w_size;
+}
+
+iopaddr_t
+pciio_info_rom_base_get(pciio_info_t info)
+{
+    return info->c_rbase;
+}
+
+size_t
+pciio_info_rom_size_get(pciio_info_t info)
+{
+    return info->c_rsize;
+}
+
+
+/* =====================================================================
+ *          GENERIC PCI INITIALIZATION FUNCTIONS
+ */
+
+/*
+ *    pciioinit: called once during device driver
+ *      initializtion if this driver is configured into
+ *      the system.
+ */
+void
+pciio_init(void)
+{
+    cdl_p                   cp;
+
+#if DEBUG && ATTACH_DEBUG
+    printf("pciio_init\n");
+#endif
+    /* Allocate the registry.
+     * We might already have one.
+     * If we don't, go get one.
+     * MPness: someone might have
+     * set one up for us while we
+     * were not looking; use an atomic
+     * compare-and-swap to commit to
+     * using the new registry if and
+     * only if nobody else did first.
+     * If someone did get there first,
+     * toss the one we allocated back
+     * into the pool.
+     */
+    if (pciio_registry == NULL) {
+	cp = cdl_new(EDGE_LBL_PCI, "vendor", "device");
+	if (!compare_and_swap_ptr((void **) &pciio_registry, NULL, (void *) cp)) {
+	    cdl_del(cp);
+	}
+    }
+    ASSERT(pciio_registry != NULL);
+}
+
+/*
+ *    pciioattach: called for each vertex in the graph
+ *      that is a PCI provider.
+ */
+/*ARGSUSED */
+int
+pciio_attach(devfs_handle_t pciio)
+{
+#if DEBUG && ATTACH_DEBUG
+    cmn_err(CE_CONT, "%v: pciio_attach\n", pciio);
+#endif
+    return 0;
+}
+
+/*
+ * Associate a set of pciio_provider functions with a vertex.
+ */
+void
+pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns)
+{
+    hwgraph_info_add_LBL(provider, INFO_LBL_PFUNCS, (arbitrary_info_t) pciio_fns);
+}
+
+/*
+ * Disassociate a set of pciio_provider functions with a vertex.
+ */
+void
+pciio_provider_unregister(devfs_handle_t provider)
+{
+    arbitrary_info_t        ainfo;
+
+#ifdef IRIX
+    hwgraph_info_remove_LBL(provider, INFO_LBL_PFUNCS, &ainfo);
+#else
+    hwgraph_info_remove_LBL(provider, INFO_LBL_PFUNCS, (long *) &ainfo);
+#endif
+}
+
+/*
+ * Obtain a pointer to the pciio_provider functions for a specified Crosstalk
+ * provider.
+ */
+pciio_provider_t       *
+pciio_provider_fns_get(devfs_handle_t provider)
+{
+    arbitrary_info_t        ainfo = 0;
+
+    (void) hwgraph_info_get_LBL(provider, INFO_LBL_PFUNCS, &ainfo);
+    return (pciio_provider_t *) ainfo;
+}
+
+/*ARGSUSED4 */
+int
+pciio_driver_register(
+			 pciio_vendor_id_t vendor_id,
+			 pciio_device_id_t device_id,
+			 char *driver_prefix,
+			 unsigned flags)
+{
+    /* a driver's init routine might call
+     * pciio_driver_register before the
+     * system calls pciio_init; so we
+     * make the init call ourselves here.
+     */
+    if (pciio_registry == NULL)
+	pciio_init();
+
+    return cdl_add_driver(pciio_registry,
+			  vendor_id, device_id,
+			  driver_prefix, flags);
+}
+
+/*
+ * Remove an initialization function.
+ */
+void
+pciio_driver_unregister(
+			   char *driver_prefix)
+{
+    /* before a driver calls unregister,
+     * it must have called register; so
+     * we can assume we have a registry here.
+     */
+    ASSERT(pciio_registry != NULL);
+
+    cdl_del_driver(pciio_registry, driver_prefix);
+}
+
+/*
+ * Call some function with each vertex that
+ * might be one of this driver's attach points.
+ */
+void
+pciio_iterate(char *driver_prefix,
+	      pciio_iter_f * func)
+{
+    /* a driver's init routine might call
+     * pciio_iterate before the
+     * system calls pciio_init; so we
+     * make the init call ourselves here.
+     */
+    if (pciio_registry == NULL)
+	pciio_init();
+
+    ASSERT(pciio_registry != NULL);
+
+    cdl_iterate(pciio_registry, driver_prefix, (cdl_iter_f *) func);
+}
+
+devfs_handle_t
+pciio_device_register(
+		devfs_handle_t connectpt,	/* vertex for /hw/.../pciio/%d */
+		devfs_handle_t master,	/* card's master ASIC (PCI provider) */
+		pciio_slot_t slot,	/* card's slot */
+		pciio_function_t func,	/* card's func */
+		pciio_vendor_id_t vendor_id,
+		pciio_device_id_t device_id)
+{
+
+    return pciio_device_info_register
+	(connectpt, pciio_device_info_new (NULL, master, slot, func,
+					   vendor_id, device_id));
+}
+
+void
+pciio_device_unregister(devfs_handle_t pconn)
+{
+    DEV_FUNC(pconn,device_unregister)(pconn);
+}
+
+pciio_info_t
+pciio_device_info_new(
+		pciio_info_t pciio_info,
+		devfs_handle_t master,
+		pciio_slot_t slot,
+		pciio_function_t func,
+		pciio_vendor_id_t vendor_id,
+		pciio_device_id_t device_id)
+{
+    if (!pciio_info)
+	NEW(pciio_info);
+    ASSERT(pciio_info != NULL);
+
+    pciio_info->c_slot = slot;
+    pciio_info->c_func = func;
+    pciio_info->c_vendor = vendor_id;
+    pciio_info->c_device = device_id;
+    pciio_info->c_master = master;
+    pciio_info->c_mfast = hwgraph_fastinfo_get(master);
+    pciio_info->c_pops = pciio_provider_fns_get(master);
+    pciio_info->c_efunc = 0;
+    pciio_info->c_einfo = 0;
+
+    return pciio_info;
+}
+
+void
+pciio_device_info_free(pciio_info_t pciio_info)
+{
+    /* NOTE : pciio_info is a structure within the pcibr_info
+     *	      and not a pointer to memory allocated on the heap !!
+     */
+    BZERO((char *)pciio_info,sizeof(pciio_info));
+}
+
+devfs_handle_t
+pciio_device_info_register(
+		devfs_handle_t connectpt,		/* vertex at center of bus */
+		pciio_info_t pciio_info)	/* details about the connectpt */
+{
+    char		name[32];
+    devfs_handle_t	pconn;
+
+    pciio_slot_func_to_name(name,
+			    pciio_info->c_slot,
+			    pciio_info->c_func);
+
+    printk("pciio_device_info_register: connectpt 0x%p, pciio_info 0x%p\n", connectpt, pciio_info);
+
+    if (GRAPH_SUCCESS !=
+	hwgraph_path_add(connectpt, name, &pconn))
+	return pconn;
+
+    pciio_info->c_vertex = pconn;
+    pciio_info_set(pconn, pciio_info);
+#ifdef BRINGUP
+    {
+	int pos;
+	char dname[256];
+	pos = devfs_generate_path(pconn, dname, 256);
+	printk("%s : pconn path= %s \n", __FUNCTION__, &dname[pos]);
+    }
+#endif /* BRINGUP */
+
+    /*
+     * create link to our pci provider
+     */
+
+    device_master_set(pconn, pciio_info->c_master);
+
+#if USRPCI
+    /*
+     * Call into usrpci provider to let it initialize for
+     * the given slot.
+     */
+    if (pciio_info->c_slot != PCIIO_SLOT_NONE)
+	usrpci_device_register(pconn, pciio_info->c_master, pciio_info->c_slot);
+#endif
+
+    return pconn;
+}
+
+void
+pciio_device_info_unregister(devfs_handle_t connectpt,
+			     pciio_info_t pciio_info)
+{
+    char		name[32];
+    devfs_handle_t	pconn;
+
+    if (!pciio_info)
+	return;
+
+    pciio_slot_func_to_name(name,
+			    pciio_info->c_slot,
+			    pciio_info->c_func);
+
+    hwgraph_edge_remove(connectpt,name,&pconn);
+    pciio_info_set(pconn,0);
+
+    /* Remove the link to our pci provider */
+    hwgraph_edge_remove(pconn, EDGE_LBL_MASTER, NULL);
+
+    hwgraph_vertex_unref(pconn);
+    hwgraph_vertex_destroy(pconn);
+    
+}
+/* Add the pci card inventory information to the hwgraph
+ */
+static void
+pciio_device_inventory_add(devfs_handle_t pconn_vhdl)
+{
+    pciio_info_t	pciio_info = pciio_info_get(pconn_vhdl);
+
+    ASSERT(pciio_info);
+    ASSERT(pciio_info->c_vertex == pconn_vhdl);
+
+    /* Donot add inventory  for non-existent devices */
+    if ((pciio_info->c_vendor == PCIIO_VENDOR_ID_NONE)	||
+	(pciio_info->c_device == PCIIO_DEVICE_ID_NONE))
+	return;
+    device_inventory_add(pconn_vhdl,INV_IOBD,INV_PCIADAP,
+			 pciio_info->c_vendor,pciio_info->c_device,
+			 pciio_info->c_slot);
+}
+
+static void
+pciio_device_inventory_remove(devfs_handle_t pconn_vhdl)
+{
+#ifdef IRIX
+    hwgraph_inventory_remove(pconn_vhdl,-1,-1,-1,-1,-1);
+#endif
+}
+
+/*ARGSUSED */
+int
+pciio_device_attach(devfs_handle_t pconn)
+{
+    pciio_info_t            pciio_info;
+    pciio_vendor_id_t       vendor_id;
+    pciio_device_id_t       device_id;
+
+    pciio_device_inventory_add(pconn);
+    pciio_info = pciio_info_get(pconn);
+
+    vendor_id = pciio_info->c_vendor;
+    device_id = pciio_info->c_device;
+
+    printk("pciio_device_attach: Function 0x%p, vendor 0x%x, device_id %x\n", pconn, vendor_id, device_id);
+
+    /* we don't start attaching things until
+     * all the driver init routines (including
+     * pciio_init) have been called; so we
+     * can assume here that we have a registry.
+     */
+    ASSERT(pciio_registry != NULL);
+
+    return(cdl_add_connpt(pciio_registry, vendor_id, device_id, pconn));
+
+}
+
+int
+pciio_device_detach(devfs_handle_t pconn)
+{
+    pciio_info_t            pciio_info;
+    pciio_vendor_id_t       vendor_id;
+    pciio_device_id_t       device_id;
+
+    pciio_device_inventory_remove(pconn);
+    pciio_info = pciio_info_get(pconn);
+
+    vendor_id = pciio_info->c_vendor;
+    device_id = pciio_info->c_device;
+
+    /* we don't start attaching things until
+     * all the driver init routines (including
+     * pciio_init) have been called; so we
+     * can assume here that we have a registry.
+     */
+    ASSERT(pciio_registry != NULL);
+
+    cdl_del_connpt(pciio_registry, vendor_id, device_id, pconn);
+
+    return(0);
+    
+}
+
+/*
+ * pciio_error_register:
+ * arrange for a function to be called with
+ * a specified first parameter plus other
+ * information when an error is encountered
+ * and traced to the pci slot corresponding
+ * to the connection point pconn.
+ *
+ * may also be called with a null function
+ * pointer to "unregister" the error handler.
+ *
+ * NOTE: subsequent calls silently overwrite
+ * previous data for this vertex. We assume that
+ * cooperating drivers, well, cooperate ...
+ */
+void
+pciio_error_register(devfs_handle_t pconn,
+		     error_handler_f *efunc,
+		     error_handler_arg_t einfo)
+{
+    pciio_info_t            pciio_info;
+
+    pciio_info = pciio_info_get(pconn);
+    ASSERT(pciio_info != NULL);
+    pciio_info->c_efunc = efunc;
+    pciio_info->c_einfo = einfo;
+}
+
+/*
+ * Check if any device has been found in this slot, and return
+ * true or false
+ * vhdl is the vertex for the slot
+ */
+int
+pciio_slot_inuse(devfs_handle_t pconn_vhdl)
+{
+    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
+
+    ASSERT(pciio_info);
+    ASSERT(pciio_info->c_vertex == pconn_vhdl);
+    if (pciio_info->c_vendor) {
+	/*
+	 * Non-zero value for vendor indicate
+	 * a board being found in this slot.
+	 */
+	return 1;
+    }
+    return 0;
+}
+
+int
+pciio_dma_enabled(devfs_handle_t pconn_vhdl)
+{
+	return DEV_FUNC(pconn_vhdl, dma_enabled)(pconn_vhdl);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/sgi_if.c linux/arch/ia64/sn/io/sgi_if.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/sgi_if.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/sgi_if.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,73 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/ioerror_handling.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/slotnum.h>
+
+#define spinlock_init(x,name) mutex_init(x, MUTEX_DEFAULT, name);
+
+void *
+kmem_zalloc(size_t size, int flag)
+{
+        void *ptr = kmalloc(size, GFP_KERNEL);
+        BZERO(ptr, size);
+        return ptr;
+}
+
+#define xtod(c)         ((c) <= '9' ? '0' - (c) : 'a' - (c) - 10)
+long
+atoi(register char *p)
+{
+        register long n;
+        register int c, neg = 0;
+
+        if (p == NULL)
+                return 0;
+
+        if (!isdigit(c = *p)) {
+                while (isspace(c))
+                        c = *++p;
+                switch (c) {
+                case '-':
+                        neg++;
+                case '+': /* fall-through */
+                        c = *++p;
+                }
+                if (!isdigit(c))
+                        return (0);
+        }
+        if (c == '0' && *(p + 1) == 'x') {
+                p += 2;
+                c = *p;
+                n = xtod(c);
+                while ((c = *++p) && isxdigit(c)) {
+                        n *= 16; /* two steps to avoid unnecessary overflow */
+                        n += xtod(c); /* accum neg to avoid surprises at MAX */
+                }
+        } else {
+                n = '0' - c;
+                while ((c = *++p) && isdigit(c)) {
+                        n *= 10; /* two steps to avoid unnecessary overflow */
+                        n += '0' - c; /* accum neg to avoid surprises at MAX */
+                }
+        }
+        return (neg ? n : -n);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/sgi_io_init.c linux/arch/ia64/sn/io/sgi_io_init.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/sgi_io_init.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/sgi_io_init.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,312 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/synergy.h>
+#include <linux/smp.h>
+
+extern void mlreset(int );
+extern int init_hcl(void);
+extern void klgraph_hack_init(void);
+extern void per_hub_init(cnodeid_t);
+extern void hubspc_init(void);
+extern void pciba_init(void);
+extern void pciio_init(void);
+extern void pcibr_init(void);
+extern void xtalk_init(void);
+extern void xbow_init(void);
+extern void xbmon_init(void);
+extern void pciiox_init(void);
+extern void usrpci_init(void);
+extern void ioc3_init(void);
+extern void initialize_io(void);
+extern void init_platform_nodepda(nodepda_t *, cnodeid_t );
+extern void intr_clear_all(nasid_t);
+extern void klhwg_add_all_modules(devfs_handle_t);
+extern void klhwg_add_all_nodes(devfs_handle_t);
+
+void sn_mp_setup(void);
+extern devfs_handle_t hwgraph_root;
+extern void io_module_init(void);
+extern cnodeid_t nasid_to_compact_node[];
+extern void pci_bus_cvlink_init(void);
+extern void temp_hack(void);
+extern void init_platform_pda(cpuid_t cpu);
+
+extern int pci_bus_to_hcl_cvlink(void);
+extern synergy_da_t	*Synergy_da_indr[];
+
+#define DEBUG_IO_INIT
+#ifdef DEBUG_IO_INIT
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif /* DEBUG_IO_INIT */
+
+/*
+ * kern/ml/csu.s calls mlsetup
+ *   mlsetup calls mlreset(master) - kern/os/startup.c
+ *   j main
+ *
+ 
+ * SN/slave.s start_slave_loop calls slave_entry
+ * SN/slave.s slave_entry calls slave_loop
+ * SN/slave.s slave_loop calls bootstrap
+ * bootstrap in SN1/SN1asm.s calls cboot
+ * cboot calls mlreset(slave) - ml/SN/mp.c
+ *
+ * sgi_io_infrastructure_init() gets called right before pci_init() 
+ * in Linux mainline.  This routine actually mirrors the IO Infrastructure 
+ * call sequence in IRIX, ofcourse, nicely modified for Linux.
+ *
+ * It is very IMPORTANT that this call is only made by the Master CPU!
+ *
+ */
+
+void
+sgi_master_io_infr_init(void)
+{
+#ifdef Colin
+	/*
+	 * Simulate Big Window 0.
+	 * Only when we build for lutsen etc. ..
+	 */
+	simulated_BW0_init();
+#endif
+
+	/*
+	 * Do any early init stuff .. einit_tbl[] etc.
+	 */
+	DBG("--> sgi_master_io_infr_init: calling init_hcl().\n");
+	init_hcl(); /* Sets up the hwgraph compatibility layer with devfs */
+
+	/*
+	 * initialize the Linux PCI to xwidget vertexes ..
+	 */
+	DBG("--> sgi_master_io_infr_init: calling pci_bus_cvlink_init().\n");
+	pci_bus_cvlink_init();
+
+	/*
+	 * Hack to provide statically initialzed klgraph entries.
+	 */
+	DBG("--> sgi_master_io_infr_init: calling klgraph_hack_init()\n");
+	klgraph_hack_init();
+
+	/*
+	 * This is the Master CPU.  Emulate mlsetup and main.c in Irix.
+	 */
+	DBG("--> sgi_master_io_infr_init: calling mlreset(0).\n");
+	mlreset(0); /* Master .. */
+
+	/*
+	 * allowboot() is called by kern/os/main.c in main()
+	 * Emulate allowboot() ...
+	 *   per_cpu_init() - only need per_hub_init()
+	 *   cpu_io_setup() - Nothing to do.
+	 * 
+	 */
+	DBG("--> sgi_master_io_infr_init: calling sn_mp_setup().\n");
+	sn_mp_setup();
+
+	DBG("--> sgi_master_io_infr_init: calling per_hub_init(0).\n");
+	per_hub_init(0); /* Need to get and send in actual cnode number */
+
+	/* We can do headless hub cnodes here .. */
+
+	/*
+	 * io_init[] stuff.
+	 *
+	 * Get SGI IO Infrastructure drivers to init and register with 
+	 * each other etc.
+	 */
+
+	DBG("--> sgi_master_io_infr_init: calling hubspc_init()\n");
+	hubspc_init();
+
+	DBG("--> sgi_master_io_infr_init: calling pciba_init()\n");
+	pciba_init();
+
+	DBG("--> sgi_master_io_infr_init: calling pciio_init()\n");
+	pciio_init();
+
+	DBG("--> sgi_master_io_infr_init: calling pcibr_init()\n");
+	pcibr_init();
+
+	DBG("--> sgi_master_io_infr_init: calling xtalk_init()\n");
+	xtalk_init();
+
+	DBG("--> sgi_master_io_infr_init: calling xbow_init()\n");
+	xbow_init();
+
+	DBG("--> sgi_master_io_infr_init: calling xbmon_init()\n");
+	xbmon_init();
+
+	DBG("--> sgi_master_io_infr_init: calling pciiox_init()\n");
+	pciiox_init();
+
+	DBG("--> sgi_master_io_infr_init: calling usrpci_init()\n");
+	usrpci_init();
+
+	DBG("--> sgi_master_io_infr_init: calling ioc3_init()\n");
+	ioc3_init();
+
+	/*
+	 *
+	 * Our IO Infrastructure drivers are in place .. 
+	 * Initialize the whole IO Infrastructure .. xwidget/device probes.
+	 *
+	 */
+	DBG("--> sgi_master_io_infr_init: Start Probe and IO Initialization\n");
+	initialize_io();
+
+	DBG("--> sgi_master_io_infr_init: Setting up SGI IO Links for Linux PCI\n");
+	pci_bus_to_hcl_cvlink();
+
+	DBG("--> Leave sgi_master_io_infr_init: DONE setting up SGI Links for PCI\n");
+}
+
+/*
+ * sgi_slave_io_infr_init - This routine must be called on all cpus except 
+ * the Master CPU.
+ */
+void
+sgi_slave_io_infr_init(void)
+{
+	/* Emulate cboot() .. */
+	mlreset(1); /* This is a slave cpu */
+
+	per_hub_init(0); /* Need to get and send in actual cnode number */
+
+	/* Done */
+}
+
+/*
+ * One-time setup for MP SN.
+ * Allocate per-node data, slurp prom klconfig information and
+ * convert it to hwgraph information.
+ */
+void
+sn_mp_setup(void)
+{
+	cnodeid_t	cnode;
+	extern int	maxnodes;
+	cpuid_t		cpu;
+
+	DBG("sn_mp_setup: Entered.\n");
+	/*
+	 * NODEPDA(x) Macro depends on nodepda
+	 * subnodepda is also statically set to calias space which we 
+	 * do not currently support yet .. just a hack for now.
+	 */
+#ifdef NUMA_BASE
+	DBG("sn_mp_setup(): maxnodes= %d  numnodes= %d\n", maxnodes,numnodes);
+        maxnodes = numnodes;
+#ifdef SIMULATED_KLGRAPH
+	maxnodes = 1;
+	numnodes = 1;
+#endif /* SIMULATED_KLGRAPH */
+        printk("sn_mp_setup(): Allocating backing store for *Nodepdaindr[%2d] \n",
+                maxnodes);
+
+        /*
+         * Initialize Nodpdaindr and per-node nodepdaindr array
+         */
+        *Nodepdaindr = (nodepda_t *) kmalloc(sizeof(nodepda_t *)*numnodes, GFP_KERNEL);
+        for (cnode=0; cnode<maxnodes; cnode++) {
+            Nodepdaindr[cnode] = (nodepda_t *) kmalloc(sizeof(struct nodepda_s),
+                                                                GFP_KERNEL);
+	    Synergy_da_indr[cnode * 2] = (synergy_da_t *) kmalloc(
+		sizeof(synergy_da_t), GFP_KERNEL);
+	    Synergy_da_indr[(cnode * 2) + 1] = (synergy_da_t *) kmalloc(
+		sizeof(synergy_da_t), GFP_KERNEL);
+            Nodepdaindr[cnode]->pernode_pdaindr = Nodepdaindr;
+            subnodepda = &Nodepdaindr[cnode]->snpda[cnode];
+        }
+        nodepda = Nodepdaindr[0];
+#else
+        Nodepdaindr = (nodepda_t *) kmalloc(sizeof(struct nodepda_s), GFP_KERNEL);
+        nodepda = Nodepdaindr[0];
+        subnodepda = &Nodepdaindr[0]->snpda[0];
+
+#endif /* NUMA_BASE */
+
+	/*
+	 * Before we let the other processors run, set up the platform specific
+	 * stuff in the nodepda.
+	 *
+	 * ???? maxnodes set in mlreset .. who sets it now ????
+	 * ???? cpu_node_probe() called in mlreset to set up the following:
+	 *      compact_to_nasid_node[] - cnode id gives nasid
+	 *      nasid_to_compact_node[] - nasid gives cnode id
+	 *
+	 *	do_cpumask() sets the following:
+	 *      cpuid_to_compact_node[] - cpuid gives cnode id
+	 *
+	 *      nasid comes from gdap->g_nasidtable[]
+	 *      ml/SN/promif.c
+	 */
+
+	for (cnode = 0; cnode < maxnodes; cnode++) {
+		/*
+		 * Set up platform-dependent nodepda fields.
+		 * The following routine actually sets up the hubinfo struct
+		 * in nodepda.
+		 */
+		DBG("sn_mp_io_setup: calling init_platform_nodepda(%2d)\n",cnode);
+		init_platform_nodepda(Nodepdaindr[cnode], cnode);
+
+		/*
+		 * This routine clears the Hub's Interrupt registers.
+		 */
+#ifndef CONFIG_IA64_SGI_IO
+		/*
+		 * We need to move this intr_clear_all() routine 
+		 * from SN/intr.c to a more appropriate file.
+		 * Talk to Al Mayer.
+		 */
+                intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
+#endif
+	}
+
+#ifdef CONFIG_IA64_SGI_IO
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+		/* Skip holes in CPU space */
+		if (cpu_enabled(cpu)) {
+			init_platform_pda(cpu);
+		}
+	}
+#endif
+
+	/*
+	 * Initialize platform-dependent vertices in the hwgraph:
+	 *	module
+	 *	node
+	 *	cpu
+	 *	memory
+	 *	slot
+	 *	hub
+	 *	router
+	 *	xbow
+	 */
+
+	DBG("sn_mp_io_setup: calling io_module_init()\n");
+	io_module_init(); /* Use to be called module_init() .. */
+
+	DBG("sn_mp_setup: calling klhwg_add_all_modules()\n");
+	klhwg_add_all_modules(hwgraph_root);
+	DBG("sn_mp_setup: calling klhwg_add_all_nodes()\n");
+	klhwg_add_all_nodes(hwgraph_root);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/sgi_io_sim.c linux/arch/ia64/sn/io/sgi_io_sim.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/sgi_io_sim.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/sgi_io_sim.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,162 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/module.h>
+#include <asm/sn/nic.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/synergy.h>
+
+cnodeid_t nasid_to_compact_node[MAX_NASIDS];
+nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
+cnodeid_t cpuid_to_compact_node[MAXCPUS];
+cpuid_t         master_procid = 0;
+int maxnodes;
+char arg_maxnodes[4];
+
+nodepda_t       *Nodepdaindr[MAX_COMPACT_NODES];
+nodepda_t        *nodepda;
+subnode_pda_t    *subnodepda;
+
+synergy_da_t	*Synergy_da_indr[MAX_COMPACT_NODES * 2];
+
+extern void init_all_devices(void);
+
+
+/*
+ * Return non-zero if the given variable was specified
+ */
+int
+is_specified(char *s)
+{
+	/* NOTE(review): assumes s is a valid NUL-terminated string; a NULL
+	 * argument would fault in strlen() -- confirm callers never pass NULL. */
+        return (strlen(s) != 0);
+}
+
+
+void pciba_init(void)
+{
+	FIXME("pciba_init : no-op\n");
+}
+
+void xbmon_init(void)
+{
+	FIXME("xbmon_init : no-op\n");
+
+}
+
+void pciiox_init(void)
+{
+	FIXME("pciiox_init : no-op\n");
+
+}
+
+void usrpci_init(void)
+{
+	FIXME("usrpci_init : no-op\n");
+
+}
+
+void ioc3_init(void)
+{
+	FIXME("ioc3_init : no-op\n");
+
+}
+
+void initialize_io(void)
+{
+
+	/* Delegate all I/O bring-up to the generic device-init path. */
+	init_all_devices();
+}
+
+/*
+ * Routines provided by ml/SN/promif.c.
+ */
+static __psunsigned_t master_bridge_base = (__psunsigned_t)NULL;
+static nasid_t console_nasid;
+static char console_wid;
+static char console_pcislot;
+
+/*
+ * set_master_bridge_base: cache the console (master) bridge's NASID,
+ * widget id and PCI slot, then compute the PIO base address of that
+ * bridge's small window.  Results are kept in file-scope statics and
+ * consumed by is_master_nasid_widget().
+ */
+void
+set_master_bridge_base(void)
+{
+
+#ifdef SIMULATED_KLGRAPH
+	/* Simulation build: no klconfig to consult, so hardwire the values. */
+	printk("set_master_bridge_base: SIMULATED_KLGRAPH FIXME hardwired master.\n");
+	console_nasid = 0;
+	console_wid = 0x8;
+	console_pcislot = 0x2;
+#else
+        console_nasid = KL_CONFIG_CH_CONS_INFO(master_nasid)->nasid;
+        console_wid = WIDGETID_GET(KL_CONFIG_CH_CONS_INFO(master_nasid)->memory_base);
+        console_pcislot = KL_CONFIG_CH_CONS_INFO(master_nasid)->npci;
+#endif /* SIMULATED_KLGRAPH */
+
+        master_bridge_base = (__psunsigned_t)NODE_SWIN_BASE(console_nasid,
+                                                            console_wid);
+}
+
+/*
+ * check_nasid_equiv: return 1 when two NASIDs refer to the same node or
+ * when nasida is recorded as nasidb's xbow peer in the nodepda; 0 otherwise.
+ */
+int
+check_nasid_equiv(nasid_t nasida, nasid_t nasidb)
+{
+        if ((nasida == nasidb) ||
+            (nasida == NODEPDA(NASID_TO_COMPACT_NODEID(nasidb))->xbow_peer))
+                return 1;
+        else
+                return 0;
+}
+
+/*
+ * is_master_nasid_widget: return 1 when (test_nasid, test_wid) names the
+ * console bridge widget cached by set_master_bridge_base(), 0 otherwise.
+ */
+int
+is_master_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid)
+{
+
+        /*
+         * If the widget numbers are different, we're not the master.
+         */
+        if (test_wid != (xwidgetnum_t)console_wid)
+                return 0;
+
+        /*
+         * If the NASIDs are the same or equivalent, we're the master.
+         */
+        if (check_nasid_equiv(test_nasid, console_nasid)) {
+                return 1;
+        } else {
+                return 0;
+        }
+}
+
+/*
+ * nasid_to_compact_nodeid: map a NASID to its compact node id via the
+ * nasid_to_compact_node[] table.  Valid NASIDs are 0 .. MAX_NASIDS-1.
+ */
+cnodeid_t
+nasid_to_compact_nodeid(nasid_t nasid)
+{
+        ASSERT(nasid >= 0 && nasid < MAX_NASIDS);
+        return nasid_to_compact_node[nasid];
+}
+
+/*
+ * compact_to_nasid_nodeid: map a compact node id back to its NASID via
+ * the compact_to_nasid_node[] table.
+ */
+nasid_t
+compact_to_nasid_nodeid(cnodeid_t cnode)
+{
+        /* Valid compact node ids are 0 .. MAX_COMPACT_NODES-1; the old
+         * "<=" bound let the assert pass on a one-past-the-end index
+         * (compare nasid_to_compact_nodeid(), which uses "<"). */
+        ASSERT(cnode >= 0 && cnode < MAX_COMPACT_NODES);
+        ASSERT(compact_to_nasid_node[cnode] >= 0);
+        return compact_to_nasid_node[cnode];
+}
+
+/*
+ * Routines provided by ml/SN/nvram.c
+ */
+void
+nvram_baseinit(void)
+{
+	FIXME("nvram_baseinit : no-op\n");
+
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/stubs.c linux/arch/ia64/sn/io/stubs.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/stubs.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/stubs.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,257 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/ioerror_handling.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/slotnum.h>
+#include <asm/sn/vector.h>
+
+/******
+ ****** hack defines ......
+ ******/
+
+int pcibr_prefetch_enable_rev, pcibr_wg_enable_rev;
+int default_intr_pri;
+int force_fire_and_forget;
+int ignore_conveyor_override;
+
+#define spinlock_init(x,name) mutex_init(x, MUTEX_DEFAULT, name);
+
+devfs_handle_t dummy_vrtx;	/* Needed for cpuid_to_vertex() in hack.h */
+
+
+/* ARGSUSED */
+void hub_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
+        {FIXME("hub_widgetdev_enable");}
+
+/* ARGSUSED */
+void hub_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
+        {FIXME("hub_widgetdev_shutdown");}
+
+/* ARGSUSED */
+void hub_widget_reset(devfs_handle_t hubv, xwidgetnum_t widget)
+        {FIXME("hub_widget_reset");}
+
+boolean_t
+is_sys_critical_vertex(devfs_handle_t x)
+{
+	FIXME("is_sys_critical_vertex : returns 0");
+	return(0);
+}
+
+char *
+nic_bridge_vertex_info(devfs_handle_t v, nic_data_t mcr)
+{
+	FIXME("nic_bridge_vertex_info : returns NULL");
+	return((char *)0);
+}
+
+void *
+kmem_alloc_node(register size_t size, register int flags, cnodeid_t node)
+{
+        /* Allocates on node 'node' */
+	/* Stub: 'flags' and 'node' are ignored -- allocation is NOT
+	 * node-local; it just falls through to kmalloc(GFP_KERNEL). */
+	FIXME("kmem_alloc_node : use kmalloc");
+	return(kmalloc(size, GFP_KERNEL));
+}
+
+/*
+ * kmem_zalloc_node: zeroed allocation stub.  'flags' and 'node' are
+ * ignored (not node-local), but the zalloc contract -- returning zeroed
+ * memory -- is now honored; the old stub returned uninitialized memory.
+ */
+void *
+kmem_zalloc_node(register size_t size, register int flags, cnodeid_t node)
+{
+	void *ptr;
+
+	FIXME("kmem_zalloc_node : use kmalloc");
+	ptr = kmalloc(size, GFP_KERNEL);
+	if (ptr != NULL)
+		memset(ptr, 0, size);
+	return(ptr);
+}
+
+/*
+ * kmem_free: free memory obtained from kmem_alloc_node()/kmem_zalloc_node().
+ * 'size' is unused -- Linux kfree() does not need it.
+ */
+void
+kmem_free(void *where, int size)
+{
+	FIXME("kmem_free : use kfree");
+	/* was "return(kfree(where));" -- a void function must not return
+	 * the value of an expression (constraint violation in C89). */
+	kfree(where);
+}
+
+
+void *
+kmem_zone_alloc(register zone_t *zone, int flags)
+{
+	FIXME("kmem_zone_alloc : return null");
+	return((void *)0);
+}
+
+void
+kmem_zone_free(register zone_t *zone, void *ptr)
+{
+	FIXME("kmem_zone_free : no-op");
+}
+
+/*
+ * kmem_zone_init: stub -- no zone allocator is wired up yet, so callers
+ * always get a NULL zone and must cope with it.
+ */
+zone_t *
+kmem_zone_init(register int size, char *zone_name)
+{
+	/* Diagnostic used to misreport itself as "kmem_zone_free". */
+	FIXME("kmem_zone_init : returns NULL");
+	return((zone_t *)0);
+}
+
+uint64_t
+rmalloc(struct map *mp, size_t size)
+{
+	FIXME("rmalloc : returns NULL");
+	return((uint64_t)0);
+}
+
+void
+rmfree(struct map *mp, size_t size, uint64_t a)
+{
+	FIXME("rmfree : no-op");
+}
+
+struct map *
+rmallocmap(uint64_t mapsiz)
+{
+	FIXME("rmallocmap : returns NULL");
+	return((struct map *)0);
+}
+
+void
+rmfreemap(struct map *mp)
+{
+	FIXME("rmfreemap : no-op");
+}
+
+/*
+ * compare_and_swap_ptr: if *location equals old_ptr, store new_ptr and
+ * return 1; otherwise return 0.  As the FIXME says, this is NOT atomic:
+ * another CPU can change *location between the compare and the store.
+ */
+int
+compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
+{
+	FIXME("compare_and_swap_ptr : NOT ATOMIC");
+	if (*location == old_ptr) {
+		*location = new_ptr;
+		return(1);
+	}
+	else
+		return(0);
+}
+
+void *
+swap_ptr(void **loc, void *new)
+{
+	FIXME("swap_ptr : returns null");
+	return((void *)0);
+}
+
+/* For ml/SN/SN1/slots.c */
+/* Stub: always reports slot 0.  (Old code returned "(unsigned char)NULL",
+ * casting a pointer constant to an integer; a plain 0 is what was meant.) */
+/* ARGSUSED */
+slotid_t get_widget_slotnum(int xbow, int widget)
+        {FIXME("get_widget_slotnum"); return (slotid_t)0;}
+
+/* For router */
+int
+router_init(cnodeid_t cnode,int writeid, void *npda_rip)
+        {FIXME("router_init"); return(0);}
+
+/* From io/ioerror_handling.c */
+error_return_code_t
+sys_critical_graph_vertex_add(devfs_handle_t parent, devfs_handle_t child)
+	{FIXME("sys_critical_graph_vertex_add"); return(0);}
+
+/* From io/ioc3.c */
+devfs_handle_t
+ioc3_console_vhdl_get(void)
+	{FIXME("ioc3_console_vhdl_get"); return( (devfs_handle_t)-1);}
+
+
+#if 0
+#define io_splock(l) 1
+#define io_spunlock(l,s)
+
+#define spinlock_destroy(a)     /* needed by pcibr_detach() */
+#define mutex_spinlock(a) 0
+#define mutex_spinunlock(a,b)
+#define mutex_init(a,b,c)               ;
+#define mutex_lock(a,b)                 ;
+#define mutex_unlock(a)                 ;
+#define dev_to_vhdl(dev) 0
+#define get_timestamp() 0
+#define us_delay(a)
+#define v_mapphys(a,b,c) 0
+#define splhi()  0
+#define splx(s)
+#define spinlock_init(x,name) mutex_init(x, MUTEX_DEFAULT, name);
+#endif /* 0 */
+
+int
+cap_able(uint64_t x)
+{
+	FIXME("cap_able : returns 1");
+	return(1);
+}
+
+int
+cap_able_cred(uint64_t a, uint64_t b)
+{
+	FIXME("cap_able_cred : returns 1");
+	return(1);
+}
+
+void
+nic_vmc_check(devfs_handle_t vhdl, char *nicinfo)
+{
+
+	FIXME("nic_vmc_check\n");
+
+}
+
+char *
+nic_vertex_info_get(devfs_handle_t v)
+{
+
+	FIXME("nic_vertex_info_get\n");
+	return(NULL);
+
+}
+
+int
+vector_read_node(net_vec_t dest, nasid_t nasid,
+             int write_id, int address,
+             uint64_t *value)
+{
+	FIXME("vector_read_node\n");
+	return(0);
+}
+
+int
+vector_write_node(net_vec_t dest, nasid_t nasid,
+              int write_id, int address,
+              uint64_t value)
+{
+	FIXME("vector_write_node\n");
+	return(0);
+}
+
+/*
+ * atomicAddInt: add 'value' to *int_ptr and return 0.  NOT atomic --
+ * plain read-modify-write (the FIXME was silenced because it is hot).
+ */
+int
+atomicAddInt(int *int_ptr, int value)
+{
+//	FIXME("atomicAddInt : simple add\n");
+	*int_ptr += value;
+	return(0);
+}
+
+/*
+ * atomicClearInt: clear the bits of 'value' in *int_ptr and return 0.
+ * NOT atomic -- plain read-modify-write, as the FIXME warns.
+ */
+int
+atomicClearInt(int *int_ptr, int value)
+{
+	FIXME("atomicClearInt : simple clear\n");
+	*int_ptr &= ~value;
+	return(0);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/xbow.c linux/arch/ia64/sn/io/xbow.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/xbow.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/xbow.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1866 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/xtalk/xtalk_private.h>
+
+#define DEBUG		1
+#define XBOW_DEBUG	1
+
+
+/*
+ * Files needed to get the device driver entry points
+ */
+
+/* #include <asm/cred.h> */
+
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/xtalk/xswitch.h>
+#include <asm/sn/xtalk/xwidget.h>
+
+#include <asm/sn/prio.h>
+#include <asm/sn/hcl_util.h>
+
+
+#define NEW(ptr)	(ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
+#define DEL(ptr)	(kfree(ptr))
+
+int                     xbow_devflag = D_MP;
+
+/*
+ * This file supports the Xbow chip.  Main functions: initialization,
+ * error handling, and GBR.
+ */
+
+/*
+ * each vertex corresponding to an xbow chip
+ * has a "fastinfo" pointer pointing at one
+ * of these things.
+ */
+typedef struct xbow_soft_s *xbow_soft_t;
+
+struct xbow_soft_s {
+    devfs_handle_t            conn;	/* our connection point */
+    devfs_handle_t            vhdl;	/* xbow's private vertex */
+    devfs_handle_t            busv;	/* the xswitch vertex */
+    xbow_t                 *base;	/* PIO pointer to crossbow chip */
+    char                   *name;	/* hwgraph name */
+
+    xbow_perf_t             xbow_perfcnt[XBOW_PERF_COUNTERS];
+    xbow_perf_link_t        xbow_perflink[MAX_XBOW_PORTS];
+    xbow_link_status_t      xbow_link_status[MAX_XBOW_PORTS];
+    lock_t                  xbow_perf_lock;
+    int                     link_monitor;
+    widget_cfg_t	   *wpio[MAX_XBOW_PORTS];	/* cached PIO pointer */
+
+    /* Bandwidth allocation state. Bandwidth values are for the
+     * destination port since contention happens there.
+     * Implicit mapping from xbow ports (8..f) -> (0..7) array indices.
+     */
+    lock_t		    xbow_bw_alloc_lock;		/* bw allocation lock */
+    unsigned long long	    bw_hiwm[MAX_XBOW_PORTS];	/* hiwater mark values */
+    unsigned long long      bw_cur_used[MAX_XBOW_PORTS]; /* bw used currently */
+};
+
+#define xbow_soft_set(v,i)	hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
+#define xbow_soft_get(v)	((xbow_soft_t)hwgraph_fastinfo_get((v)))
+
+/*
+ * Function Table of Contents
+ */
+
+void                    xbow_mlreset(xbow_t *);
+void                    xbow_init(void);
+int                     xbow_attach(devfs_handle_t);
+
+int                     xbow_open(devfs_handle_t *, int, int, cred_t *);
+int                     xbow_close(devfs_handle_t, int, int, cred_t *);
+
+int                     xbow_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
+int                     xbow_unmap(devfs_handle_t, vhandl_t *);
+int                     xbow_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
+
+int                     xbow_widget_present(xbow_t *, int);
+static int              xbow_link_alive(xbow_t *, int);
+devfs_handle_t            xbow_widget_lookup(devfs_handle_t, int);
+
+#ifdef LATER
+static void             xbow_setwidint(xtalk_intr_t);
+static void             xbow_errintr_handler(intr_arg_t);
+static error_handler_f  xbow_error_handler;
+#endif
+void                    xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
+
+
+
+void                    xbow_update_perf_counters(devfs_handle_t);
+xbow_perf_link_t       *xbow_get_perf_counters(devfs_handle_t);
+int                     xbow_enable_perf_counter(devfs_handle_t, int, int, int);
+xbow_link_status_t     *xbow_get_llp_status(devfs_handle_t);
+void                    xbow_update_llp_status(devfs_handle_t);
+
+int                     xbow_disable_llp_monitor(devfs_handle_t);
+int                     xbow_enable_llp_monitor(devfs_handle_t);
+
+#ifdef IRIX
+int			xbow_prio_bw_alloc(devfs_handle_t, xwidgetnum_t, xwidgetnum_t,
+				unsigned long long, unsigned long long);
+#else
+int                     xbow_prio_bw_alloc(devfs_handle_t, xwidgetnum_t, xwidgetnum_t,
+                                unsigned long long, unsigned long long);
+#endif
+
+
+xswitch_reset_link_f    xbow_reset_link;
+
+void                    idbg_xbowregs(int64_t);
+
+xswitch_provider_t      xbow_provider =
+{
+    xbow_reset_link,
+};
+
+/*
+ *    xbow_mlreset: called at mlreset time if the
+ *      platform specific code determines that there is
+ *      a crossbow in a critical path that must be
+ *      functional before the driver would normally get
+ *      the device properly set up.
+ *
+ *      what do we need to do, that the boot prom can
+ *      not be counted on to have already done, that is
+ *      generic across all platforms using crossbows?
+ */
+/*ARGSUSED */
+void
+xbow_mlreset(xbow_t * xbow)
+{
+	/* Nothing needed beyond what the PROM already did; see the
+	 * rationale in the comment block above. */
+}
+
+/*
+ *    xbow_init: called with the rest of the device
+ *      driver XXX_init routines. This platform *might*
+ *      have a Crossbow chip, or even several, but it
+ *      might have none. Register with the crosstalk
+ *      generic provider so when we encounter the chip
+ *      the right magic happens.
+ */
+void
+xbow_init(void)
+{
+
+#if DEBUG && ATTACH_DEBUG
+    printf("xbow_init\n");
+#endif
+
+    /* Register twice with the crosstalk layer: once for the Xbridge part
+     * number (manufacturer wildcarded with 0) ... */
+    xwidget_driver_register(XXBOW_WIDGET_PART_NUM,
+			    0, /* XXBOW_WIDGET_MFGR_NUM, */
+			    "xbow_",
+			    CDL_PRI_HI);	/* attach before friends */
+
+    /* ... and once for the original Xbow part/manufacturer pair. */
+    xwidget_driver_register(XBOW_WIDGET_PART_NUM,
+			    XBOW_WIDGET_MFGR_NUM,
+			    "xbow_",
+			    CDL_PRI_HI);	/* attach before friends */
+}
+
+#ifdef XBRIDGE_REGS_SIM
+/*    xbow_set_simulated_regs: sets xbow regs as needed
+ *	for powering through the boot
+ */
+void
+xbow_set_simulated_regs(xbow_t *xbow, int port)
+{
+    /*
+     * turn on link
+     */
+    xbow->xb_link(port).link_status = (1<<31);
+    /*
+     * and give it a live widget too
+     */
+    xbow->xb_link(port).link_aux_status = XB_AUX_STAT_PRESENT;
+    /*
+     * zero the link control reg
+     */
+    xbow->xb_link(port).link_control = 0x0;
+}
+#endif /* XBRIDGE_REGS_SIM */
+
+/*
+ *    xbow_attach: the crosstalk provider has
+ *      determined that there is a crossbow widget
+ *      present, and has handed us the connection
+ *      point for that vertex.
+ *
+ *      We not only add our own vertex, but add
+ *      some "xtalk switch" data to the switch
+ *      vertex (at the connect point's parent) if
+ *      it does not have any.
+ */
+
+/*ARGSUSED */
+int
+xbow_attach(devfs_handle_t conn)
+{
+    /*REFERENCED */
+    devfs_handle_t            vhdl;
+    devfs_handle_t            busv;
+    xbow_t                 *xbow;
+    xbow_soft_t             soft;
+    int                     port;
+    xswitch_info_t          info;
+#ifdef LATER
+    xtalk_intr_t            intr_hdl;
+    device_desc_t           dev_desc;
+#endif
+    char                    devnm[MAXDEVNAME], *s;
+    xbowreg_t               id;
+    int                     rev;
+    int			    i;
+    int			    xbow_num;
+	
+#if DEBUG && ATTACH_DEBUG
+    cmn_err(CE_CONT, "%v: xbow_attach\n", conn);
+#endif
+
+    /*
+     * Get a PIO pointer to the base of the crossbow
+     * chip.
+     */
+#ifdef XBRIDGE_REGS_SIM
+    printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: allocating %ld bytes for xbow_s\n", sizeof(xbow_t));
+    xbow = (xbow_t *) kmalloc(sizeof(xbow_t), GFP_KERNEL);
+    /*
+     * turn on ports e and f like in a real live ibrick
+     */
+    xbow_set_simulated_regs(xbow, 0xe);
+    xbow_set_simulated_regs(xbow, 0xf);
+#else
+    xbow = (xbow_t *) xtalk_piotrans_addr(conn, 0, 0, sizeof(xbow_t), 0);
+#endif /* XBRIDGE_REGS_SIM */
+
+    /*
+     * Locate the "switch" vertex: it is the parent
+     * of our connection point.
+     */
+    busv = hwgraph_connectpt_get(conn);
+    printk("xbow_attach: Bus Vertex 0x%p, conn 0x%p, xbow register 0x%p wid= 0x%x\n", busv, conn, xbow, *(volatile u32 *)xbow);
+
+    ASSERT(busv != GRAPH_VERTEX_NONE);
+
+    /*
+     * Create our private vertex, and connect our
+     * driver information to it. This makes it possible
+     * for diagnostic drivers to open the crossbow
+     * vertex for access to registers.
+     */
+
+    /*
+     * We need to teach xbow drivers to provide the right set of
+     * file ops.
+     */
+    vhdl = NULL;
+    vhdl = hwgraph_register(conn, EDGE_LBL_XBOW,
+                        0, DEVFS_FL_AUTO_DEVNUM,
+                        0, 0,
+                        S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+                        /* &hcl_fops */ (void *)&vhdl, NULL);
+    if (!vhdl) {
+        printk("xbow_attach: Unable to create char device for xbow conn 0x%p\n",
+                conn);
+    }
+
+    /*
+     * Allocate the soft state structure and attach
+     * it to the xbow's vertex
+     */
+    NEW(soft);
+    soft->conn = conn;
+    soft->vhdl = vhdl;
+    soft->busv = busv;
+    soft->base = xbow;
+    /* does the universe really need another macro?  */
+    /* xbow_soft_set(vhdl, (arbitrary_info_t) soft); */
+    hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) soft);
+
+#define XBOW_NUM_SUFFIX_FORMAT	"[xbow# %d]"
+
+    /* Add xbow number as a suffix to the hwgraph name of the xbow.
+     * This is helpful while looking at the error/warning messages.
+     */
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+    xbow_num = 0;
+#else
+    xbow_num = xswitch_id_get(busv);
+#endif
+
+    /*
+     * get the name of this xbow vertex and keep the info.
+     * This is needed during errors and interupts, but as
+     * long as we have it, we can use it elsewhere.
+     */
+    s = dev_to_name(vhdl, devnm, MAXDEVNAME);
+    soft->name = kmalloc(strlen(s) + strlen(XBOW_NUM_SUFFIX_FORMAT) + 1, 
+			    GFP_KERNEL);
+    sprintf(soft->name,"%s"XBOW_NUM_SUFFIX_FORMAT, s,xbow_num);
+
+#ifdef XBRIDGE_REGS_SIM
+    /* my o200/ibrick has id=0x2d002049, but XXBOW_WIDGET_PART_NUM is defined
+     * as 0xd000, so I'm using that for the partnum bitfield.
+     */
+    printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: need xb_wid_id value!!\n");
+    id = 0x2d000049;
+#else
+    id = xbow->xb_wid_id;
+#endif /* XBRIDGE_REGS_SIM */
+    rev = XWIDGET_PART_REV_NUM(id);
+
+    /*
+     * Print the revision if DEBUG, or SHOW_REVS and kdebug,
+     * or the xbow is downrev.
+     *
+     * If xbow is downrev, make it a WARNING that the
+     * Crossbow is DOWNREV: these chips are not good
+     * to have around, and the operator should be told.
+     */
+#ifdef IRIX
+#if !DEBUG
+    if (
+#if SHOW_REVS
+	   (kdebug) ||
+#endif	/* SHOW_REVS */
+	   (rev < XBOW_REV_1_1))
+#endif	/* !DEBUG  */
+	cmn_err((rev < XBOW_REV_1_1) ? CE_WARN : CE_CONT,
+		"%sCrossbow ASIC: rev %s (code=%d) at %s%s",
+		(rev < XBOW_REV_1_1) ? "DOWNREV " : "",
+		(rev == XBOW_REV_1_0) ? "1.0" :
+		(rev == XBOW_REV_1_1) ? "1.1" :
+		(rev == XBOW_REV_1_2) ? "1.2" :
+		(rev == XBOW_REV_1_3) ? "1.3" :
+		(rev == XBOW_REV_2_0) ? "2.0" :
+		(rev == XXBOW_PART_REV_1_0) ? "Xbridge 1.0" :
+		(rev == XXBOW_PART_REV_2_0) ? "Xbridge 2.0" :
+		"unknown",
+		rev, soft->name,
+		(rev < XBOW_REV_1_1) ? "" : "\n");
+#endif	/* IRIX */
+
+    spinlock_init(&soft->xbow_perf_lock, "xbow_perf_lock");
+    soft->xbow_perfcnt[0].xp_perf_reg = &xbow->xb_perf_ctr_a;
+    soft->xbow_perfcnt[1].xp_perf_reg = &xbow->xb_perf_ctr_b;
+
+    /* Initialization for GBR bw allocation */
+    spinlock_init(&soft->xbow_bw_alloc_lock, "xbow_bw_alloc_lock");
+
+#define	XBOW_8_BIT_PORT_BW_MAX		(400 * 1000 * 1000)	/* 400 MB/s */
+#define XBOW_16_BIT_PORT_BW_MAX		(800 * 1000 * 1000)	/* 800 MB/s */
+
+    /* Set bandwidth hiwatermark and current values */
+    for (i = 0; i < MAX_XBOW_PORTS; i++) {
+	soft->bw_hiwm[i] = XBOW_16_BIT_PORT_BW_MAX;	/* for now */
+	soft->bw_cur_used[i] = 0;
+    }
+
+    /*
+     * attach the crossbow error interrupt.
+     */
+#ifdef LATER
+    dev_desc = device_desc_dup(vhdl);
+    device_desc_flags_set(dev_desc,
+			  device_desc_flags_get(dev_desc) | D_INTR_ISERR);
+    device_desc_intr_name_set(dev_desc, "Crossbow error");
+
+    intr_hdl = xtalk_intr_alloc(conn, dev_desc, vhdl);
+    ASSERT(intr_hdl != NULL);
+
+    xtalk_intr_connect(intr_hdl,
+		       (intr_func_t) xbow_errintr_handler,
+		       (intr_arg_t) soft,
+		       (xtalk_intr_setfunc_t) xbow_setwidint,
+		       (void *) xbow,
+		       (void *) 0);
+    device_desc_free(dev_desc);
+
+    xwidget_error_register(conn, xbow_error_handler, soft);
+
+#else
+    printk("xbow_attach: Fixme: we bypassed attaching xbow error interrupt.\n");
+#endif /* LATER */
+
+    /*
+     * Enable xbow error interrupts
+     */
+    xbow->xb_wid_control = (XB_WID_CTRL_REG_ACC_IE |
+			    XB_WID_CTRL_XTALK_IE);
+
+    /*
+     * take a census of the widgets present,
+     * leaving notes at the switch vertex.
+     */
+    info = xswitch_info_new(busv);
+
+    for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
+	 port < MAX_PORT_NUM; ++port) {
+	if (!xbow_link_alive(xbow, port)) {
+#if DEBUG && XBOW_DEBUG
+	    printk(KERN_INFO "0x%p link %d is not alive\n",
+		    busv, port);
+#endif
+	    continue;
+	}
+	if (!xbow_widget_present(xbow, port)) {
+#if DEBUG && XBOW_DEBUG
+	    printk(KERN_INFO "0x%p link %d is alive but no widget is present\n", busv, port);
+#endif
+	    continue;
+	}
+#if DEBUG && XBOW_DEBUG
+	printk(KERN_INFO "0x%p link %d has a widget\n",
+		busv, port);
+#endif
+
+	xswitch_info_link_is_ok(info, port);
+	/*
+	 * Turn some error interrupts on
+	 * and turn others off. The PROM has
+	 * some things turned on we don't
+	 * want to see (bandwidth allocation
+	 * errors for instance); so if it
+	 * is not listed here, it is not on.
+	 */
+	xbow->xb_link(port).link_control =
+	    ( (xbow->xb_link(port).link_control
+	/*
+	 * Turn off these bits; they are non-fatal,
+	 * but we might want to save some statistics
+	 * on the frequency of these errors.
+	 * XXX FIXME XXX
+	 */
+	    & ~XB_CTRL_RCV_CNT_OFLOW_IE
+	    & ~XB_CTRL_XMT_CNT_OFLOW_IE
+	    & ~XB_CTRL_BNDWDTH_ALLOC_IE
+	    & ~XB_CTRL_RCV_IE)
+	/*
+	 * These are the ones we want to turn on.
+	 */
+	    | (XB_CTRL_ILLEGAL_DST_IE
+	    | XB_CTRL_OALLOC_IBUF_IE
+	    | XB_CTRL_XMT_MAX_RTRY_IE
+	    | XB_CTRL_MAXREQ_TOUT_IE
+	    | XB_CTRL_XMT_RTRY_IE
+	    | XB_CTRL_SRC_TOUT_IE) );
+    }
+
+    xswitch_provider_register(busv, &xbow_provider);
+
+    return 0;				/* attach successful */
+}
+
+/*ARGSUSED */
+int
+xbow_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
+{
+    /* Only device-management-capable credentials may open the xbow
+     * vertex; everyone else gets EPERM. */
+    if (!_CAP_CRABLE((uint64_t)credp, CAP_DEVICE_MGT))
+	return EPERM;
+    return 0;
+
+}
+
+/*ARGSUSED */
+int
+xbow_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
+{
+    /* No per-open state to tear down. */
+    return 0;
+}
+
+/*ARGSUSED */
+int
+xbow_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
+{
+    devfs_handle_t            vhdl = dev_to_vhdl(dev);
+    xbow_soft_t             soft = xbow_soft_get(vhdl);
+    int                     error;
+
+    ASSERT(soft);
+    /* Round len up to whole pages before mapping the register space. */
+    len = ctob(btoc(len));
+    /* XXX- this ignores the offset!!! */
+    error = v_mapphys(vt, (void *) soft->base, len);
+    return error;
+}
+
+/*ARGSUSED */
+int
+xbow_unmap(devfs_handle_t dev, vhandl_t *vt)
+{
+    /* Nothing to undo; xbow_map() established no bookkeeping. */
+    return 0;
+}
+
+/* This contains special-case code for grio. There are plans to make
+ * this general sometime in the future, but till then this should
+ * be good enough.
+ */
+xwidgetnum_t
+xbow_widget_num_get(devfs_handle_t dev)
+{
+	devfs_handle_t	tdev;
+	char		devname[MAXDEVNAME];
+	xwidget_info_t	xwidget_info;
+	int		i;
+#if IP27
+	cnodeid_t	cnodeid = CNODEID_NONE;
+#endif
+
+	vertex_to_name(dev, devname, MAXDEVNAME);
+
+#if IP30
+	/* If there is a ".connection" edge from this vertex,
+	 * then it must be "/hw/node" vertex. Return the widget
+	 * number for heart: 8.
+	 */
+	if (hwgraph_edge_get(dev, EDGE_LBL_CONN, &tdev) ==
+			GRAPH_SUCCESS) {
+		return ((xwidgetnum_t) 8);
+	}
+#elif IP27
+	if ((cnodeid = nodevertex_to_cnodeid(dev)) != CNODEID_NONE) {
+		ASSERT(cnodeid < maxnodes);
+		return(hub_widget_id(COMPACT_TO_NASID_NODEID(cnodeid)));
+	}
+#endif
+
+	/* If this is a pci controller vertex, traverse up using
+	 * the ".." links to get to the widget.
+	 */
+	if (strstr(devname, EDGE_LBL_PCI) &&
+			strstr(devname, EDGE_LBL_CONTROLLER)) {
+		tdev = dev;
+		for (i=0; i< 2; i++) {
+			if (hwgraph_edge_get(tdev,
+				HWGRAPH_EDGELBL_DOTDOT, &tdev) !=
+					GRAPH_SUCCESS)
+				return XWIDGET_NONE;
+		}
+
+		if ((xwidget_info = xwidget_info_chk(tdev)) != NULL) {
+			return (xwidget_info_id_get(xwidget_info));
+		} else {
+			return XWIDGET_NONE;
+		}
+	}
+
+	return XWIDGET_NONE;
+}
+
+int
+xbow_ioctl(devfs_handle_t dev,
+	   int cmd,
+	   void *arg,
+	   int flag,
+	   struct cred *cr,
+	   int *rvalp)
+{
+    devfs_handle_t            vhdl;
+    int                     error = 0;
+
+#if defined (DEBUG)
+    int                     rc;
+    devfs_handle_t            conn;
+    struct xwidget_info_s  *xwidget_info;
+    xbow_soft_t             xbow_soft;
+#endif
+    *rvalp = 0;
+
+    vhdl = dev_to_vhdl(dev);
+#if defined (DEBUG)
+    xbow_soft = xbow_soft_get(vhdl);
+    conn = xbow_soft->conn;
+
+    xwidget_info = xwidget_info_get(conn);
+    ASSERT_ALWAYS(xwidget_info != NULL);
+
+    rc = xwidget_hwid_is_xswitch(&xwidget_info->w_hwid);
+    ASSERT_ALWAYS(rc != 0);
+#endif
+    switch (cmd) {
+#ifdef IRIX
+    case XBOWIOC_PERF_ENABLE:
+    case XBOWIOC_PERF_DISABLE:
+	{
+	    struct xbow_perfarg_t   xbow_perf_en;
+
+	    if (!_CAP_CRABLE(cr, CAP_DEVICE_MGT)) {
+		error = EPERM;
+		break;
+	    }
+	    if ((flag & FWRITE) == 0) {
+		error = EBADF;
+		break;
+	    }
+	    if (COPYIN(arg, &xbow_perf_en, sizeof(xbow_perf_en))) {
+		error = EFAULT;
+		break;
+	    }
+	    if (error = xbow_enable_perf_counter(vhdl,
+						 xbow_perf_en.link,
+						 (cmd == XBOWIOC_PERF_DISABLE) ? 0 : xbow_perf_en.mode,
+						 xbow_perf_en.counter)) {
+		error = EINVAL;
+		break;
+	    }
+	    break;
+	}
+#endif
+
+#ifdef IRIX
+    case XBOWIOC_PERF_GET:
+	{
+	    xbow_perf_link_t       *xbow_perf_cnt;
+
+	    if ((flag & FREAD) == 0) {
+		error = EBADF;
+		break;
+	    }
+	    xbow_perf_cnt = xbow_get_perf_counters(vhdl);
+	    ASSERT_ALWAYS(xbow_perf_cnt != NULL);
+
+	    if (COPYOUT((void *) xbow_perf_cnt, (void *) arg,
+			MAX_XBOW_PORTS * sizeof(xbow_perf_link_t))) {
+		error = EFAULT;
+		break;
+	    }
+	    break;
+	}
+#endif
+
+    case XBOWIOC_LLP_ERROR_ENABLE:
+	if (!_CAP_CRABLE((uint64_t)cr, CAP_DEVICE_MGT)) {
+	    error = EPERM;
+	    break;
+	}
+	if ((error = xbow_enable_llp_monitor(vhdl)) != 0)
+	    error = EINVAL;
+
+	break;
+
+    case XBOWIOC_LLP_ERROR_DISABLE:
+
+	if (!_CAP_CRABLE((uint64_t)cr, CAP_DEVICE_MGT)) {
+	    error = EPERM;
+	    break;
+	}
+	if ((error = xbow_disable_llp_monitor(vhdl)) != 0)
+	    error = EINVAL;
+
+	break;
+
+#ifdef IRIX
+    case XBOWIOC_LLP_ERROR_GET:
+	{
+	    xbow_link_status_t     *xbow_llp_status;
+
+	    if ((flag & FREAD) == 0) {
+		error = EBADF;
+		break;
+	    }
+	    xbow_llp_status = xbow_get_llp_status(vhdl);
+	    ASSERT_ALWAYS(xbow_llp_status != NULL);
+
+	    if (COPYOUT((void *) xbow_llp_status, (void *) arg,
+			MAX_XBOW_PORTS * sizeof(xbow_link_status_t))) {
+		error = EFAULT;
+		break;
+	    }
+	    break;
+	}
+#endif
+
+#ifdef IRIX
+    case GIOCSETBW:
+	{
+	    grio_ioctl_info_t info;
+	    xwidgetnum_t src_widgetnum, dest_widgetnum;
+
+	    if (!cap_able(CAP_DEVICE_MGT)) {
+		error = EPERM;
+		break;
+	    }
+
+	    if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
+		error = EFAULT;
+		break;
+	    }
+#ifdef GRIO_DEBUG
+	    printf("xbow:: prev_vhdl: %d next_vhdl: %d reqbw: %lld\n",
+			info.prev_vhdl, info.next_vhdl, info.reqbw);
+#endif /* GRIO_DEBUG */
+
+	    src_widgetnum = xbow_widget_num_get(info.prev_vhdl);
+	    dest_widgetnum = xbow_widget_num_get(info.next_vhdl);
+
+	    /* Bandwidth allocation is bi-directional. Since bandwidth
+	     * reservations have already been done at an earlier stage,
+	     * we cannot fail here for lack of bandwidth.
+	     */
+	    xbow_prio_bw_alloc(dev, src_widgetnum, dest_widgetnum,
+			0, info.reqbw);
+	    xbow_prio_bw_alloc(dev, dest_widgetnum, src_widgetnum,
+			0, info.reqbw);
+
+	    break;
+	}
+
+    case GIOCRELEASEBW:
+	{
+	    grio_ioctl_info_t info;
+	    xwidgetnum_t src_widgetnum, dest_widgetnum;
+
+	    if (!cap_able(CAP_DEVICE_MGT)) {
+		error = EPERM;
+		break;
+	    }
+
+	    if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
+		error = EFAULT;
+		break;
+	    }
+#ifdef GRIO_DEBUG
+	    printf("xbow:: prev_vhdl: %d next_vhdl: %d reqbw: %lld\n",
+			info.prev_vhdl, info.next_vhdl, info.reqbw);
+#endif /* GRIO_DEBUG */
+
+	    src_widgetnum = xbow_widget_num_get(info.prev_vhdl);
+	    dest_widgetnum = xbow_widget_num_get(info.next_vhdl);
+
+	    /* Bandwidth reservation is bi-directional. Hence, remove
+	     * bandwidth reservations for both directions.
+	     */
+	    xbow_prio_bw_alloc(dev, src_widgetnum, dest_widgetnum,
+			info.reqbw, (-1 * info.reqbw));
+	    xbow_prio_bw_alloc(dev, dest_widgetnum, src_widgetnum,
+			info.reqbw, (-1 * info.reqbw));
+
+	    break;
+	}
+#endif
+
+    default:
+	break;
+
+    }
+    return error;
+}
+
+/*
+ * xbow_widget_present: See if a device is present
+ * on the specified port of this crossbow.
+ */
+int
+xbow_widget_present(xbow_t * xbow, int port)
+{
+	/* On the simulator only ports 14 and 15 are populated; on real
+	 * hardware consult the link's aux status PRESENT bit. */
+	if ( IS_RUNNING_ON_SIMULATOR() ) {
+		if ( (port == 14) || (port == 15) ) {
+			return 1;
+		}
+		else {
+			return 0;
+		}
+	}
+	else {
+		return xbow->xb_link(port).link_aux_status & XB_AUX_STAT_PRESENT;
+	}
+}
+
+/* Read the port's link status register and report its link_alive bit. */
+static int
+xbow_link_alive(xbow_t * xbow, int port)
+{
+    xbwX_stat_t             xbow_linkstat;
+
+    xbow_linkstat.linkstatus = xbow->xb_link(port).link_status;
+    return (xbow_linkstat.link_alive);
+}
+
+/*
+ * xbow_widget_lookup
+ *      Lookup the edges connected to the xbow specified, and
+ *      retrieve the handle corresponding to the widgetnum
+ *      specified.
+ *      If not found, return 0.
+ */
+devfs_handle_t
+xbow_widget_lookup(devfs_handle_t vhdl,
+		   int widgetnum)
+{
+    xswitch_info_t          xswitch_info;
+    devfs_handle_t            conn;
+
+    /* The switch vertex's xswitch_info records one connect point per
+     * widget number; fetch the one asked for (0 if absent). */
+    xswitch_info = xswitch_info_get(vhdl);
+    conn = xswitch_info_vhdl_get(xswitch_info, widgetnum);
+    return conn;
+}
+
+/*
+ * xbow_setwidint: called when xtalk
+ * is establishing or migrating our
+ * interrupt service.
+ */
+#ifdef LATER
+static void
+xbow_setwidint(xtalk_intr_t intr)
+{
+    xwidgetnum_t            targ = xtalk_intr_target_get(intr);
+    iopaddr_t               addr = xtalk_intr_addr_get(intr);
+    xtalk_intr_vector_t     vect = xtalk_intr_vector_get(intr);
+    xbow_t                 *xbow = (xbow_t *) xtalk_intr_sfarg_get(intr);
+
+    xbow_intr_preset((void *) xbow, 0, targ, addr, vect);
+}
+#endif	/* LATER */
+
+/*
+ * xbow_intr_preset: called during mlreset time
+ * if the platform specific code needs to route
+ * an xbow interrupt before the xtalk infrastructure
+ * is available for use.
+ *
+ * Also called from xbow_setwidint, so we don't
+ * replicate the guts of the routine.
+ *
+ * XXX- probably should be renamed xbow_wid_intr_set or
+ * something to reduce confusion.
+ */
+/*ARGSUSED3 */
+void
+xbow_intr_preset(void *which_widget,
+		 int which_widget_intr,
+		 xwidgetnum_t targ,
+		 iopaddr_t addr,
+		 xtalk_intr_vector_t vect)
+{
+    xbow_t                 *xbow = (xbow_t *) which_widget;
+
+    xbow->xb_wid_int_upper = ((0xFF000000 & (vect << 24)) |
+			      (0x000F0000 & (targ << 16)) |
+			      XTALK_ADDR_TO_UPPER(addr));
+    xbow->xb_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
+}
+
+#define	XEM_ADD_STR(s)		cmn_err(CE_CONT, "%s", (s))
+#define	XEM_ADD_NVAR(n,v)	cmn_err(CE_CONT, "\t%20s: 0x%x\n", (n), (v))
+#define	XEM_ADD_VAR(v)		XEM_ADD_NVAR(#v,(v))
+#define XEM_ADD_IOEF(n) 	if (IOERROR_FIELDVALID(ioe,n))		    \
+				    XEM_ADD_NVAR("ioe." #n,		    \
+						 IOERROR_GETVALUE(ioe,n))
+
+#ifdef IRIX
+static void
+xem_add_ioe(ioerror_t *ioe)
+{
+    XEM_ADD_IOEF(errortype);
+    XEM_ADD_IOEF(widgetnum);
+    XEM_ADD_IOEF(widgetdev);
+    XEM_ADD_IOEF(srccpu);
+    XEM_ADD_IOEF(srcnode);
+    XEM_ADD_IOEF(errnode);
+    XEM_ADD_IOEF(sysioaddr);
+    XEM_ADD_IOEF(xtalkaddr);
+    XEM_ADD_IOEF(busspace);
+    XEM_ADD_IOEF(busaddr);
+    XEM_ADD_IOEF(vaddr);
+    XEM_ADD_IOEF(memaddr);
+    XEM_ADD_IOEF(epc);
+    XEM_ADD_IOEF(ef);
+}
+
+#define XEM_ADD_IOE()	(xem_add_ioe(ioe))
+#endif	/* IRIX */
+
+int                     xbow_xmit_retry_errors = 0;
+
+int
+xbow_xmit_retry_error(xbow_soft_t soft,
+		      int port)
+{
+    xswitch_info_t          info;
+    devfs_handle_t            vhdl;
+    widget_cfg_t           *wid;
+    widgetreg_t             id;
+    int                     part;
+    int                     mfgr;
+
+    wid = soft->wpio[port - BASE_XBOW_PORT];
+    if (wid == NULL) {
+	/* If we can't track down a PIO
+	 * pointer to our widget yet,
+	 * leave our caller knowing that
+	 * we are interested in this
+	 * interrupt if it occurs in
+	 * the future.
+	 */
+	info = xswitch_info_get(soft->busv);
+	if (!info)
+	    return 1;
+	vhdl = xswitch_info_vhdl_get(info, port);
+	if (vhdl == GRAPH_VERTEX_NONE)
+	    return 1;
+	wid = (widget_cfg_t *) xtalk_piotrans_addr
+	    (vhdl, 0, 0, sizeof *wid, 0);
+	if (!wid)
+	    return 1;
+	soft->wpio[port - BASE_XBOW_PORT] = wid;	/* cache the PIO pointer for later interrupts */
+    }
+    id = wid->w_id;
+    part = XWIDGET_PART_NUM(id);
+    mfgr = XWIDGET_MFG_NUM(id);
+
+    /* If this thing is not a Bridge,
+     * do not activate the WAR, and
+     * tell our caller we do not need
+     * to be called again.
+     */
+    if ((part != BRIDGE_WIDGET_PART_NUM) ||
+	(mfgr != BRIDGE_WIDGET_MFGR_NUM)) {
+		/* FIXME: add Xbridge to the WAR.
+		 * Shouldn't hurt anything.  Later need to
+		 * check if we can remove this.
+                 */
+    		if ((part != XBRIDGE_WIDGET_PART_NUM) ||
+		    (mfgr != XBRIDGE_WIDGET_MFGR_NUM))
+			return 0;
+    }
+
+    /* count how many times we
+     * have picked up after
+     * LLP Transmit problems.
+     */
+    xbow_xmit_retry_errors++;
+
+    /* rewrite the control register
+     * to fix things up.
+     */
+    wid->w_control = wid->w_control;	/* deliberate self-assignment: re-issues the write as part of the WAR */
+    wid->w_control;	/* NOTE(review): bare PIO read-back, presumably to flush/stall the write (cf. "stall until written" elsewhere) -- do not "fix" */
+
+    return 1;
+}
+
+/*
+ * xbow_errintr_handler will be called if the xbow
+ * sends an interrupt request to report an error.
+ */
+
+#ifdef LATER
+static void
+xbow_errintr_handler(intr_arg_t arg)
+{
+#ifdef IRIX
+    ioerror_t               ioe[1];
+    xbow_soft_t             soft = (xbow_soft_t) arg;
+    xbow_t                 *xbow = soft->base;
+    xbowreg_t               wid_control;
+    xbowreg_t               wid_stat;
+    xbowreg_t               wid_err_cmdword;
+    xbowreg_t               wid_err_upper;
+    xbowreg_t               wid_err_lower;
+    w_err_cmd_word_u        wid_err;
+    uint64_t                 wid_err_addr;
+
+    int                     fatal = 0;
+    int                     dump_ioe = 0;
+
+    wid_control = xbow->xb_wid_control;
+    wid_stat = xbow->xb_wid_stat_clr;
+    wid_err_cmdword = xbow->xb_wid_err_cmdword;
+    wid_err_upper = xbow->xb_wid_err_upper;
+    wid_err_lower = xbow->xb_wid_err_lower;
+    xbow->xb_wid_err_cmdword = 0;
+
+    wid_err_addr =
+	wid_err_lower
+	| (((iopaddr_t) wid_err_upper
+	    & WIDGET_ERR_UPPER_ADDR_ONLY)
+	   << 32);
+
+    if (wid_stat & XB_WID_STAT_LINK_INTR_MASK) {
+	int                     port;
+
+	wid_err.r = wid_err_cmdword;
+
+	for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
+	     port < MAX_PORT_NUM; port++) {
+	    if (wid_stat & XB_WID_STAT_LINK_INTR(port)) {
+		xb_linkregs_t          *link = &(xbow->xb_link(port));
+		xbowreg_t               link_control = link->link_control;
+		xbowreg_t               link_status = link->link_status_clr;
+		xbowreg_t               link_aux_status = link->link_aux_status;
+		xbowreg_t               link_pend;
+
+		link_pend = link_status & link_control &
+		    (XB_STAT_ILLEGAL_DST_ERR
+		     | XB_STAT_OALLOC_IBUF_ERR
+		     | XB_STAT_RCV_CNT_OFLOW_ERR
+		     | XB_STAT_XMT_CNT_OFLOW_ERR
+		     | XB_STAT_XMT_MAX_RTRY_ERR
+		     | XB_STAT_RCV_ERR
+		     | XB_STAT_XMT_RTRY_ERR
+		     | XB_STAT_MAXREQ_TOUT_ERR
+		     | XB_STAT_SRC_TOUT_ERR
+		    );
+
+		if (link_pend & XB_STAT_ILLEGAL_DST_ERR) {
+		    if (wid_err.f.sidn == port) {
+			IOERROR_INIT(ioe);
+			IOERROR_SETVALUE(ioe, widgetnum, port);
+			IOERROR_SETVALUE(ioe, xtalkaddr, wid_err_addr);
+			if (IOERROR_HANDLED ==
+			    xbow_error_handler(soft,
+					       IOECODE_DMA,
+					       MODE_DEVERROR,
+					       ioe)) {
+			    link_pend &= ~XB_STAT_ILLEGAL_DST_ERR;
+			} else {
+			    dump_ioe++;
+			}
+		    }
+		}
+		/* Xbow/Bridge WAR:
+		 * if the bridge signals an LLP Transmitter Retry,
+		 * rewrite its control register.
+		 * If someone else triggers this interrupt,
+		 * ignore (and disable) the interrupt.
+		 */
+		if (link_pend & XB_STAT_XMT_RTRY_ERR) {
+		    if (!xbow_xmit_retry_error(soft, port)) {
+			link_control &= ~XB_CTRL_XMT_RTRY_IE;
+			link->link_control = link_control;
+			link->link_control;	/* stall until written */
+		    }
+		    link_pend &= ~XB_STAT_XMT_RTRY_ERR;
+		}
+		if (link_pend) {
+		    devfs_handle_t	xwidget_vhdl;
+		    char		*xwidget_name;
+		    
+		    /* Get the widget name corresponding to the current
+		     * xbow link.
+		     */
+		    xwidget_vhdl = xbow_widget_lookup(soft->busv,port);
+		    xwidget_name = xwidget_name_get(xwidget_vhdl);
+
+#ifdef IRIX
+		    cmn_err(CE_CONT,
+			    "%s port %X[%s] XIO Bus Error",
+			    soft->name, port, xwidget_name);
+		    if (link_status & XB_STAT_MULTI_ERR)
+			XEM_ADD_STR("\tMultiple Errors\n");
+		    if (link_status & XB_STAT_ILLEGAL_DST_ERR)
+			XEM_ADD_STR("\tInvalid Packet Destination\n");
+		    if (link_status & XB_STAT_OALLOC_IBUF_ERR)
+			XEM_ADD_STR("\tInput Overallocation Error\n");
+		    if (link_status & XB_STAT_RCV_CNT_OFLOW_ERR)
+			XEM_ADD_STR("\tLLP receive error counter overflow\n");
+		    if (link_status & XB_STAT_XMT_CNT_OFLOW_ERR)
+			XEM_ADD_STR("\tLLP transmit retry counter overflow\n");
+		    if (link_status & XB_STAT_XMT_MAX_RTRY_ERR)
+			XEM_ADD_STR("\tLLP Max Transmitter Retry\n");
+		    if (link_status & XB_STAT_RCV_ERR)
+			XEM_ADD_STR("\tLLP Receiver error\n");
+		    if (link_status & XB_STAT_XMT_RTRY_ERR)
+			XEM_ADD_STR("\tLLP Transmitter Retry\n");
+		    if (link_status & XB_STAT_MAXREQ_TOUT_ERR)
+			XEM_ADD_STR("\tMaximum Request Timeout\n");
+		    if (link_status & XB_STAT_SRC_TOUT_ERR)
+			XEM_ADD_STR("\tSource Timeout Error\n");
+#endif
+
+		    {
+			int                     other_port;
+
+			for (other_port = 8; other_port < 16; ++other_port) {
+			    if (link_aux_status & (1 << other_port)) {
+				/* XXX- need to go to "other_port"
+				 * and clean up after the timeout?
+				 */
+				XEM_ADD_VAR(other_port);
+			    }
+			}
+		    }
+
+#if !DEBUG
+		    if (kdebug) {
+#endif
+			XEM_ADD_VAR(link_control);
+			XEM_ADD_VAR(link_status);
+			XEM_ADD_VAR(link_aux_status);
+
+			if (dump_ioe) {
+			    XEM_ADD_IOE();
+			    dump_ioe = 0;
+			}
+#if !DEBUG
+		    }
+#endif
+		    fatal++;
+		}
+	    }
+	}
+    }
+    if (wid_stat & wid_control & XB_WID_STAT_WIDGET0_INTR) {
+	/* we have a "widget zero" problem */
+
+	if (wid_stat & (XB_WID_STAT_MULTI_ERR
+			| XB_WID_STAT_XTALK_ERR
+			| XB_WID_STAT_REG_ACC_ERR)) {
+
+	    cmn_err(CE_CONT,
+		    "%s Port 0 XIO Bus Error",
+		    soft->name);
+	    if (wid_stat & XB_WID_STAT_MULTI_ERR)
+		XEM_ADD_STR("\tMultiple Error\n");
+	    if (wid_stat & XB_WID_STAT_XTALK_ERR)
+		XEM_ADD_STR("\tXIO Error\n");
+	    if (wid_stat & XB_WID_STAT_REG_ACC_ERR)
+		XEM_ADD_STR("\tRegister Access Error\n");
+
+	    fatal++;
+	}
+    }
+    if (fatal) {
+	XEM_ADD_VAR(wid_stat);
+	XEM_ADD_VAR(wid_control);
+	XEM_ADD_VAR(wid_err_cmdword);
+	XEM_ADD_VAR(wid_err_upper);
+	XEM_ADD_VAR(wid_err_lower);
+	XEM_ADD_VAR(wid_err_addr);
+	cmn_err_tag(8, CE_PANIC, "XIO Bus Error");
+    }
+#endif
+}
+#endif	/* LATER */
+
+/*
+ * XBOW ERROR Handling routines.
+ * These get invoked as part of walking down the error handling path
+ * from hub/heart towards the I/O device that caused the error.
+ */
+
+/*
+ * xbow_error_handler
+ *      XBow error handling dispatch routine.
+ *      This is the primary interface used by external world to invoke
+ *      in case of an error related to a xbow.
+ *      Only functionality in this layer is to identify the widget handle
+ *      given the widgetnum. Otherwise, xbow does not gathers any error
+ *      data.
+ */
+
+#ifdef LATER
+static int
+xbow_error_handler(
+		      void *einfo,
+		      int error_code,
+		      ioerror_mode_t mode,
+		      ioerror_t *ioerror)
+{
+#ifdef IRIX
+    int                     retval = IOERROR_WIDGETLEVEL;
+
+    xbow_soft_t             soft = (xbow_soft_t) einfo;
+    int                     port;
+    devfs_handle_t            conn;
+    devfs_handle_t            busv;
+
+    xbow_t                 *xbow = soft->base;
+    xbowreg_t               wid_stat;
+    xbowreg_t               wid_err_cmdword;
+    xbowreg_t               wid_err_upper;
+    xbowreg_t               wid_err_lower;
+    uint64_t                 wid_err_addr;
+
+    xb_linkregs_t          *link;
+    xbowreg_t               link_control;
+    xbowreg_t               link_status;
+    xbowreg_t               link_aux_status;
+
+    ASSERT(soft != 0);
+    busv = soft->busv;
+
+#if DEBUG && ERROR_DEBUG
+    cmn_err(CE_CONT, "%s: xbow_error_handler\n", soft->name, busv);
+#endif
+
+    port = IOERROR_GETVALUE(ioerror, widgetnum);
+
+    if (port == 0) {
+	/* error during access to xbow:
+	 * do NOT attempt to access xbow regs.
+	 */
+	if (mode == MODE_DEVPROBE)
+	    return IOERROR_HANDLED;
+
+	if (error_code & IOECODE_DMA) {
+	    cmn_err(CE_ALERT,
+		    "DMA error blamed on Crossbow at %s\n"
+		    "\tbut Crossbow never initiates DMA!",
+		    soft->name);
+	}
+	if (error_code & IOECODE_PIO) {
+	    cmn_err(CE_ALERT,
+		    "PIO Error on XIO Bus %s\n"
+		    "\tattempting to access XIO controller\n"
+		    "\twith offset 0x%X",
+		    soft->name,
+		    IOERROR_GETVALUE(ioerror, xtalkaddr));
+	}
+	/* caller will dump contents of ioerror
+	 * in DEBUG and kdebug kernels.
+	 */
+
+	return retval;
+    }
+    /*
+     * error not on port zero:
+     * safe to read xbow registers.
+     */
+    wid_stat = xbow->xb_wid_stat;
+    wid_err_cmdword = xbow->xb_wid_err_cmdword;
+    wid_err_upper = xbow->xb_wid_err_upper;
+    wid_err_lower = xbow->xb_wid_err_lower;
+
+    wid_err_addr =
+	wid_err_lower
+	| (((iopaddr_t) wid_err_upper
+	    & WIDGET_ERR_UPPER_ADDR_ONLY)
+	   << 32);
+
+    if ((port < BASE_XBOW_PORT) ||
+	(port >= MAX_PORT_NUM)) {
+
+	if (mode == MODE_DEVPROBE)
+	    return IOERROR_HANDLED;
+
+	if (error_code & IOECODE_DMA) {
+	    cmn_err(CE_ALERT,
+		    "DMA error blamed on XIO port at %s/%d\n"
+		    "\tbut Crossbow does not support that port",
+		    soft->name, port);
+	}
+	if (error_code & IOECODE_PIO) {
+	    cmn_err(CE_ALERT,
+		    "PIO Error on XIO Bus %s\n"
+		    "\tattempting to access XIO port %d\n"
+		    "\t(which Crossbow does not support)"
+		    "\twith offset 0x%X",
+		    soft->name, port,
+		    IOERROR_GETVALUE(ioerror, xtalkaddr));
+	}
+#if !DEBUG
+	if (kdebug) {
+#endif
+	    XEM_ADD_STR("Raw status values for Crossbow:\n");
+	    XEM_ADD_VAR(wid_stat);
+	    XEM_ADD_VAR(wid_err_cmdword);
+	    XEM_ADD_VAR(wid_err_upper);
+	    XEM_ADD_VAR(wid_err_lower);
+	    XEM_ADD_VAR(wid_err_addr);
+#if !DEBUG
+	}
+#endif
+
+	/* caller will dump contents of ioerror
+	 * in DEBUG and kdebug kernels.
+	 */
+
+	return retval;
+    }
+    /* access to valid port:
+     * ok to check port status.
+     */
+
+    link = &(xbow->xb_link(port));
+    link_control = link->link_control;
+    link_status = link->link_status;
+    link_aux_status = link->link_aux_status;
+
+    /* Check that there is something present
+     * in that XIO port.
+     */
+    if (!(link_aux_status & XB_AUX_STAT_PRESENT)) {
+	/* nobody connected. */
+	if (mode == MODE_DEVPROBE)
+	    return IOERROR_HANDLED;
+
+	if (error_code & IOECODE_DMA) {
+	    cmn_err(CE_ALERT,
+		    "DMA error blamed on XIO port at %s/%d\n"
+		    "\tbut there is no device connected there.",
+		    soft->name, port);
+	}
+	if (error_code & IOECODE_PIO) {
+	    cmn_err(CE_ALERT,
+		    "PIO Error on XIO Bus %s\n"
+		    "\tattempting to access XIO port %d\n"
+		    "\t(which has no device connected)"
+		    "\twith offset 0x%X",
+		    soft->name, port,
+		    IOERROR_GETVALUE(ioerror, xtalkaddr));
+	}
+#if !DEBUG
+	if (kdebug) {
+#endif
+	    XEM_ADD_STR("Raw status values for Crossbow:\n");
+	    XEM_ADD_VAR(wid_stat);
+	    XEM_ADD_VAR(wid_err_cmdword);
+	    XEM_ADD_VAR(wid_err_upper);
+	    XEM_ADD_VAR(wid_err_lower);
+	    XEM_ADD_VAR(wid_err_addr);
+	    XEM_ADD_VAR(port);
+	    XEM_ADD_VAR(link_control);
+	    XEM_ADD_VAR(link_status);
+	    XEM_ADD_VAR(link_aux_status);
+#if !DEBUG
+	}
+#endif
+	return retval;
+
+    }
+    /* Check that the link is alive.
+     */
+    if (!(link_status & XB_STAT_LINKALIVE)) {
+	/* nobody connected. */
+	if (mode == MODE_DEVPROBE)
+	    return IOERROR_HANDLED;
+
+	cmn_err(CE_ALERT,
+		"%s%sError on XIO Bus %s port %d",
+		(error_code & IOECODE_DMA) ? "DMA " : "",
+		(error_code & IOECODE_PIO) ? "PIO " : "",
+		soft->name, port);
+
+	if ((error_code & IOECODE_PIO) &&
+	    (IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
+	    cmn_err(CE_CONT,
+		    "\tAccess attempted to offset 0x%X\n",
+		    IOERROR_GETVALUE(ioerror, xtalkaddr));
+	}
+	if (link_aux_status & XB_AUX_LINKFAIL_RST_BAD)
+	    XEM_ADD_STR("\tLink never came out of reset\n");
+	else
+	    XEM_ADD_STR("\tLink failed while transferring data\n");
+
+    }
+    /* get the connection point for the widget
+     * involved in this error; if it exists and
+     * is not our connectpoint, cycle back through
+     * xtalk_error_handler to deliver control to
+     * the proper handler (or to report a generic
+     * crosstalk error).
+     *
+     * If the downstream handler won't handle
+     * the problem, we let our upstream caller
+     * deal with it, after (in DEBUG and kdebug
+     * kernels) dumping the xbow state for this
+     * port.
+     */
+    conn = xbow_widget_lookup(busv, port);
+    if ((conn != GRAPH_VERTEX_NONE) &&
+	(conn != soft->conn)) {
+	retval = xtalk_error_handler(conn, error_code, mode, ioerror);
+	if (retval == IOERROR_HANDLED)
+	    return IOERROR_HANDLED;
+    }
+    if (mode == MODE_DEVPROBE)
+	return IOERROR_HANDLED;
+
+    if (retval == IOERROR_UNHANDLED) {
+	retval = IOERROR_PANIC;
+
+	cmn_err(CE_ALERT,
+		"%s%sError on XIO Bus %s port %d",
+		(error_code & IOECODE_DMA) ? "DMA " : "",
+		(error_code & IOECODE_PIO) ? "PIO " : "",
+		soft->name, port);
+
+	if ((error_code & IOECODE_PIO) &&
+	    (IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
+	    cmn_err(CE_CONT,
+		    "\tAccess attempted to offset 0x%X\n",
+		    IOERROR_GETVALUE(ioerror, xtalkaddr));
+	}
+    }
+
+#if !DEBUG
+    if (kdebug) {
+#endif
+	XEM_ADD_STR("Raw status values for Crossbow:\n");
+	XEM_ADD_VAR(wid_stat);
+	XEM_ADD_VAR(wid_err_cmdword);
+	XEM_ADD_VAR(wid_err_upper);
+	XEM_ADD_VAR(wid_err_lower);
+	XEM_ADD_VAR(wid_err_addr);
+	XEM_ADD_VAR(port);
+	XEM_ADD_VAR(link_control);
+	XEM_ADD_VAR(link_status);
+	XEM_ADD_VAR(link_aux_status);
+#if !DEBUG
+    }
+#endif
+    /* caller will dump raw ioerror data
+     * in DEBUG and kdebug kernels.
+     */
+
+    return retval;
+#endif /* IRIX */
+}
+
+#endif	/* LATER */
+
+void
+xbow_update_perf_counters(devfs_handle_t vhdl)
+{
+    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
+    xbow_perf_t            *xbow_perf = xbow_soft->xbow_perfcnt;
+    xbow_perf_link_t       *xbow_plink = xbow_soft->xbow_perflink;
+    xbow_perfcount_t        perf_reg;
+    int                     link, s, i;
+
+    for (i = 0; i < XBOW_PERF_COUNTERS; i++, xbow_perf++) {
+	if (xbow_perf->xp_mode == XBOW_MONITOR_NONE)
+	    continue;
+
+	s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
+
+	perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
+
+	link = perf_reg.xb_perf.link_select;
+
+	(xbow_plink + link)->xlp_cumulative[xbow_perf->xp_curmode] +=
+	    ((perf_reg.xb_perf.count - xbow_perf->xp_current) & XBOW_COUNTER_MASK);
+	xbow_perf->xp_current = perf_reg.xb_perf.count;
+
+	mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
+    }
+    /* Do port /mode multiplexing here */
+
+#ifdef IRIX
+    (void) timeout(xbow_update_perf_counters,
+		   (void *) (__psunsigned_t) vhdl, XBOW_PERF_TIMEOUT);
+#endif
+
+}
+
+xbow_perf_link_t       *
+xbow_get_perf_counters(devfs_handle_t vhdl)
+{
+    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
+    xbow_perf_link_t       *xbow_perf_link = xbow_soft->xbow_perflink;
+
+    return xbow_perf_link;
+}
+
+int
+xbow_enable_perf_counter(devfs_handle_t vhdl, int link, int mode, int counter)
+{
+    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
+    xbow_perf_t            *xbow_perf = xbow_soft->xbow_perfcnt;
+    xbow_linkctrl_t         xbow_link_ctrl;
+    xbow_t                 *xbow = xbow_soft->base;
+    xbow_perfcount_t        perf_reg;
+    int                     s, i;
+
+    link -= BASE_XBOW_PORT;
+    if ((link < 0) || (link >= MAX_XBOW_PORTS))
+	return -1;
+
+    if ((mode < XBOW_MONITOR_NONE) || (mode > XBOW_MONITOR_DEST_LINK))
+	return -1;
+
+    if ((counter < 0) || (counter >= XBOW_PERF_COUNTERS))
+	return -1;
+
+    s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
+
+    if ((xbow_perf + counter)->xp_mode && mode) {
+	mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
+	return -1;
+    }
+    for (i = 0; i < XBOW_PERF_COUNTERS; i++) {
+	if (i == counter)
+	    continue;
+	if (((xbow_perf + i)->xp_link == link) &&
+	    ((xbow_perf + i)->xp_mode)) {
+	    mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
+	    return -1;
+	}
+    }
+    xbow_perf += counter;
+
+    xbow_perf->xp_curlink = xbow_perf->xp_link = link;
+    xbow_perf->xp_curmode = xbow_perf->xp_mode = mode;
+
+    xbow_link_ctrl.xbl_ctrlword = xbow->xb_link_raw[link].link_control;
+    xbow_link_ctrl.xb_linkcontrol.perf_mode = mode;
+    xbow->xb_link_raw[link].link_control = xbow_link_ctrl.xbl_ctrlword;
+
+    perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
+    perf_reg.xb_perf.link_select = link;
+    *(xbowreg_t *) xbow_perf->xp_perf_reg = perf_reg.xb_counter_val;
+    xbow_perf->xp_current = perf_reg.xb_perf.count;
+
+#ifdef IRIX
+    (void) timeout(xbow_update_perf_counters,
+		   (void *) (__psunsigned_t) vhdl, XBOW_PERF_TIMEOUT);
+#endif
+
+    mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
+
+    return 0;
+}
+
+xbow_link_status_t     *
+xbow_get_llp_status(devfs_handle_t vhdl)
+{
+    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
+    xbow_link_status_t     *xbow_llp_status = xbow_soft->xbow_link_status;
+
+    return xbow_llp_status;
+}
+
+void
+xbow_update_llp_status(devfs_handle_t vhdl)
+{
+    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
+    xbow_link_status_t     *xbow_llp_status = xbow_soft->xbow_link_status;
+    xbow_t                 *xbow;
+    xbwX_stat_t             lnk_sts;
+    xbow_aux_link_status_t  aux_sts;
+    int                     link;
+    devfs_handle_t	    xwidget_vhdl;
+    char		   *xwidget_name;	
+
+    xbow = (xbow_t *) xbow_soft->base;
+    for (link = 0; link < MAX_XBOW_PORTS; link++, xbow_llp_status++) {
+	/* Get the widget name corresponding the current link.
+	 * Note : 0 <= link < MAX_XBOW_PORTS(8).
+	 * 	  BASE_XBOW_PORT(0x8) <= xwidget number < MAX_PORT_NUM (0x10)
+	 */
+	xwidget_vhdl = xbow_widget_lookup(xbow_soft->busv,link+BASE_XBOW_PORT);
+	xwidget_name = xwidget_name_get(xwidget_vhdl);
+	aux_sts.aux_linkstatus
+	    = xbow->xb_link_raw[link].link_aux_status;
+	lnk_sts.linkstatus = xbow->xb_link_raw[link].link_status_clr;
+
+	if (lnk_sts.link_alive == 0)
+	    continue;
+
+	xbow_llp_status->rx_err_count +=
+	    aux_sts.xb_aux_linkstatus.rx_err_cnt;
+
+	xbow_llp_status->tx_retry_count +=
+	    aux_sts.xb_aux_linkstatus.tx_retry_cnt;
+
+	if (lnk_sts.linkstatus & ~(XB_STAT_RCV_ERR | XB_STAT_XMT_RTRY_ERR | XB_STAT_LINKALIVE)) {
+#ifdef IRIX
+	    cmn_err(CE_WARN, "link %d[%s]: bad status 0x%x\n",
+		    link, xwidget_name, lnk_sts.linkstatus);
+#endif
+	}
+    }
+#ifdef IRIX
+    if (xbow_soft->link_monitor)
+	(void) timeout(xbow_update_llp_status,
+		       (void *) (__psunsigned_t) vhdl, XBOW_STATS_TIMEOUT);
+#endif
+}
+
+int
+xbow_disable_llp_monitor(devfs_handle_t vhdl)
+{
+    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
+    int                     port;
+
+    for (port = 0; port < MAX_XBOW_PORTS; port++) {
+	xbow_soft->xbow_link_status[port].rx_err_count = 0;
+	xbow_soft->xbow_link_status[port].tx_retry_count = 0;
+    }
+
+    xbow_soft->link_monitor = 0;
+    return 0;
+}
+
+int
+xbow_enable_llp_monitor(devfs_handle_t vhdl)
+{
+    xbow_soft_t             xbow_soft = xbow_soft_get(vhdl);
+
+#ifdef IRIX
+    (void) timeout(xbow_update_llp_status,
+		   (void *) (__psunsigned_t) vhdl, XBOW_STATS_TIMEOUT);
+#endif
+    xbow_soft->link_monitor = 1;
+    return 0;
+}
+
+
+int
+xbow_reset_link(devfs_handle_t xconn_vhdl)
+{
+    xwidget_info_t          widget_info;
+    xwidgetnum_t            port;
+    xbow_t                 *xbow;
+    xbowreg_t               ctrl;
+    xbwX_stat_t             stat;
+    unsigned                itick;
+    unsigned                dtick;
+    static int              ticks_per_ms = 0;
+
+    if (!ticks_per_ms) {
+	itick = get_timestamp();
+	us_delay(1000);
+	ticks_per_ms = get_timestamp() - itick;
+    }
+    widget_info = xwidget_info_get(xconn_vhdl);
+    port = xwidget_info_id_get(widget_info);
+
+#ifdef XBOW_K1PTR			/* defined if we only have one xbow ... */
+    xbow = XBOW_K1PTR;
+#else
+    {
+	devfs_handle_t            xbow_vhdl;
+	xbow_soft_t             xbow_soft;
+
+	hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
+	xbow_soft = xbow_soft_get(xbow_vhdl);
+	xbow = xbow_soft->base;
+    }
+#endif
+
+    /*
+     * This requires three PIOs (reset the link, check for the
+     * reset, restore the control register for the link) plus
+     * 10us to wait for the reset. We allow up to 1ms for the
+     * widget to come out of reset before giving up and
+     * returning a failure.
+     */
+    ctrl = xbow->xb_link(port).link_control;
+    xbow->xb_link(port).link_reset = 0;
+    itick = get_timestamp();
+    while (1) {
+	stat.linkstatus = xbow->xb_link(port).link_status;
+	if (stat.link_alive)
+	    break;
+	dtick = get_timestamp() - itick;
+	if (dtick > ticks_per_ms) {
+	    return -1;			/* never came out of reset */
+	}
+	DELAY(2);			/* don't beat on link_status */
+    }
+    xbow->xb_link(port).link_control = ctrl;
+    return 0;
+}
+
+/*
+ * Dump xbow registers.
+ * input parameter is either a pointer to
+ * the xbow chip or the vertex handle for
+ * an xbow vertex.
+ */
+void
+idbg_xbowregs(int64_t regs)
+{
+    xbow_t                 *xbow;
+    int                     i;
+    xb_linkregs_t          *link;
+
+#ifdef IRIX
+    if (dev_is_vertex((devfs_handle_t) regs)) {
+	devfs_handle_t            vhdl = (devfs_handle_t) regs;
+	xbow_soft_t             soft = xbow_soft_get(vhdl);
+
+	xbow = soft->base;
+    } else
+#endif
+    {
+	xbow = (xbow_t *) regs;
+    }
+
+#ifdef IRIX
+    qprintf("Printing xbow registers starting at 0x%x\n", xbow);
+    qprintf("wid %x status %x erruppr %x errlower %x control %x timeout %x\n",
+	    xbow->xb_wid_id, xbow->xb_wid_stat, xbow->xb_wid_err_upper,
+	    xbow->xb_wid_err_lower, xbow->xb_wid_control,
+	    xbow->xb_wid_req_timeout);
+    qprintf("intr uppr %x lower %x errcmd %x llp ctrl %x arb_reload %x\n",
+	    xbow->xb_wid_int_upper, xbow->xb_wid_int_lower,
+	    xbow->xb_wid_err_cmdword, xbow->xb_wid_llp,
+	    xbow->xb_wid_arb_reload);
+#endif
+
+    for (i = 8; i <= 0xf; i++) {
+	link = &xbow->xb_link(i);
+#ifdef IRIX
+	qprintf("Link %d registers\n", i);
+	qprintf("\tctrl %x stat %x arbuppr %x arblowr %x auxstat %x\n",
+		link->link_control, link->link_status,
+		link->link_arb_upper, link->link_arb_lower,
+		link->link_aux_status);
+#endif
+    }
+}
+
+
+#define XBOW_ARB_RELOAD_TICKS		25
+					/* granularity: 4 MB/s, max: 124 MB/s */
+#define GRANULARITY			((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)
+
+#define XBOW_BYTES_TO_GBR(BYTES_per_s)	(int) ((BYTES_per_s) / GRANULARITY)
+
+#define XBOW_GBR_TO_BYTES(cnt)		(bandwidth_t) ((cnt) * GRANULARITY)
+
+#define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec)	\
+			((XBOW_GBR_TO_BYTES(gbr) < (bytes_per_sec)) ? (gbr)+1 : (gbr))
+
+#define XBOW_ARB_GBR_MAX		31
+
+#define ABS(x)				(((x) > 0) ? (x) : (-(x)))
+					/* absolute value */
+
+int
+xbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec, bandwidth_t bytes_per_sec)
+{
+    int                     gbr_granted;
+    int                     new_total_gbr;
+    int                     change_gbr;
+    bandwidth_t             new_total_bw;
+
+#ifdef GRIO_DEBUG
+    printf("xbow_bytes_to_gbr: old_bytes_per_sec %lld bytes_per_sec %lld\n",
+		old_bytes_per_sec, bytes_per_sec);
+#endif	/* GRIO_DEBUG */
+
+    gbr_granted = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(old_bytes_per_sec)),
+			old_bytes_per_sec);
+    new_total_bw = old_bytes_per_sec + bytes_per_sec;
+    new_total_gbr = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(new_total_bw)),
+			new_total_bw);
+
+    change_gbr = new_total_gbr - gbr_granted;
+
+#ifdef GRIO_DEBUG
+    printf("xbow_bytes_to_gbr: gbr_granted %d new_total_gbr %d change_gbr %d\n",
+		gbr_granted, new_total_gbr, change_gbr);
+#endif	/* GRIO_DEBUG */
+
+    return (change_gbr);
+}
+
+/* Conversion from GBR to bytes */
+bandwidth_t
+xbow_gbr_to_bytes(int gbr)
+{
+    return (XBOW_GBR_TO_BYTES(gbr));
+}
+
+/* Given the vhdl for the desired xbow, the src and dest. widget ids
+ * and the req_bw value, this xbow driver entry point accesses the
+ * xbow registers and allocates the desired bandwidth if available.
+ *
+ * If bandwidth allocation is successful, return success else return failure.
+ */
+int
+xbow_prio_bw_alloc(devfs_handle_t vhdl,
+		xwidgetnum_t src_wid,
+		xwidgetnum_t dest_wid,
+		unsigned long long old_alloc_bw,
+		unsigned long long req_bw)
+{
+    xbow_soft_t             soft = xbow_soft_get(vhdl);
+    volatile xbowreg_t     *xreg;
+    xbowreg_t               mask;
+    int                     s;
+    int                     error = 0;
+    bandwidth_t             old_bw_BYTES, req_bw_BYTES;
+    xbowreg_t               old_xreg;
+    int                     old_bw_GBR, req_bw_GBR, new_bw_GBR;
+
+#ifdef GRIO_DEBUG
+    printf("xbow_prio_bw_alloc: vhdl %d src_wid %d dest_wid %d req_bw %lld\n",
+		(int) vhdl, (int) src_wid, (int) dest_wid, req_bw);
+#endif
+
+    ASSERT(XBOW_WIDGET_IS_VALID(src_wid));
+    ASSERT(XBOW_WIDGET_IS_VALID(dest_wid));
+
+    s = mutex_spinlock(&soft->xbow_bw_alloc_lock);
+
+    /* Get pointer to the correct register */
+    xreg = XBOW_PRIO_ARBREG_PTR(soft->base, dest_wid, src_wid);
+
+    /* Get mask for GBR count value */
+    mask = XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(src_wid);
+
+    req_bw_GBR = xbow_bytes_to_gbr(old_alloc_bw, req_bw);
+    req_bw_BYTES = (req_bw_GBR < 0) ? (-1 * xbow_gbr_to_bytes(ABS(req_bw_GBR)))
+		: xbow_gbr_to_bytes(req_bw_GBR);
+
+#ifdef GRIO_DEBUG
+    printf("req_bw %lld req_bw_BYTES %lld req_bw_GBR %d\n",
+		req_bw, req_bw_BYTES, req_bw_GBR);
+#endif	/* GRIO_DEBUG */
+
+    old_bw_BYTES = soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS];
+    old_xreg = *xreg;
+    old_bw_GBR = (((*xreg) & mask) >> XB_ARB_GBR_SHFT(src_wid));
+
+#ifdef GRIO_DEBUG
+    ASSERT(XBOW_BYTES_TO_GBR(old_bw_BYTES) == old_bw_GBR);
+
+    printf("old_bw_BYTES %lld old_bw_GBR %d\n", old_bw_BYTES, old_bw_GBR);
+
+    printf("req_bw_BYTES %lld old_bw_BYTES %lld soft->bw_hiwm %lld\n",
+		req_bw_BYTES, old_bw_BYTES,
+		soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]);
+	   
+#endif				/* GRIO_DEBUG */
+
+    /* Accept the request only if we don't exceed the destination
+     * port HIWATER_MARK *AND* the max. link GBR arbitration count
+     */
+    if (((old_bw_BYTES + req_bw_BYTES) <=
+		soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]) &&
+		(req_bw_GBR + old_bw_GBR <= XBOW_ARB_GBR_MAX)) {
+
+	new_bw_GBR = (old_bw_GBR + req_bw_GBR);
+
+	/* Set this in the xbow link register */
+	*xreg = (old_xreg & ~mask) | \
+	    (new_bw_GBR << XB_ARB_GBR_SHFT(src_wid) & mask);
+
+	soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS] =
+			xbow_gbr_to_bytes(new_bw_GBR);
+    } else {
+	error = 1;
+    }
+
+    mutex_spinunlock(&soft->xbow_bw_alloc_lock, s);
+
+    return (error);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/xswitch.c linux/arch/ia64/sn/io/xswitch.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/xswitch.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/xswitch.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,268 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/xtalk/xswitch.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/xtalk/xtalk_private.h>
+
+#define	NEW(ptr)	(ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
+#define	DEL(ptr)	(kfree(ptr))
+
+int                     xswitch_devflag = D_MP;
+
+/*
+ * This file provides generic support for Crosstalk
+ * Switches, in a way that insulates crosstalk providers
+ * from specifics about the switch chips being used.
+ */
+
+#include <asm/sn/xtalk/xbow.h>
+#define DEV_FUNC(dev,func)      xbow_##func
+
+#if !defined(DEV_FUNC)
+/*
+ * There is more than one possible provider
+ * for this platform. We need to examine the
+ * master vertex of the current vertex for
+ * a provider function structure, and indirect
+ * through the appropriately named member.
+ */
+#define	DEV_FUNC(dev,func)	xwidget_to_provider_fns(dev)->func
+
+static xswitch_provider_t *
+xwidget_to_provider_fns(devfs_handle_t xconn)
+{
+    devfs_handle_t            busv;
+    xswitch_info_t          xswitch_info;
+    xswitch_provider_t      provider_fns;
+
+    busv = hwgraph_connectpt_get(xconn_vhdl);
+    ASSERT(busv != GRAPH_VERTEX_NONE);
+
+    xswitch_info = xswitch_info_get(busv);
+    ASSERT(xswitch_info != NULL);
+
+    provider_fns = xswitch_info->xswitch_fns;
+    ASSERT(provider_fns != NULL);
+
+    return provider_fns;
+}
+#endif
+
+#define	XSWITCH_CENSUS_BIT(port)		(1<<(port))
+#define	XSWITCH_CENSUS_PORT_MIN			(0x0)
+#define	XSWITCH_CENSUS_PORT_MAX			(0xF)
+#define	XSWITCH_CENSUS_PORTS			(0x10)
+#define	XSWITCH_WIDGET_PRESENT(infop,port)	((infop)->census & XSWITCH_CENSUS_BIT(port))
+
+static char             xswitch_info_fingerprint[] = "xswitch_info";
+
+struct xswitch_info_s {
+    char                   *fingerprint;
+    unsigned                census;
+    devfs_handle_t            vhdl[XSWITCH_CENSUS_PORTS];
+    devfs_handle_t            master_vhdl[XSWITCH_CENSUS_PORTS];
+    xswitch_provider_t     *xswitch_fns;
+};
+
+xswitch_info_t
+xswitch_info_get(devfs_handle_t xwidget)
+{
+    xswitch_info_t          xswitch_info;
+
+    xswitch_info = (xswitch_info_t)
+	hwgraph_fastinfo_get(xwidget);
+#ifdef IRIX
+    if ((xswitch_info != NULL) &&
+	(xswitch_info->fingerprint != xswitch_info_fingerprint))
+	cmn_err(CE_PANIC, "%v xswitch_info_get bad fingerprint", xwidget);
+#endif
+
+    printk("xswitch_info_get: xwidget 0x%p xswitch_info 0x%p\n", xwidget, xswitch_info);
+
+    return (xswitch_info);
+}
+
+void
+xswitch_info_vhdl_set(xswitch_info_t xswitch_info,
+		      xwidgetnum_t port,
+		      devfs_handle_t xwidget)
+{
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return;
+#endif
+    if (port > XSWITCH_CENSUS_PORT_MAX)
+	return;
+
+    xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN] = xwidget;
+}
+
+devfs_handle_t
+xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
+		      xwidgetnum_t port)
+{
+#ifdef IRIX
+    if (xswitch_info == NULL)
+	cmn_err(CE_PANIC, "xswitch_info_vhdl_get: null xswitch_info");
+#endif
+
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return GRAPH_VERTEX_NONE;
+#endif
+    if (port > XSWITCH_CENSUS_PORT_MAX)
+	return GRAPH_VERTEX_NONE;
+
+    return xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN];
+}
+
+/*
+ * Some systems may allow for multiple switch masters.  On such systems,
+ * we assign a master for each port on the switch.  These interfaces
+ * establish and retrieve that assignment.
+ */
+void
+xswitch_info_master_assignment_set(xswitch_info_t xswitch_info,
+				   xwidgetnum_t port,
+				   devfs_handle_t master_vhdl)
+{
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return;
+#endif
+    if (port > XSWITCH_CENSUS_PORT_MAX)
+	return;
+
+    xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN] = master_vhdl;
+}
+
+devfs_handle_t
+xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
+				   xwidgetnum_t port)
+{
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return GRAPH_VERTEX_NONE;
+#endif
+    if (port > XSWITCH_CENSUS_PORT_MAX)
+	return GRAPH_VERTEX_NONE;
+
+    return xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN];
+}
+
+void
+xswitch_info_set(devfs_handle_t xwidget, xswitch_info_t xswitch_info)
+{
+    xswitch_info->fingerprint = xswitch_info_fingerprint;
+    hwgraph_fastinfo_set(xwidget, (arbitrary_info_t) xswitch_info);
+}
+
+xswitch_info_t
+xswitch_info_new(devfs_handle_t xwidget)
+{
+    xswitch_info_t          xswitch_info;
+
+    xswitch_info = xswitch_info_get(xwidget);
+    if (xswitch_info == NULL) {
+	int                     port;
+
+	NEW(xswitch_info);
+	xswitch_info->census = 0;
+	for (port = XSWITCH_CENSUS_PORT_MIN;
+	     port <= XSWITCH_CENSUS_PORT_MAX;
+	     port++) {
+	    xswitch_info_vhdl_set(xswitch_info, port,
+				  GRAPH_VERTEX_NONE);
+
+	    xswitch_info_master_assignment_set(xswitch_info,
+					       port,
+					       GRAPH_VERTEX_NONE);
+	}
+	xswitch_info_set(xwidget, xswitch_info);
+	printk("xswitch_info_new: xswitch_info_set xwidget 0x%p, xswitch_info 0x%p\n",
+		xwidget, xswitch_info);
+    }
+    return xswitch_info;
+}
+
+void
+xswitch_provider_register(devfs_handle_t busv,
+			  xswitch_provider_t * xswitch_fns)
+{
+    xswitch_info_t          xswitch_info = xswitch_info_get(busv);
+
+    ASSERT(xswitch_info);
+    xswitch_info->xswitch_fns = xswitch_fns;
+}
+
+void
+xswitch_info_link_is_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
+{
+    xswitch_info->census |= XSWITCH_CENSUS_BIT(port);
+}
+
+int
+xswitch_info_link_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
+{
+#if XSWITCH_CENSUS_PORT_MIN
+    if (port < XSWITCH_CENSUS_PORT_MIN)
+	return 0;
+#endif
+
+    if (port > XSWITCH_CENSUS_PORT_MAX)
+	return 0;
+
+    return (xswitch_info->census & XSWITCH_CENSUS_BIT(port));
+}
+
+int
+xswitch_reset_link(devfs_handle_t xconn_vhdl)
+{
+    return DEV_FUNC(xconn_vhdl, reset_link)
+	(xconn_vhdl);
+}
+
+/* Given a vertex handle to the xswitch get its logical
+ * id.
+ */
+int
+xswitch_id_get(devfs_handle_t	xconn_vhdl)
+{
+    arbitrary_info_t 	xbow_num;
+    graph_error_t	rv;
+
+    rv = hwgraph_info_get_LBL(xconn_vhdl,INFO_LBL_XSWITCH_ID,&xbow_num);
+    ASSERT(rv == GRAPH_SUCCESS);
+    return(xbow_num);
+}
+
+/* Given a vertex handle to the xswitch set its logical
+ * id.
+ */
+void
+xswitch_id_set(devfs_handle_t	xconn_vhdl,int xbow_num)
+{
+    graph_error_t	rv;
+
+    rv = hwgraph_info_add_LBL(xconn_vhdl,INFO_LBL_XSWITCH_ID,
+			      (arbitrary_info_t)xbow_num);
+    ASSERT(rv == GRAPH_SUCCESS);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/io/xtalk.c linux/arch/ia64/sn/io/xtalk.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/io/xtalk.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/io/xtalk.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1137 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/hcl_util.h>
+
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/xtalk/xswitch.h>
+#include <asm/sn/xtalk/xwidget.h>
+
+#include <asm/sn/xtalk/xtalk_private.h>
+
+/*
+ * Implement crosstalk provider operations.  The xtalk* layer provides a
+ * platform-independent interface for crosstalk devices.  This layer
+ * switches among the possible implementations of a crosstalk adapter.
+ *
+ * On platforms with only one possible xtalk provider, macros can be
+ * set up at the top that cause the table lookups and indirections to
+ * completely disappear.
+ */
+
+#define	NEW(ptr)	(ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
+#define	DEL(ptr)	(kfree(ptr))
+
+char                    widget_info_fingerprint[] = "widget_info";
+
+cdl_p                   xtalk_registry = NULL;
+
+#include <asm/sn/agent.h>
+#define	DEV_FUNC(dev,func)	hub_##func
+#define	CAST_PIOMAP(x)		((hub_piomap_t)(x))
+#define	CAST_DMAMAP(x)		((hub_dmamap_t)(x))
+#define	CAST_INTR(x)		((hub_intr_t)(x))
+
+/* =====================================================================
+ *            Function Table of Contents
+ */
+xtalk_piomap_t          xtalk_piomap_alloc(devfs_handle_t, device_desc_t, iopaddr_t, size_t, size_t, unsigned);
+void                    xtalk_piomap_free(xtalk_piomap_t);
+caddr_t                 xtalk_piomap_addr(xtalk_piomap_t, iopaddr_t, size_t);
+void                    xtalk_piomap_done(xtalk_piomap_t);
+caddr_t                 xtalk_piotrans_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, unsigned);
+caddr_t                 xtalk_pio_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, xtalk_piomap_t *, unsigned);
+void                    xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *);
+caddr_t                 xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
+static caddr_t          null_xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
+xtalk_dmamap_t          xtalk_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+void                    xtalk_dmamap_free(xtalk_dmamap_t);
+iopaddr_t               xtalk_dmamap_addr(xtalk_dmamap_t, paddr_t, size_t);
+alenlist_t              xtalk_dmamap_list(xtalk_dmamap_t, alenlist_t, unsigned);
+void                    xtalk_dmamap_done(xtalk_dmamap_t);
+iopaddr_t               xtalk_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
+alenlist_t              xtalk_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+void			xtalk_dmamap_drain(xtalk_dmamap_t);
+void			xtalk_dmaaddr_drain(devfs_handle_t, iopaddr_t, size_t);
+void			xtalk_dmalist_drain(devfs_handle_t, alenlist_t);
+xtalk_intr_t            xtalk_intr_alloc(devfs_handle_t, device_desc_t, devfs_handle_t);
+void                    xtalk_intr_free(xtalk_intr_t);
+int                     xtalk_intr_connect(xtalk_intr_t, intr_func_t, intr_arg_t, xtalk_intr_setfunc_t, void *, void *);
+void                    xtalk_intr_disconnect(xtalk_intr_t);
+devfs_handle_t            xtalk_intr_cpu_get(xtalk_intr_t);
+int                     xtalk_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
+int                     xtalk_error_devenable(devfs_handle_t, int, int);
+void                    xtalk_provider_startup(devfs_handle_t);
+void                    xtalk_provider_shutdown(devfs_handle_t);
+devfs_handle_t            xtalk_intr_dev_get(xtalk_intr_t);
+xwidgetnum_t            xtalk_intr_target_get(xtalk_intr_t);
+xtalk_intr_vector_t     xtalk_intr_vector_get(xtalk_intr_t);
+iopaddr_t               xtalk_intr_addr_get(struct xtalk_intr_s *);
+void                   *xtalk_intr_sfarg_get(xtalk_intr_t);
+devfs_handle_t            xtalk_pio_dev_get(xtalk_piomap_t);
+xwidgetnum_t            xtalk_pio_target_get(xtalk_piomap_t);
+iopaddr_t               xtalk_pio_xtalk_addr_get(xtalk_piomap_t);
+ulong                   xtalk_pio_mapsz_get(xtalk_piomap_t);
+caddr_t                 xtalk_pio_kvaddr_get(xtalk_piomap_t);
+devfs_handle_t            xtalk_dma_dev_get(xtalk_dmamap_t);
+xwidgetnum_t            xtalk_dma_target_get(xtalk_dmamap_t);
+xwidget_info_t          xwidget_info_chk(devfs_handle_t);
+xwidget_info_t          xwidget_info_get(devfs_handle_t);
+void                    xwidget_info_set(devfs_handle_t, xwidget_info_t);
+devfs_handle_t            xwidget_info_dev_get(xwidget_info_t);
+xwidgetnum_t            xwidget_info_id_get(xwidget_info_t);
+devfs_handle_t            xwidget_info_master_get(xwidget_info_t);
+xwidgetnum_t            xwidget_info_masterid_get(xwidget_info_t);
+xwidget_part_num_t      xwidget_info_part_num_get(xwidget_info_t);
+xwidget_mfg_num_t       xwidget_info_mfg_num_get(xwidget_info_t);
+char 			*xwidget_info_name_get(xwidget_info_t);
+void                    xtalk_init(void);
+void                    xtalk_provider_register(devfs_handle_t, xtalk_provider_t *);
+void                    xtalk_provider_unregister(devfs_handle_t);
+xtalk_provider_t       *xtalk_provider_fns_get(devfs_handle_t);
+int                     xwidget_driver_register(xwidget_part_num_t, 
+						xwidget_mfg_num_t, 
+						char *, unsigned);
+void                    xwidget_driver_unregister(char *);
+int                     xwidget_register(xwidget_hwid_t, devfs_handle_t, 
+					 xwidgetnum_t, devfs_handle_t, 
+					 xwidgetnum_t, async_attach_t);
+int			xwidget_unregister(devfs_handle_t);
+void                    xwidget_error_register(devfs_handle_t, error_handler_f *,
+					       error_handler_arg_t);
+void                    xwidget_reset(devfs_handle_t);
+char			*xwidget_name_get(devfs_handle_t);
+#if !defined(DEV_FUNC)
+/*
+ * There is more than one possible provider
+ * for this platform. We need to examine the
+ * master vertex of the current vertex for
+ * a provider function structure, and indirect
+ * through the appropriately named member.
+ */
+#define	DEV_FUNC(dev,func)	xwidget_to_provider_fns(dev)->func
+#define	CAST_PIOMAP(x)		((xtalk_piomap_t)(x))
+#define	CAST_DMAMAP(x)		((xtalk_dmamap_t)(x))
+#define	CAST_INTR(x)		((xtalk_intr_t)(x))
+
+static xtalk_provider_t *
+xwidget_to_provider_fns(devfs_handle_t xconn)
+{
+    xwidget_info_t          widget_info;
+    xtalk_provider_t       *provider_fns;
+
+    widget_info = xwidget_info_get(xconn);
+    ASSERT(widget_info != NULL);
+
+    provider_fns = xwidget_info_pops_get(widget_info);
+    ASSERT(provider_fns != NULL);
+
+    return (provider_fns);
+}
+#endif
+
+/*
+ * Many functions are not passed their vertex
+ * information directly; rather, they must
+ * dive through a resource map. These macros
+ * are available to coordinate this detail.
+ */
+#define	PIOMAP_FUNC(map,func)	DEV_FUNC(map->xp_dev,func)
+#define	DMAMAP_FUNC(map,func)	DEV_FUNC(map->xd_dev,func)
+#define	INTR_FUNC(intr,func)	DEV_FUNC(intr_hdl->xi_dev,func)
+
+/* =====================================================================
+ *                    PIO MANAGEMENT
+ *
+ *      For mapping system virtual address space to
+ *      xtalk space on a specified widget
+ */
+
+xtalk_piomap_t
+xtalk_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
+		   device_desc_t dev_desc,	/* device descriptor */
+		   iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
+		   size_t byte_count,
+		   size_t byte_count_max,	/* maximum size of a mapping */
+		   unsigned flags)
+{				/* defined in sys/pio.h */
+    return (xtalk_piomap_t) DEV_FUNC(dev, piomap_alloc)
+	(dev, dev_desc, xtalk_addr, byte_count, byte_count_max, flags);
+}
+
+
+void
+xtalk_piomap_free(xtalk_piomap_t xtalk_piomap)
+{
+    PIOMAP_FUNC(xtalk_piomap, piomap_free)
+	(CAST_PIOMAP(xtalk_piomap));
+}
+
+
+caddr_t
+xtalk_piomap_addr(xtalk_piomap_t xtalk_piomap,	/* mapping resources */
+		  iopaddr_t xtalk_addr,		/* map for this xtalk address */
+		  size_t byte_count)
+{				/* map this many bytes */
+    return PIOMAP_FUNC(xtalk_piomap, piomap_addr)
+	(CAST_PIOMAP(xtalk_piomap), xtalk_addr, byte_count);
+}
+
+
+void
+xtalk_piomap_done(xtalk_piomap_t xtalk_piomap)
+{
+    PIOMAP_FUNC(xtalk_piomap, piomap_done)
+	(CAST_PIOMAP(xtalk_piomap));
+}
+
+
+caddr_t
+xtalk_piotrans_addr(devfs_handle_t dev,	/* translate for this device */
+		    device_desc_t dev_desc,	/* device descriptor */
+		    iopaddr_t xtalk_addr,	/* Crosstalk address */
+		    size_t byte_count,	/* map this many bytes */
+		    unsigned flags)
+{				/* (currently unused) */
+    return DEV_FUNC(dev, piotrans_addr)
+	(dev, dev_desc, xtalk_addr, byte_count, flags);
+}
+
+caddr_t
+xtalk_pio_addr(devfs_handle_t dev,	/* translate for this device */
+	       device_desc_t dev_desc,	/* device descriptor */
+	       iopaddr_t addr,		/* starting address (or offset in window) */
+	       size_t byte_count,	/* map this many bytes */
+	       xtalk_piomap_t *mapp,	/* where to return the map pointer */
+	       unsigned flags)
+{					/* PIO flags */
+    xtalk_piomap_t          map = 0;
+    caddr_t                 res;
+
+    if (mapp)
+	*mapp = 0;			/* record "no map used" */
+
+    res = xtalk_piotrans_addr
+	(dev, dev_desc, addr, byte_count, flags);
+    if (res)
+	return res;			/* xtalk_piotrans worked */
+
+    map = xtalk_piomap_alloc
+	(dev, dev_desc, addr, byte_count, byte_count, flags);
+    if (!map)
+	return res;			/* xtalk_piomap_alloc failed */
+
+    res = xtalk_piomap_addr
+	(map, addr, byte_count);
+    if (!res) {
+	xtalk_piomap_free(map);
+	return res;			/* xtalk_piomap_addr failed */
+    }
+    if (mapp)
+	*mapp = map;			/* pass back map used */
+
+    return res;				/* xtalk_piomap_addr succeeded */
+}
+
+/* =====================================================================
+ *            EARLY PIOTRANS SUPPORT
+ *
+ *      There are places where drivers (mgras, for instance)
+ *      need to get PIO translations before the infrastructure
+ *      is extended to them (setting up textports, for
+ *      instance). These drivers should call
+ *      xtalk_early_piotrans_addr with their xtalk ID
+ *      information, a sequence number (so we can use the second
+ *      mgras for instance), and the usual piotrans parameters.
+ *
+ *      Machine specific code should provide an implementation
+ *      of early_piotrans_addr, and present a pointer to this
+ *      function to xtalk_set_early_piotrans_addr so it can be
+ *      used by clients without the clients having to know what
+ *      platform or what xtalk provider is in use.
+ */
+
+static xtalk_early_piotrans_addr_f null_xtalk_early_piotrans_addr;
+
+xtalk_early_piotrans_addr_f *impl_early_piotrans_addr = null_xtalk_early_piotrans_addr;
+
+/* xtalk_set_early_piotrans_addr:
+ * specify the early_piotrans_addr implementation function.
+ */
+void
+xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *impl)
+{
+    impl_early_piotrans_addr = impl;
+}
+
+/* xtalk_early_piotrans_addr:
+ * figure out a PIO address for the "nth" crosstalk widget that
+ * matches the specified part and mfgr number. Returns NULL if
+ * there is no such widget, or if the requested mapping can not
+ * be constructed.
+ * Limitations on which crosstalk slots (and busses) are
+ * checked, and definitions of the ordering of the search across
+ * the crosstalk slots, are defined by the platform.
+ */
+caddr_t
+xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
+			  xwidget_mfg_num_t mfg_num,
+			  int which,
+			  iopaddr_t xtalk_addr,
+			  size_t byte_count,
+			  unsigned flags)
+{
+    return impl_early_piotrans_addr
+	(part_num, mfg_num, which, xtalk_addr, byte_count, flags);
+}
+
+/* null_xtalk_early_piotrans_addr:
+ * used as the early_piotrans_addr implementation until and
+ * unless a real implementation is provided. In DEBUG kernels,
+ * we want to know who is calling before the implementation is
+ * registered; in non-DEBUG kernels, return NULL representing
+ * lack of mapping support.
+ */
+/*ARGSUSED */
+static caddr_t
+null_xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
+			       xwidget_mfg_num_t mfg_num,
+			       int which,
+			       iopaddr_t xtalk_addr,
+			       size_t byte_count,
+			       unsigned flags)
+{
+#if DEBUG
+    cmn_err(CE_PANIC, "null_xtalk_early_piotrans_addr");
+#endif
+    return NULL;
+}
+
+/* =====================================================================
+ *                    DMA MANAGEMENT
+ *
+ *      For mapping from crosstalk space to system
+ *      physical space.
+ */
+
+xtalk_dmamap_t
+xtalk_dmamap_alloc(devfs_handle_t dev,	/* set up mappings for this device */
+		   device_desc_t dev_desc,	/* device descriptor */
+		   size_t byte_count_max,	/* max size of a mapping */
+		   unsigned flags)
+{				/* defined in dma.h */
+    return (xtalk_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
+	(dev, dev_desc, byte_count_max, flags);
+}
+
+
+void
+xtalk_dmamap_free(xtalk_dmamap_t xtalk_dmamap)
+{
+    DMAMAP_FUNC(xtalk_dmamap, dmamap_free)
+	(CAST_DMAMAP(xtalk_dmamap));
+}
+
+
+iopaddr_t
+xtalk_dmamap_addr(xtalk_dmamap_t xtalk_dmamap,	/* use these mapping resources */
+		  paddr_t paddr,	/* map for this address */
+		  size_t byte_count)
+{				/* map this many bytes */
+    return DMAMAP_FUNC(xtalk_dmamap, dmamap_addr)
+	(CAST_DMAMAP(xtalk_dmamap), paddr, byte_count);
+}
+
+
+alenlist_t
+xtalk_dmamap_list(xtalk_dmamap_t xtalk_dmamap,	/* use these mapping resources */
+		  alenlist_t alenlist,	/* map this Address/Length List */
+		  unsigned flags)
+{
+    return DMAMAP_FUNC(xtalk_dmamap, dmamap_list)
+	(CAST_DMAMAP(xtalk_dmamap), alenlist, flags);
+}
+
+
+void
+xtalk_dmamap_done(xtalk_dmamap_t xtalk_dmamap)
+{
+    DMAMAP_FUNC(xtalk_dmamap, dmamap_done)
+	(CAST_DMAMAP(xtalk_dmamap));
+}
+
+
+iopaddr_t
+xtalk_dmatrans_addr(devfs_handle_t dev,	/* translate for this device */
+		    device_desc_t dev_desc,	/* device descriptor */
+		    paddr_t paddr,	/* system physical address */
+		    size_t byte_count,	/* length */
+		    unsigned flags)
+{				/* defined in dma.h */
+    return DEV_FUNC(dev, dmatrans_addr)
+	(dev, dev_desc, paddr, byte_count, flags);
+}
+
+
+alenlist_t
+xtalk_dmatrans_list(devfs_handle_t dev,	/* translate for this device */
+		    device_desc_t dev_desc,	/* device descriptor */
+		    alenlist_t palenlist,	/* system address/length list */
+		    unsigned flags)
+{				/* defined in dma.h */
+    return DEV_FUNC(dev, dmatrans_list)
+	(dev, dev_desc, palenlist, flags);
+}
+
+void
+xtalk_dmamap_drain(xtalk_dmamap_t map)
+{
+    DMAMAP_FUNC(map, dmamap_drain)
+	(CAST_DMAMAP(map));
+}
+
+void
+xtalk_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
+{
+    DEV_FUNC(dev, dmaaddr_drain)
+	(dev, addr, size);
+}
+
+void
+xtalk_dmalist_drain(devfs_handle_t dev, alenlist_t list)
+{
+    DEV_FUNC(dev, dmalist_drain)
+	(dev, list);
+}
+
+/* =====================================================================
+ *                    INTERRUPT MANAGEMENT
+ *
+ *      Allow crosstalk devices to establish interrupts
+ */
+
+/*
+ * Allocate resources required for an interrupt as specified in intr_desc.
+ * Return resource handle in intr_hdl.
+ */
+xtalk_intr_t
+xtalk_intr_alloc(devfs_handle_t dev,	/* which Crosstalk device */
+		 device_desc_t dev_desc,	/* device descriptor */
+		 devfs_handle_t owner_dev)
+{				/* owner of this interrupt */
+    return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc)
+	(dev, dev_desc, owner_dev);
+}
+
+
+/*
+ * Free resources consumed by intr_alloc.
+ */
+void
+xtalk_intr_free(xtalk_intr_t intr_hdl)
+{
+    INTR_FUNC(intr_hdl, intr_free)
+	(CAST_INTR(intr_hdl));
+}
+
+
+/*
+ * Associate resources allocated with a previous xtalk_intr_alloc call with the
+ * described handler, arg, name, etc.
+ *
+ * Returns 0 on success, returns <0 on failure.
+ */
+int
+xtalk_intr_connect(xtalk_intr_t intr_hdl,	/* xtalk intr resource handle */
+		   intr_func_t intr_func,	/* xtalk intr handler */
+		   intr_arg_t intr_arg,		/* arg to intr handler */
+		   xtalk_intr_setfunc_t setfunc,	/* func to set intr hw */
+		   void *setfunc_arg,	/* arg to setfunc */
+		   void *thread)
+{				/* intr thread to use */
+    return INTR_FUNC(intr_hdl, intr_connect)
+	(CAST_INTR(intr_hdl), intr_func, intr_arg, setfunc, setfunc_arg, thread);
+}
+
+
+/*
+ * Disassociate handler with the specified interrupt.
+ */
+void
+xtalk_intr_disconnect(xtalk_intr_t intr_hdl)
+{
+    INTR_FUNC(intr_hdl, intr_disconnect)
+	(CAST_INTR(intr_hdl));
+}
+
+
+/*
+ * Return a hwgraph vertex that represents the CPU currently
+ * targeted by an interrupt.
+ */
+devfs_handle_t
+xtalk_intr_cpu_get(xtalk_intr_t intr_hdl)
+{
+    return INTR_FUNC(intr_hdl, intr_cpu_get)
+	(CAST_INTR(intr_hdl));
+}
+
+
+/*
+ * =====================================================================
+ *                      ERROR MANAGEMENT
+ */
+
+/*
+ * xtalk_error_handler:
+ * pass this error on to the handler registered
+ * at the specified xtalk connection point,
+ * or complain about it here if there is no handler.
+ *
+ * This routine plays two roles during error delivery
+ * to most widgets: first, the external agent (heart,
+ * hub, or whatever) calls in with the error and the
+ * connect point representing the crosstalk switch,
+ * or whatever crosstalk device is directly connected
+ * to the agent.
+ *
+ * If there is a switch, it will generally look at the
+ * widget number stashed in the ioerror structure; and,
+ * if the error came from some widget other than the
+ * switch, it will call back into xtalk_error_handler
+ * with the connection point of the offending port.
+ */
+int
+xtalk_error_handler(
+		       devfs_handle_t xconn,
+		       int error_code,
+		       ioerror_mode_t mode,
+		       ioerror_t *ioerror)
+{
+    xwidget_info_t          xwidget_info;
+
+#if DEBUG && ERROR_DEBUG
+    cmn_err(CE_CONT, "%v: xtalk_error_handler\n", xconn);
+#endif
+
+    xwidget_info = xwidget_info_get(xconn);
+    /* Make sure that xwidget_info is a valid pointer before dereferencing it.
+     * We could come in here during very early initialization. 
+     */
+    if (xwidget_info && xwidget_info->w_efunc)
+	return xwidget_info->w_efunc
+	    (xwidget_info->w_einfo,
+	     error_code, mode, ioerror);
+    /*
+     * no error handler registered for
+     * the offending port. it's not clear
+     * what needs to be done, but reporting
+     * it would be a good thing, unless it
+     * is a mode that requires nothing.
+     */
+    if ((mode == MODE_DEVPROBE) || (mode == MODE_DEVUSERERROR) ||
+	(mode == MODE_DEVREENABLE))
+	return IOERROR_HANDLED;
+
+#ifdef IRIX
+    cmn_err(CE_WARN, "Xbow at %v encountered Fatal error", xconn);
+#endif
+    ioerror_dump("xtalk", error_code, mode, ioerror);
+
+    return IOERROR_UNHANDLED;
+}
+
+int
+xtalk_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
+{
+    return DEV_FUNC(xconn_vhdl, error_devenable) (xconn_vhdl, devnum, error_code);
+}
+
+
+/* =====================================================================
+ *                    CONFIGURATION MANAGEMENT
+ */
+
+/*
+ * Startup a crosstalk provider
+ */
+void
+xtalk_provider_startup(devfs_handle_t xtalk_provider)
+{
+    DEV_FUNC(xtalk_provider, provider_startup)
+	(xtalk_provider);
+}
+
+
+/*
+ * Shutdown a crosstalk provider
+ */
+void
+xtalk_provider_shutdown(devfs_handle_t xtalk_provider)
+{
+    DEV_FUNC(xtalk_provider, provider_shutdown)
+	(xtalk_provider);
+}
+
+/* 
+ * Enable a device on an xtalk widget 
+ */
+void
+xtalk_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
+{
+    DEV_FUNC(xconn_vhdl, widgetdev_enable) (xconn_vhdl, devnum);
+}
+
+/* 
+ * Shutdown a device on a xtalk widget 
+ */
+void
+xtalk_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
+{
+    DEV_FUNC(xconn_vhdl, widgetdev_shutdown) (xconn_vhdl, devnum);
+}
+
+int
+xtalk_dma_enabled(devfs_handle_t xconn_vhdl)
+{
+    return DEV_FUNC(xconn_vhdl, dma_enabled) (xconn_vhdl);
+}
+/*
+ * Generic crosstalk functions, for use with all crosstalk providers
+ * and all crosstalk devices.
+ */
+
+/****** Generic crosstalk interrupt interfaces ******/
+devfs_handle_t
+xtalk_intr_dev_get(xtalk_intr_t xtalk_intr)
+{
+    return (xtalk_intr->xi_dev);
+}
+
+xwidgetnum_t
+xtalk_intr_target_get(xtalk_intr_t xtalk_intr)
+{
+    return (xtalk_intr->xi_target);
+}
+
+xtalk_intr_vector_t
+xtalk_intr_vector_get(xtalk_intr_t xtalk_intr)
+{
+    return (xtalk_intr->xi_vector);
+}
+
+iopaddr_t
+xtalk_intr_addr_get(struct xtalk_intr_s *xtalk_intr)
+{
+    return (xtalk_intr->xi_addr);
+}
+
+void                   *
+xtalk_intr_sfarg_get(xtalk_intr_t xtalk_intr)
+{
+    return (xtalk_intr->xi_sfarg);
+}
+
+
+int
+xtalk_intr_flags_get(xtalk_intr_t xtalk_intr)
+{
+	return(xtalk_intr->xi_flags);
+}
+
+/****** Generic crosstalk pio interfaces ******/
+devfs_handle_t
+xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap)
+{
+    return (xtalk_piomap->xp_dev);
+}
+
+xwidgetnum_t
+xtalk_pio_target_get(xtalk_piomap_t xtalk_piomap)
+{
+    return (xtalk_piomap->xp_target);
+}
+
+iopaddr_t
+xtalk_pio_xtalk_addr_get(xtalk_piomap_t xtalk_piomap)
+{
+    return (xtalk_piomap->xp_xtalk_addr);
+}
+
+ulong
+xtalk_pio_mapsz_get(xtalk_piomap_t xtalk_piomap)
+{
+    return (xtalk_piomap->xp_mapsz);
+}
+
+caddr_t
+xtalk_pio_kvaddr_get(xtalk_piomap_t xtalk_piomap)
+{
+    return (xtalk_piomap->xp_kvaddr);
+}
+
+
+/****** Generic crosstalk dma interfaces ******/
+devfs_handle_t
+xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap)
+{
+    return (xtalk_dmamap->xd_dev);
+}
+
+xwidgetnum_t
+xtalk_dma_target_get(xtalk_dmamap_t xtalk_dmamap)
+{
+    return (xtalk_dmamap->xd_target);
+}
+
+
+/****** Generic crosstalk widget information interfaces ******/
+
+/* xwidget_info_chk:
+ * check to see if this vertex is a widget;
+ * if so, return its widget_info (if any).
+ * if not, return NULL.
+ */
+xwidget_info_t
+xwidget_info_chk(devfs_handle_t xwidget)
+{
+    arbitrary_info_t        ainfo = 0;
+
+    hwgraph_info_get_LBL(xwidget, INFO_LBL_XWIDGET, &ainfo);
+    return (xwidget_info_t) ainfo;
+}
+
+
+xwidget_info_t
+xwidget_info_get(devfs_handle_t xwidget)
+{
+    xwidget_info_t          widget_info;
+
+    widget_info = (xwidget_info_t)
+	hwgraph_fastinfo_get(xwidget);
+
+#ifdef IRIX
+    if ((widget_info != NULL) &&
+	(widget_info->w_fingerprint != widget_info_fingerprint))
+	cmn_err(CE_PANIC, "%v bad xwidget_info", xwidget);
+#endif
+
+    return (widget_info);
+}
+
+void
+xwidget_info_set(devfs_handle_t xwidget, xwidget_info_t widget_info)
+{
+    if (widget_info != NULL)
+	widget_info->w_fingerprint = widget_info_fingerprint;
+
+    hwgraph_fastinfo_set(xwidget, (arbitrary_info_t) widget_info);
+
+    /* Also, mark this vertex as an xwidget,
+     * and use the widget_info, so xwidget_info_chk
+     * can work (and be fairly efficient).
+     */
+    hwgraph_info_add_LBL(xwidget, INFO_LBL_XWIDGET,
+			 (arbitrary_info_t) widget_info);
+}
+
+devfs_handle_t
+xwidget_info_dev_get(xwidget_info_t xwidget_info)
+{
+    if (xwidget_info == NULL)
+	panic("null xwidget_info");
+    return (xwidget_info->w_vertex);
+}
+
+xwidgetnum_t
+xwidget_info_id_get(xwidget_info_t xwidget_info)
+{
+    if (xwidget_info == NULL)
+	panic("null xwidget_info");
+    return (xwidget_info->w_id);
+}
+
+
+devfs_handle_t
+xwidget_info_master_get(xwidget_info_t xwidget_info)
+{
+    if (xwidget_info == NULL)
+	panic("null xwidget_info");
+    return (xwidget_info->w_master);
+}
+
+xwidgetnum_t
+xwidget_info_masterid_get(xwidget_info_t xwidget_info)
+{
+    if (xwidget_info == NULL)
+	panic("null xwidget_info");
+    return (xwidget_info->w_masterid);
+}
+
+xwidget_part_num_t
+xwidget_info_part_num_get(xwidget_info_t xwidget_info)
+{
+    if (xwidget_info == NULL)
+	panic("null xwidget_info");
+    return (xwidget_info->w_hwid.part_num);
+}
+
+xwidget_mfg_num_t
+xwidget_info_mfg_num_get(xwidget_info_t xwidget_info)
+{
+    if (xwidget_info == NULL)
+	panic("null xwidget_info");
+    return (xwidget_info->w_hwid.mfg_num);
+}
+/* Extract the widget name from the widget information
+ * for the xtalk widget.
+ */
+char *
+xwidget_info_name_get(xwidget_info_t xwidget_info)
+{
+    if (xwidget_info == NULL)
+	panic("null xwidget info");
+    return(xwidget_info->w_name);
+}
+/****** Generic crosstalk initialization interfaces ******/
+
+/*
+ * One-time initialization needed for systems that support crosstalk.
+ */
+void
+xtalk_init(void)
+{
+    cdl_p                   cp;
+
+#if DEBUG && ATTACH_DEBUG
+    printf("xtalk_init\n");
+#endif
+    /* Allocate the registry.
+     * We might already have one.
+     * If we don't, go get one.
+     * MPness: someone might have
+     * set one up for us while we
+     * were not looking; use an atomic
+     * compare-and-swap to commit to
+     * using the new registry if and
+     * only if nobody else did first.
+     * If someone did get there first,
+     * toss the one we allocated back
+     * into the pool.
+     */
+    if (xtalk_registry == NULL) {
+	cp = cdl_new(EDGE_LBL_XIO, "part", "mfgr");
+	if (!compare_and_swap_ptr((void **) &xtalk_registry, NULL, (void *) cp)) {
+	    cdl_del(cp);
+	}
+    }
+    ASSERT(xtalk_registry != NULL);
+}
+
+/*
+ * Associate a set of xtalk_provider functions with a vertex.
+ */
+void
+xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns)
+{
+    hwgraph_fastinfo_set(provider, (arbitrary_info_t) xtalk_fns);
+}
+
+/*
+ * Disassociate a set of xtalk_provider functions with a vertex.
+ */
+void
+xtalk_provider_unregister(devfs_handle_t provider)
+{
+    hwgraph_fastinfo_set(provider, (arbitrary_info_t)NULL);
+}
+
+/*
+ * Obtain a pointer to the xtalk_provider functions for a specified Crosstalk
+ * provider.
+ */
+xtalk_provider_t       *
+xtalk_provider_fns_get(devfs_handle_t provider)
+{
+    return ((xtalk_provider_t *) hwgraph_fastinfo_get(provider));
+}
+
+/*
+ * Announce a driver for a particular crosstalk part.
+ * Returns 0 on success or -1 on failure.  Failure occurs if the
+ * specified hardware already has a driver.
+ */
+/*ARGSUSED4 */
+int
+xwidget_driver_register(xwidget_part_num_t part_num,
+			xwidget_mfg_num_t mfg_num,
+			char *driver_prefix,
+			unsigned flags)
+{
+    /* a driver's init routine could call
+     * xwidget_driver_register before the
+     * system calls xtalk_init; so, we
+     * make the call here.
+     */
+    if (xtalk_registry == NULL)
+	xtalk_init();
+
+    return cdl_add_driver(xtalk_registry,
+			  part_num, mfg_num,
+			  driver_prefix, flags);
+}
+
+/*
+ * Inform xtalk infrastructure that a driver is no longer available for
+ * handling any widgets.
+ */
+void
+xwidget_driver_unregister(char *driver_prefix)
+{
+    /* before a driver calls unregister,
+     * it must have called register; so we
+     * can assume we have a registry here.
+     */
+    ASSERT(xtalk_registry != NULL);
+
+    cdl_del_driver(xtalk_registry, driver_prefix);
+}
+
+/*
+ * Call some function with each vertex that
+ * might be one of this driver's attach points.
+ */
+void
+xtalk_iterate(char *driver_prefix,
+	      xtalk_iter_f *func)
+{
+    ASSERT(xtalk_registry != NULL);
+
+    cdl_iterate(xtalk_registry, driver_prefix, (cdl_iter_f *)func);
+}
+
+/*
+ * xwidget_register:
+ *	Register a xtalk device (xwidget) by doing the following.
+ *      -allocate and initialize xwidget_info data
+ *      -allocate a hwgraph vertex with name based on widget number (id)
+ *      -look up the widget's initialization function and call it,
+ *      or remember the vertex for later initialization.
+ *
+ */
+int
+xwidget_register(xwidget_hwid_t hwid,		/* widget's hardware ID */
+		 devfs_handle_t 	widget,		/* widget to initialize */
+		 xwidgetnum_t 	id,		/* widget's target id (0..f) */
+		 devfs_handle_t 	master,		/* widget's master vertex */
+		 xwidgetnum_t 	targetid,	/* master's target id (9/a) */
+		 async_attach_t aa)
+{			
+    xwidget_info_t          widget_info;
+    char		    *s,devnm[MAXDEVNAME];
+
+    /* Allocate widget_info and associate it with widget vertex */
+    NEW(widget_info);
+
+    /* Initialize widget_info */
+    widget_info->w_vertex = widget;
+    widget_info->w_id = id;
+    widget_info->w_master = master;
+    widget_info->w_masterid = targetid;
+    widget_info->w_hwid = *hwid;	/* structure copy */
+    widget_info->w_efunc = 0;
+    widget_info->w_einfo = 0;
+    /*
+     * get the name of this xwidget vertex and keep the info.
+     * This is needed during errors and interrupts, but as
+     * long as we have it, we can use it elsewhere.
+     */
+    s = dev_to_name(widget,devnm,MAXDEVNAME);
+    printk("xwidget_register: dev_to_name widget id 0x%p, s = %s\n", widget, s);
+    widget_info->w_name = kmalloc(strlen(s) + 1, GFP_KERNEL);	/* XXX(review): kmalloc result unchecked -- confirm failure policy */
+    strcpy(widget_info->w_name,s);
+    
+    xwidget_info_set(widget, widget_info);
+
+    device_master_set(widget, master);
+
+    /* All the driver init routines (including
+     * xtalk_init) are called before we get into
+     * attaching devices, so we can assume we
+     * have a registry here.
+     */
+    ASSERT(xtalk_registry != NULL);
+
+    /* 
+     * Add pointer to async attach info -- tear down will be done when
+     * the particular descendant is done with the info.
+     */
+    if (aa)
+	    async_attach_add_info(widget, aa);
+
+    return cdl_add_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num, widget);
+}
+
+/*
+ * xwidget_unregister :
+ *	Unregister the xtalk device and detach all its hwgraph namespace.
+ */
+int
+xwidget_unregister(devfs_handle_t widget)
+{
+    xwidget_info_t	widget_info;
+    xwidget_hwid_t	hwid;
+
+    /* Make sure that we have valid widget information initialized */
+    if (!(widget_info = xwidget_info_get(widget)))
+	return(1);
+
+    /* Remove the inventory information associated
+     * with the widget.
+     */
+    hwgraph_inventory_remove(widget, -1, -1, -1, -1, -1);
+    
+    hwid = &(widget_info->w_hwid);
+
+    cdl_del_connpt(xtalk_registry, hwid->part_num, 
+		   hwid->mfg_num, widget);
+
+    /* Clean out the xwidget information */
+    (void)kfree(widget_info->w_name);
+    BZERO((void *)widget_info, sizeof(*widget_info));	/* was sizeof(widget_info): zeroed only a pointer's worth of bytes */
+    DEL(widget_info);
+    
+    return(0);
+}
+
+void
+xwidget_error_register(devfs_handle_t xwidget,
+		       error_handler_f *efunc,
+		       error_handler_arg_t einfo)
+{
+    xwidget_info_t          xwidget_info;
+
+    xwidget_info = xwidget_info_get(xwidget);
+    ASSERT(xwidget_info != NULL);
+    xwidget_info->w_efunc = efunc;
+    xwidget_info->w_einfo = einfo;
+}
+
+/*
+ * Issue a link reset to a widget.
+ */
+void
+xwidget_reset(devfs_handle_t xwidget)
+{
+    xswitch_reset_link(xwidget);
+
+}
+
+
+void
+xwidget_gfx_reset(devfs_handle_t xwidget)
+{
+    xwidget_info_t info;
+
+    xswitch_reset_link(xwidget);
+    info = xwidget_info_get(xwidget);
+#ifdef IRIX
+    ASSERT_ALWAYS(info != NULL);
+#endif
+
+    /*
+     * Enable this for other architectures once we add widget_reset to the
+     * xtalk provider interface.
+     */
+    DEV_FUNC(xtalk_provider, widget_reset)
+	(xwidget_info_master_get(info), xwidget_info_id_get(info));
+}
+
+#define ANON_XWIDGET_NAME	"No Name"	/* Default Widget Name */
+
+/* Get the canonical hwgraph  name of xtalk widget */
+char *
+xwidget_name_get(devfs_handle_t xwidget_vhdl)
+{
+	xwidget_info_t  info;
+
+	/* If we have a bogus widget handle then return
+	 * a default anonymous widget name.
+	 */
+	if (xwidget_vhdl == GRAPH_VERTEX_NONE)
+	    return(ANON_XWIDGET_NAME);
+	/* Read the widget name stored in the widget info
+	 * for the widget setup during widget initialization.
+	 */
+	info = xwidget_info_get(xwidget_vhdl);
+	ASSERT(info != NULL);
+	return(xwidget_info_name_get(info));
+}
+/*
+ * xtalk_device_powerup
+ *	Reset and initialize the specified xtalk widget
+ */
+int 
+xtalk_device_powerup(devfs_handle_t xbus_vhdl, xwidgetnum_t widget)
+{
+#ifndef CONFIG_IA64_SGI_IO
+	extern void	io_xswitch_widget_init(devfs_handle_t,
+					       devfs_handle_t,
+					       xwidgetnum_t,
+					       async_attach_t);
+	io_xswitch_widget_init(xbus_vhdl, 
+			       hwgraph_connectpt_get(xbus_vhdl),
+			       widget,
+			       NULL);
+#endif	/* CONFIG_IA64_SGI_IO */
+	
+	return(0);
+}
+/*
+ * xtalk_device_shutdown
+ *	Disable  the specified xtalk widget and clean out all the software
+ *	state associated with it.
+ */
+int
+xtalk_device_shutdown(devfs_handle_t xbus_vhdl, xwidgetnum_t widget)
+{
+	devfs_handle_t	widget_vhdl;
+	char		edge_name[8];
+
+	sprintf(edge_name, "%d", widget);
+	if (hwgraph_traverse(xbus_vhdl, edge_name, &widget_vhdl) 
+	    != GRAPH_SUCCESS)
+		return(1);
+
+	xwidget_unregister(widget_vhdl);
+	
+	return(0);
+}
+/*
+ * xtalk_device_inquiry
+ *	Find out hardware information about the xtalk widget.
+ */
+int
+xtalk_device_inquiry(devfs_handle_t xbus_vhdl, xwidgetnum_t widget)
+{
+
+	extern void hub_device_inquiry(devfs_handle_t, xwidgetnum_t);
+	hub_device_inquiry(xbus_vhdl, widget);
+	return(0);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/Makefile linux/arch/ia64/sn/sn1/Makefile
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/Makefile	Thu Mar 30 16:56:04 2000
+++ linux/arch/ia64/sn/sn1/Makefile	Thu Jan  4 13:00:15 2001
@@ -5,20 +5,27 @@
 # Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
 #
 
-CFLAGS          :=     $(CFLAGS) -DCONFIG_SGI_SN1 -DSN1 -DSN -DSOFTSDV \
-			-DLANGUAGE_C=1 -D_LANGUAGE_C=1
-AFLAGS          :=      $(AFLAGS) -DCONFIG_SGI_SN1 -DSN1 -DSOFTSDV
+EXTRA_CFLAGS	:= -DSN -DLANGUAGE_C=1 -D_LANGUAGE_C=1 -I. -DBRINGUP \
+		   -DDIRECT_L1_CONSOLE -DNUMA_BASE -DSIMULATED_KLGRAPH \
+		   -DNUMA_MIGR_CONTROL -DLITTLE_ENDIAN -DREAL_HARDWARE \
+		   -DNEW_INTERRUPTS -DCONFIG_IA64_SGI_IO
 
 .S.s:
-	$(CPP) $(AFLAGS) -o $*.s $<
+	$(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -o $*.s $<
 .S.o:
-	$(CC) $(AFLAGS) -c -o $*.o $<
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $<
 
 all: sn1.a
 
 O_TARGET        = sn1.a
 O_HEADERS       =
-O_OBJS          = irq.o setup.o
+O_OBJS          = irq.o setup.o iomv.o mm.o smp.o synergy.o sn1_asm.o \
+		discontig.o
+
+ifeq ($(CONFIG_IA64_SGI_AUTOTEST),y)
+O_OBJS          += llsc4.o
+endif
+
 
 ifeq ($(CONFIG_IA64_GENERIC),y)
 O_OBJS		+= machvec.o
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/discontig.c linux/arch/ia64/sn/sn1/discontig.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/discontig.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/sn1/discontig.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2000, Silicon Graphics, sprasad@engr.sgi.com
+ * Copyright 2000, Kanoj Sarcar, kanoj@sgi.com
+ */
+
+/*
+ * Contains common definitions and globals for NUMA platform
+ * support. For now, SN-IA64 and SN-MIPS are the NUMA platforms.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <asm/sn/mmzone.h>
+#include <asm/efi.h>
+
+extern int numnodes ;
+
+plat_pg_data_t plat_node_data[MAXNODES];
+bootmem_data_t bdata[MAXNODES];
+int chunktonid[MAXCHUNKS];
+int nasid_map[MAXNASIDS];
+
+void __init
+init_chunktonid(void)
+{
+	memset(chunktonid, -1, sizeof(chunktonid)) ;
+}
+
+void __init
+init_nodeidmap(void)
+{
+	memset(nasid_map, -1, sizeof(nasid_map)) ;
+}
+
+int		cnodeid_map[MAXNODES] ;
+void __init
+init_cnodeidmap(void)
+{
+	memset(cnodeid_map, -1, sizeof(cnodeid_map)) ;
+}
+
+int
+numa_debug(void)
+{
+       panic("NUMA debug\n");
+       return(0);
+}
+
+int __init
+build_cnodeid_map(void)
+{
+	int	i,j ;
+
+	for (i=0,j=0;i<MAXNASIDS;i++) {
+		if (nasid_map[i] >= 0)
+			cnodeid_map[j++] = i ;
+	}
+	return j ;
+}
+
+/*
+ * Since efi_memmap_walk merges contiguous banks, this code will need
+ * to find all the nasids covered by the input memory descriptor.
+ */
+static int __init
+build_nasid_map(unsigned long start, unsigned long end, void *arg)
+{
+	unsigned long vaddr = start;
+	int nasid = GetNasId(__pa(vaddr));
+
+	while (vaddr < end) {
+		if (nasid < MAXNASIDS)
+			nasid_map[nasid] = 0;
+		else
+			panic("build_nasid_map");
+		vaddr = (unsigned long)__va((unsigned long)(++nasid) << 
+							SN1_NODE_ADDR_SHIFT);
+	}
+	return 0;
+}
+
+void __init
+fix_nasid_map(void)
+{
+	int	i ;
+	int		j ;
+
+	/* For every nasid */
+	for (j=0;j<MAXNASIDS;j++) {
+		for (i=0;i<MAXNODES;i++) {
+			if (CNODEID_TO_NASID(i) == j)
+				break ;
+		}
+		if (i<MAXNODES)
+			nasid_map[j] = i ;
+	}
+}
+
+static void __init
+dump_bootmem_info(void)
+{
+        int     i;
+        struct bootmem_data *bdata ;
+
+	printk("CNODE INFO ....\n") ;
+        for (i=0;i<numnodes;i++) {
+		printk("%d ", CNODEID_TO_NASID(i)) ;
+	}
+	printk("\n") ;
+
+	printk("BOOT MEM INFO ....\n") ;
+        printk("Node   Start                LowPfn               BootmemMap\n") ;
+        for (i=0;i<numnodes;i++) {
+                bdata = NODE_DATA(i)->bdata ;
+                printk("%d      0x%016lx   0x%016lx   0x%016lx\n", i,
+                        bdata->node_boot_start, bdata->node_low_pfn,
+                        (unsigned long)bdata->node_bootmem_map) ;
+        }
+}
+
+void __init
+discontig_mem_init(void)
+{
+	extern void setup_sn1_bootmem(int);
+	int		maxnodes ;
+
+        init_chunktonid() ;
+	init_nodeidmap() ;
+	init_cnodeidmap() ;
+	efi_memmap_walk(build_nasid_map, 0) ;
+	maxnodes = build_cnodeid_map() ;
+	fix_nasid_map() ;
+#ifdef CONFIG_DISCONTIGMEM
+	setup_sn1_bootmem(maxnodes) ;
+#endif
+	numnodes = maxnodes;
+	dump_bootmem_info() ;
+}
+
+void __init
+discontig_paging_init(void)
+{
+	int i;
+	unsigned long max_dma, zones_size[MAX_NR_ZONES];
+	void dump_node_data(void);
+
+        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+	for (i = 0; i < numnodes; i++) {
+	       extern void free_unused_memmap_node(int);
+               unsigned long startpfn = __pa((void *)NODE_START(i)) >> PAGE_SHIFT;
+               unsigned long numpfn = NODE_SIZE(i) >> PAGE_SHIFT;
+               memset(zones_size, 0, sizeof(zones_size));
+
+               if ((startpfn + numpfn) < max_dma) {
+                       zones_size[ZONE_DMA] = numpfn;
+               } else if (startpfn > max_dma) {
+                       zones_size[ZONE_NORMAL] = numpfn;
+               } else {
+                       zones_size[ZONE_DMA] = (max_dma - startpfn);
+                       zones_size[ZONE_NORMAL] = numpfn - zones_size[ZONE_DMA];
+               }
+               free_area_init_node(i, NODE_DATA(i), NULL, zones_size, startpfn<<PAGE_SHIFT, 0);
+	       free_unused_memmap_node(i);
+	}
+	dump_node_data();
+}
+
+
+void
+dump_node_data(void)
+{
+        int     i;
+
+	printk("NODE DATA ....\n") ;
+	printk("Node, Start, Size, MemMap, BitMap, StartP, Mapnr, Size, Id\n") ;
+        for (i=0;i<numnodes;i++) {
+		printk("%d, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %d\n", 
+			CNODEID_TO_NASID(i), NODE_START(i), NODE_SIZE(i), 
+			(long)NODE_MEM_MAP(i), (long)NODE_DATA(i)->valid_addr_bitmap, 
+			NODE_DATA(i)->node_start_paddr, 
+			NODE_DATA(i)->node_start_mapnr,
+			NODE_DATA(i)->node_size,
+			NODE_DATA(i)->node_id)  ;
+	}
+}
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/iomv.c linux/arch/ia64/sn/sn1/iomv.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/iomv.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/sn1/iomv.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,100 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
+ */
+
+#include <asm/io.h>
+#include <linux/pci.h>
+
+static inline void *
+sn1_io_addr(unsigned long port)
+{
+	if (!IS_RUNNING_ON_SIMULATOR()) {
+		return( (void *)  (port | __IA64_UNCACHED_OFFSET));
+	} else {
+		unsigned long io_base;
+		unsigned long addr;
+ 
+		/*
+ 		 * word align port, but need more than 10 bits
+ 		 * for accessing registers in bedrock local block
+ 		 * (so we don't do port&0xfff)
+ 		 */
+		if (port == 0x1f6 || port == 0x1f7
+			|| port == 0x3f6 || port == 0x3f7
+			|| port == 0x1f0 || port == 0x1f1
+			|| port == 0x1f3 || port == 0x1f4
+			|| port == 0x1f2 || port == 0x1f5)  {
+			io_base = __IA64_UNCACHED_OFFSET | 0x00000FFFFC000000;
+			addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
+		} else {
+			addr = __ia64_get_io_port_base() | ((port >> 2) << 2);
+		}
+		return(void *) addr;
+	}
+}
+
+unsigned int
+sn1_inb (unsigned long port)
+{
+	volatile unsigned char *addr = sn1_io_addr(port);
+	unsigned char ret;
+
+	ret = *addr;
+	__ia64_mf_a();
+	return ret;
+}
+
+unsigned int
+sn1_inw (unsigned long port)
+{
+	volatile unsigned short *addr = sn1_io_addr(port);
+	unsigned short ret;
+
+	ret = *addr;
+	__ia64_mf_a();
+	return ret;
+}
+
+unsigned int
+sn1_inl (unsigned long port)
+{
+	volatile unsigned int *addr = sn1_io_addr(port);
+	unsigned int ret;
+
+	ret = *addr;
+	__ia64_mf_a();
+	return ret;
+}
+
+void
+sn1_outb (unsigned char val, unsigned long port)
+{
+	volatile unsigned char *addr = sn1_io_addr(port);
+
+	*addr = val;
+	__ia64_mf_a();
+}
+
+void
+sn1_outw (unsigned short val, unsigned long port)
+{
+	volatile unsigned short *addr = sn1_io_addr(port);
+
+	*addr = val;
+	__ia64_mf_a();
+}
+
+void
+sn1_outl (unsigned int val, unsigned long port)
+{
+	volatile unsigned int *addr = sn1_io_addr(port);
+
+	*addr = val;
+	__ia64_mf_a();
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/irq.c linux/arch/ia64/sn/sn1/irq.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/irq.c	Fri Aug 11 19:09:06 2000
+++ linux/arch/ia64/sn/sn1/irq.c	Thu Jan  4 13:00:15 2001
@@ -1,8 +1,56 @@
-#include <linux/kernel.h>
+/*
+ * Platform dependent support for SGI SN1
+ *
+ * Copyright (C) 2000   Silicon Graphics
+ * Copyright (C) 2000   Jack Steiner (steiner@sgi.com)
+ * Copyright (C) 2000   Alan Mayer (ajm@sgi.com)
+ */
+
+#include <linux/init.h>
 #include <linux/sched.h>
+#include <asm/current.h>
 #include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/types.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pciio_private.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/sn1/bedrock.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn1/addrs.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/sn1/arch.h>
+#include <asm/sn/synergy.h>
+
+
+int bit_pos_to_irq(int bit);
+int irq_to_bit_pos(int irq);
+void add_interrupt_randomness(int irq);
+void * kmalloc(size_t size, int flags);
+void kfree(const void *);
+int sgi_pci_intr_support (unsigned int, device_desc_t *, devfs_handle_t *, pciio_intr_line_t *, devfs_handle_t *);
+pciio_intr_t pciio_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+int request_irq(unsigned int, void (*)(int, void *, struct pt_regs *), unsigned long, const char *, void *);
+
+/* This should be dynamically allocated, at least part of it. */
+/* For the time being, though, we'll statically allocate it */
+/* because kmalloc hasn't been initialized at the time this */
+/* array is initialized.  One way to do it would be to statically */
+/* allocate the data for node 0, then let other nodes, as they */
+/* need it, dynamically allocate their own data space. */
 
-#include <asm/ptrace.h>
+struct sn1_cnode_action_list *sn1_node_actions[MAX_COMPACT_NODES];
+struct sn1_cnode_action_list sn1_actions[MAX_COMPACT_NODES][256];
+
+
+extern int numnodes;
 
 static unsigned int
 sn1_startup_irq(unsigned int irq)
@@ -25,20 +73,192 @@
 {
 }
 
+static void
+sn1_ack_irq(unsigned int irq)
+{
+}
+
+static void
+sn1_end_irq(unsigned int irq)
+{
+}
+
+static void
+sn1_set_affinity_irq(unsigned int irq, unsigned long mask)
+{
+}
+
+
+static void
+sn1_handle_irq(int irq, void *dummy, struct pt_regs *regs)
+{
+	int bit, cnode;
+	struct sn1_cnode_action_list *alp;
+	struct sn1_intr_action *ap;
+	void (*handler)(int, void *, struct pt_regs *);
+	unsigned long flags = 0;
+	int cpuid = smp_processor_id();
+
+
+	bit = irq_to_bit_pos(irq);
+	LOCAL_HUB_CLR_INTR(bit);
+	cnode = cpuid_to_cnodeid(cpuid);
+	alp = sn1_node_actions[cnode];
+	ap = alp[irq].action_list;
+	if (ap == NULL) {
+		return;
+	}
+	while (ap) {
+		flags |= ap->flags;
+		handler = ap->handler;
+		(*handler)(irq,ap->intr_arg,regs);
+		ap = ap->next;
+	}
+	if ((flags & SA_SAMPLE_RANDOM) != 0)
+                add_interrupt_randomness(irq);
+
+        return;
+}
+
 struct hw_interrupt_type irq_type_sn1 = {
         "sn1_irq",
         sn1_startup_irq,
         sn1_shutdown_irq,
         sn1_enable_irq,
-        sn1_disable_irq
+        sn1_disable_irq,
+        sn1_ack_irq,
+        sn1_end_irq,
+        sn1_set_affinity_irq
+};
+
+struct irqaction sn1_irqaction = {
+	sn1_handle_irq,
+	0,
+	0,
+	NULL,
+	NULL,
+	NULL,
 };
 
 void
 sn1_irq_init (void)
 {
-	int i;
+	int i,j;
+
+	for (i = 0; i < NR_IRQS; ++i) {	/* irq_desc[] has NR_IRQS entries; '<=' read one past the end */
+		if (irq_desc[i].handler == &no_irq_type) {
+			irq_desc[i].handler = &irq_type_sn1;
+			if (i >=71 && i <= 181) {
+				irq_desc[i].action = &sn1_irqaction;
+			}
+		}
+	}
+
+	for (i = 0; i < numnodes; i++) {
+		sn1_node_actions[i] = sn1_actions[i];
+		memset(sn1_node_actions[i], 0,
+			sizeof(struct sn1_cnode_action_list) *
+			(IA64_MAX_VECTORED_IRQ + 1));
+		for (j=0; j<IA64_MAX_VECTORED_IRQ+1; j++) {
+			spin_lock_init(&sn1_node_actions[i][j].action_list_lock);
+		}
+	}
+}
+
+
+int          
+sn1_request_irq (unsigned int requested_irq, void (*handler)(int, void *, struct pt_regs *),
+             unsigned long irqflags, const char * devname, void *dev_id)
+{ 
+	devfs_handle_t curr_dev;
+	devfs_handle_t dev;
+	pciio_intr_t intr_handle;
+	pciio_intr_line_t line;
+	device_desc_t dev_desc;
+        int cpuid, bit, cnode;
+	struct sn1_intr_action *ap, *new_ap;
+	struct sn1_cnode_action_list *alp;
+	int irq;
 
-	for (i = IA64_MIN_VECTORED_IRQ; i <= IA64_MAX_VECTORED_IRQ; ++i) {
-		irq_desc[i].handler = &irq_type_sn1;
+	if ( (requested_irq & 0xff) == 0 ) {
+		int ret;
+
+		sgi_pci_intr_support(requested_irq,
+			&dev_desc, &dev, &line, &curr_dev);
+		intr_handle = pciio_intr_alloc(curr_dev, NULL, line, curr_dev);
+		bit = intr_handle->pi_irq;
+		cpuid = intr_handle->pi_cpu;
+		irq = bit_pos_to_irq(bit);
+		cnode = cpuid_to_cnodeid(cpuid);
+		new_ap = (struct sn1_intr_action *)kmalloc(
+			sizeof(struct sn1_intr_action), GFP_KERNEL);
+		irq_desc[irq].status = 0;
+		new_ap->handler = handler;
+		new_ap->intr_arg = dev_id;
+		new_ap->flags = irqflags;
+		new_ap->next = NULL;
+		alp = sn1_node_actions[cnode];
+
+		spin_lock(&alp[irq].action_list_lock);
+		ap = alp[irq].action_list;
+		/* check action list for "share" consistency */
+		while (ap){
+			if (!(ap->flags & irqflags & SA_SHIRQ) ) {
+				spin_unlock(&alp[irq].action_list_lock);	/* unlock BEFORE returning (was dead code after return) */
+				kfree(new_ap); return(-EBUSY);	/* and free the unused action */
+			}
+			ap = ap->next;
+		}
+		ap = alp[irq].action_list;
+		if (ap) {
+			while (ap->next) {
+				ap = ap->next;
+			}
+			ap->next = new_ap;
+		} else {
+			alp[irq].action_list = new_ap;
+		}
+		ret = pciio_intr_connect(intr_handle, (intr_func_t)handler, dev_id, NULL);
+		if (ret) { /* connect failed, undo what we did. */
+			new_ap = alp[irq].action_list;
+			if (new_ap == ap) {
+				alp[irq].action_list = NULL;
+				kfree(ap);
+			} else {
+				while (new_ap->next && new_ap->next != ap) {
+					new_ap = new_ap->next;
+				}
+				if (new_ap->next == ap) {
+					new_ap->next = ap->next;
+					kfree(ap);
+				}
+			}
+		}
+			
+		spin_unlock(&alp[irq].action_list_lock);
+		return(ret);
+	} else {
+		return(request_irq(requested_irq, handler, irqflags, devname, dev_id));
 	}
+}
+
+#if !defined(CONFIG_IA64_SGI_IO)
+void
+sn1_pci_fixup(int arg)
+{
+}
+#endif
+
+int
+bit_pos_to_irq(int bit) {
+#define BIT_TO_IRQ 64
+
+        return bit + BIT_TO_IRQ;
+}
+
+int
+irq_to_bit_pos(int irq) {
+#define IRQ_TO_BIT 64
+
+        return irq - IRQ_TO_BIT;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/llsc4.c linux/arch/ia64/sn/sn1/llsc4.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/llsc4.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/sn1/llsc4.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,943 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <asm/efi.h>
+#include <asm/page.h>
+#include <linux/threads.h>
+
+extern void bringup_set_led_bits(u8 bits, u8 mask);
+
+#include "llsc4.h"
+
+
+#ifdef STANDALONE
+#include "lock.h"
+#endif
+
+#ifdef INTTEST
+static int	inttest=0;
+#endif
+
+
+/*
+ * Test parameter table for AUTOTEST
+ */
+typedef struct {
+	int	passes;
+	int	linecount;
+	int	linepad;
+} autotest_table_t;
+
+autotest_table_t autotest_table[] = {
+	{1000000,	2,	0x2b4		},
+	{1000000,	16,	0,		},
+	{1000000,	16,	4,		},
+	{1000000,	128,	0x44		},
+	{1000000,	128,	0x84		},
+	{1000000,	128,	0x200		},
+	{1000000,	128,	0x204		},
+	{1000000,	128,	0x2b4		},
+	{1000000,	2,	8*MB+0x2b4	},
+	{1000000,	16,	8*MB+0		},
+	{1000000,	16,	8*MB+4		},
+	{1000000,	128,	8*MB+0x44	},
+	{1000000,	128,	8*MB+0x84	},
+	{1000000,	128,	8*MB+0x200	},
+	{1000000,	128,	8*MB+0x204	},
+	{1000000,	128,	8*MB+0x2b4	},
+	{0}};
+
+/*
+ * Array of virtual addresses available for test purposes.
+ */
+
+typedef struct {
+	long	vstart;
+	long	vend;
+	long	nextaddr;
+	int	wrapcount;
+} memmap_t;
+
+memmap_t 	memmap[MAXCHUNKS];
+int		memmapx=0;
+
+typedef struct {
+	void	*addr;
+	long	data[16];
+	long	data_fc[16];
+} capture_line_t;
+
+typedef struct {
+	int	size;
+	void	*blockaddr;
+	void	*shadaddr;
+	long	blockdata[16];
+	long	shaddata[16];
+	long	blockdata_fc[16];
+	long	shaddata_fc[16];
+	long	synerr;
+} capture_t;
+
+/*
+ * PORTING NOTE: revisit this statement. On hardware we put mbase at 0 and
+ * the rest of the tables have to start at 1MB to skip PROM tables.
+ */
+#define THREADPRIVATE(t)	((threadprivate_t*)(((long)mbase)+1024*1024+t*((sizeof(threadprivate_t)+511)/512*512)))
+
+#define k_capture		mbase->sk_capture
+#define k_go			mbase->sk_go
+#define k_linecount		mbase->sk_linecount
+#define k_passes		mbase->sk_passes
+#define k_napticks		mbase->sk_napticks
+#define k_stop_on_error		mbase->sk_stop_on_error
+#define k_verbose		mbase->sk_verbose
+#define k_threadprivate		mbase->sk_threadprivate
+#define k_blocks		mbase->sk_blocks
+#define k_iter_msg		mbase->sk_iter_msg
+#define k_vv			mbase->sk_vv
+#define k_linepad		mbase->sk_linepad
+#define k_options		mbase->sk_options
+#define k_testnumber		mbase->sk_testnumber
+#define k_currentpass		mbase->sk_currentpass
+
+static long		blocks[MAX_LINECOUNT];		/* addresses of data blocks */
+static control_t	*mbase;
+static vint		initialized=0;
+
+static unsigned int ran_conf_llsc(int);
+static int  rerr(capture_t *, char *, void *, void *, int, int, int, int, int, int);
+static void dumpline(void *, char *, char *, void *, void *, int);
+static int  checkstop(int, int, uint);
+static void spin(int);
+static void capturedata(capture_t *, uint, void *, void *, int);
+static int  randn(uint max, uint *seed);
+static uint zrandom (uint *zranseed);
+static int  set_lock(uint *, uint);
+static int  clr_lock(uint *, uint);
+static void Speedo(void);
+
+int autotest_enabled=0;
+static int autotest_explicit_flush=0;
+static int llsctest_number=-1;
+static int errstop_enabled=0;
+static int fail_enabled=0;
+static int selective_trigger=0;
+
+static int __init autotest_enable(char *str)
+{
+        autotest_enabled = 1;
+	return 1;
+}
+static int __init set_llscxflush(char *str)
+{
+	autotest_explicit_flush = 1;
+	return 1;
+}
+static int __init set_llscselt(char *str)
+{
+	selective_trigger = 1;
+	return 1;
+}
+static int __init set_llsctest(char *str)
+{
+        llsctest_number = simple_strtol(str, &str, 10);
+	if (llsctest_number < 0 || llsctest_number > 15)
+		llsctest_number = -1;
+	return 1;
+}
+static int __init set_llscerrstop(char *str)
+{
+        errstop_enabled = 1;
+	return 1;
+}
+static int __init set_llscfail(char *str)
+{
+        fail_enabled = 8;
+	return 1;
+}
+
+static void print_params(void)
+{
+	printk ("********* Enter AUTOTEST facility on master cpu *************\n");
+	printk ("  Test options:\n");
+	printk ("     llsctest=<n>\t%d\tTest number to run (all = -1)\n", llsctest_number);
+	printk ("     llscerrstop \t%s\tStop on error\n", errstop_enabled ? "on" : "off");
+	printk ("     llscxflush  \t%s\tEnable explicit FC in test\n", autotest_explicit_flush ? "on" : "off");
+	printk ("     llscfail    \t%s\tForce a failure to test the trigger & error messages\n", fail_enabled ? "on" : "off");
+	printk ("     llscselt    \t%s\tSelective trigger on failures\n", selective_trigger ? "on" : "off");
+	printk ("\n");
+}
+__setup("autotest", autotest_enable);
+__setup("llsctest=", set_llsctest);
+__setup("llscerrstop", set_llscerrstop);
+__setup("llscxflush", set_llscxflush);
+__setup("llscfail", set_llscfail);
+__setup("llscselt", set_llscselt);
+
+
+extern inline void
+flush_buddy(void *p)
+{
+	long	lp;
+
+	if (autotest_explicit_flush)  {
+		lp = (long)p;
+		lp ^= 0x40;
+		asm volatile ("fc %0" :: "r"(lp) : "memory");
+		ia64_sync_i();
+		ia64_srlz_d();
+	}
+}
+
+static int
+set_lock(uint *lock, uint id)
+{
+	uint	old;
+	flush_buddy(lock);
+	old = cmpxchg_acq(lock, 0, id);
+	return (old == 0);
+}
+
+static int
+clr_lock(uint *lock, uint id)
+{
+	uint	old;
+	flush_buddy(lock);
+	old = cmpxchg_rel(lock, id, 0);
+	return (old == id);
+}
+
+static void
+zero_lock(uint *lock)
+{
+	flush_buddy(lock);
+	*lock = 0;
+}
+
+/*------------------------------------------------------------------------+
+| Routine  :  ran_conf_llsc - ll/sc shared data test                      |
+| Description: This test checks the coherency of shared data              |
++------------------------------------------------------------------------*/
+static unsigned int
+ran_conf_llsc(int thread)
+{
+	private_t	pval;
+	share_t		sval, sval2;
+	uint		vv, linei, slinei, sharei, pass;
+	long		t;
+	lock_t		lockpat;
+	share_t		*sharecopy;
+	long		verbose, napticks, passes, linecount, lcount;
+	dataline_t	*linep, *slinep;
+	int		s, seed;
+	threadprivate_t	*tp;
+	uint		iter_msg, iter_msg_i=0;
+	int		vv_mask;
+	int		correct_errors;
+	int		errs=0;
+	int		stillbad;
+	capture_t	capdata;
+	private_t	*privp;
+	share_t		*sharep;
+
+
+	linecount = k_linecount;
+	napticks = k_napticks;
+	verbose = k_verbose;
+	passes = k_passes;
+	iter_msg = k_iter_msg;
+	seed = (thread + 1) * 647;
+	tp = THREADPRIVATE(thread);
+	vv_mask = (k_vv>>((thread%16)*4)) & 0xf;
+	correct_errors = k_options&0xff;
+
+	memset (&tp->private, 0, sizeof(tp->private));
+	memset (&capdata, 0, sizeof(capdata));
+
+	for (pass = 1; passes == 0 || pass < passes; pass++) {
+		lockpat = (pass & 0x0fffffff) + (thread <<28);
+		tp->threadpasses = pass;
+		if (checkstop(thread, pass, lockpat))
+			return 0;
+		iter_msg_i++;
+		if (iter_msg && iter_msg_i > iter_msg) {
+			printk("Thread %d, Pass %d\n", thread, pass);
+			iter_msg_i = 0;
+		}
+		lcount = 0;
+
+		/*
+		 * Select line to perform operations on.
+		 */
+		linei = randn(linecount, &seed);
+		sharei = randn(2, &seed);
+		slinei = (linei + (linecount/2))%linecount;		/* I dont like this - fix later */
+
+		linep = (dataline_t *)blocks[linei];
+		slinep = (dataline_t *)blocks[slinei];
+		if (sharei == 0)
+			sharecopy = &slinep->share0;
+		else
+			sharecopy = &slinep->share1;
+
+
+		vv = randn(4, &seed);
+		if ((vv_mask & (1<<vv)) == 0)
+			continue;
+
+		if (napticks) {
+			t = randn(napticks, &seed);
+			udelay(t);
+		}
+		privp = &linep->private[thread];
+		sharep = &linep->share[sharei];
+		
+		switch(vv) {
+		case 0:
+			/* Read and verify private count on line. */
+			pval = *privp;
+			if (verbose)
+				printk("Line:%3d, Thread:%d:%d. Val: %x\n", linei, thread, vv, tp->private[linei]);
+			if (pval != tp->private[linei]) {
+				capturedata(&capdata, pass, privp, NULL, sizeof(*privp));
+				stillbad = (*privp != tp->private[linei]);
+				if (rerr(&capdata, "Private count", linep, slinep, thread, pass, linei, tp->private[linei], pval, stillbad)) {
+					return 1;
+				}
+				if (correct_errors) {
+					flush_buddy(privp);
+					tp->private[linei] = *privp;
+				}
+				errs++;
+			}
+			break;
+
+		case 1:
+			/* Read, verify, and increment private count on line. */
+			pval = *privp;
+			if (verbose)
+				printk("Line:%3d, Thread:%d:%d. Val: %x\n", linei, thread, vv, tp->private[linei]);
+			if (pval != tp->private[linei]) {
+				capturedata(&capdata, pass, privp, NULL, sizeof(*privp));
+				stillbad = (*privp != tp->private[linei]);
+				if (rerr(&capdata, "Private count & inc", linep, slinep, thread, pass, linei, tp->private[linei], pval, stillbad)) {
+					return 1;
+				}
+				errs++;
+			}
+			pval++;
+			flush_buddy(privp);
+			*privp = pval;
+			tp->private[linei] = pval;
+			break;
+
+		case 2:
+			/* Lock line, read and verify shared data. */
+			if (verbose)
+				printk("Line:%3d, Thread:%d:%d. Val: %x\n", linei, thread, vv, *sharecopy);
+			lcount = 0;
+			while (LOCK(sharei) != 1) {
+				if (checkstop(thread, pass, lockpat))
+					return 0;
+				if (lcount++>1000000) {
+					capturedata(&capdata, pass, LOCKADDR(sharei), NULL, sizeof(lock_t));
+					stillbad = (GETLOCK(sharei) != 0);
+					rerr(&capdata, "Shared data lock", linep, slinep, thread, pass, linei, 0, GETLOCK(sharei), stillbad);
+					return 1;
+				}
+				if ((lcount&0x3fff) == 0)
+					udelay(1000);
+			}
+
+			sval = *sharep;
+			sval2 = *sharecopy;
+			if (pass > 12 && thread == 0 && fail_enabled == 1)
+				sval++;
+			if (sval != sval2) {
+				capturedata(&capdata, pass, sharep, sharecopy, sizeof(*sharecopy));
+				stillbad = (*sharep != *sharecopy);
+				if (!stillbad && *sharep != sval && *sharecopy == sval2)
+					stillbad = 2;
+				if (rerr(&capdata, "Shared data", linep, slinep, thread, pass, linei, sval2, sval, stillbad)) {
+					return 1;
+				}
+				if (correct_errors)
+					*sharep = *sharecopy;
+				errs++;
+			}
+
+
+			if ( (s=UNLOCK(sharei)) != 1) {
+				capturedata(&capdata, pass, LOCKADDR(sharei), NULL, 4);
+				stillbad = (GETLOCK(sharei) != lockpat);
+				if (rerr(&capdata, "Shared data unlock", linep, slinep, thread, pass, linei, lockpat, GETLOCK(sharei), stillbad))
+					return 1;
+				if (correct_errors)
+					ZEROLOCK(sharei);	
+				errs++;
+			}
+			break;
+
+		case 3:
+			/* Lock line, read and verify shared data, modify shared data. */
+			if (verbose)
+				printk("Line:%3d, Thread:%d:%d. Val: %x\n", linei, thread, vv, *sharecopy);
+			lcount = 0;
+			while (LOCK(sharei) != 1) {
+				if (checkstop(thread, pass, lockpat))
+					return 0;
+				if (lcount++>1000000) {
+					capturedata(&capdata, pass, LOCKADDR(sharei), NULL, sizeof(lock_t));
+					stillbad = (GETLOCK(sharei) != 0);
+					rerr(&capdata, "Shared data lock & inc", linep, slinep, thread, pass, linei, 0, GETLOCK(sharei), stillbad);
+					return 1;
+				}
+				if ((lcount&0x3fff) == 0)
+					udelay(1000);
+			}
+			sval = *sharep;
+			sval2 = *sharecopy;
+			if (sval != sval2) {
+				capturedata(&capdata, pass, sharep, sharecopy, sizeof(*sharecopy));
+				stillbad = (*sharep != *sharecopy);
+				if (!stillbad && *sharep != sval && *sharecopy == sval2)
+					stillbad = 2;
+				if (rerr(&capdata, "Shared data & inc", linep, slinep, thread, pass, linei, sval2, sval, stillbad)) {
+					return 1;
+				}
+				errs++;
+			}
+
+			flush_buddy(sharep);
+			*sharep = lockpat;
+			flush_buddy(sharecopy);
+			*sharecopy = lockpat;
+
+
+			if ( (s=UNLOCK(sharei)) != 1) {
+				capturedata(&capdata, pass, LOCKADDR(sharei), NULL, 4);
+				stillbad = (GETLOCK(sharei) != lockpat);
+				if (rerr(&capdata, "Shared data & inc unlock", linep, slinep, thread, pass, linei, thread, GETLOCK(sharei), stillbad))
+					return 1;
+				if (correct_errors)
+					ZEROLOCK(sharei);	
+				errs++;
+			}
+			break;
+		}
+	}
+
+	return (errs > 0);
+}
+
+static void
+trigger_la(long val)
+{
+	long	*p;
+
+	p = (long*)0xc0000a0001000020L; /* PI_CPU_NUM */
+	*p = val;
+}
+
+static long
+getsynerr(void)
+{
+	long	err, *errp;
+
+	errp = (long*)0xc0000e0000000340L;	/* SYN_ERR */
+	err = *errp;
+	if (err)
+		*errp = -1L;
+	return (err & ~0x60);
+}
+
+static int
+rerr(capture_t *cap, char *msg, void *lp, void *slp, int thread, int pass, int linei, int exp, int found, int stillbad)
+{
+	int		cpu;
+	long 		synerr;
+	int		selt;
+
+
+	selt = selective_trigger && stillbad > 1 && 
+			memcmp(cap->blockdata, cap->blockdata_fc, 128) != 0 &&
+			memcmp(cap->shaddata, cap->shaddata_fc, 128) == 0;
+	if (selt) {
+		trigger_la(pass);
+	} else if (selective_trigger) {
+		k_go = ST_STOP;
+		return k_stop_on_error;
+	}
+
+	spin(1);
+	printk ("\nDataError!: %-20s, test %ld, thread %d, line:%d, pass %d (0x%x), time %ld expected:%x, found:%x\n",
+	    msg, k_testnumber, thread, linei, pass, pass, jiffies, exp, found);
+
+	dumpline (lp, "Corrupted data", "D ", cap->blockaddr, cap->blockdata, cap->size);
+	if (memcmp(cap->blockdata, cap->blockdata_fc, 128))
+		dumpline (lp, "Corrupted data", "DF", cap->blockaddr, cap->blockdata_fc, cap->size);
+
+	if (cap->shadaddr) {
+		dumpline (slp, "Shadow    data", "S ", cap->shadaddr, cap->shaddata, cap->size);
+		if (memcmp(cap->shaddata, cap->shaddata_fc, 128))
+			dumpline (slp, "Shadow    data", "SF", cap->shadaddr, cap->shaddata_fc, cap->size);
+	}
+	
+	printk("Threadpasses: ");
+	for (cpu=0; cpu<MAXCPUS; cpu++)
+		if (k_threadprivate[cpu]->threadpasses)
+			printk("  %d:0x%x", cpu, k_threadprivate[cpu]->threadpasses);
+
+
+	printk("\nData was %sfixed by flushcache\n", (stillbad == 1 ? "**** NOT **** " : " "));
+	synerr = getsynerr();
+	if (synerr)
+		printk("SYNERR: Thread %d, Synerr: 0x%lx\n", thread, synerr);
+	spin(2);
+	printk("\n\n");
+
+	if (errstop_enabled) {
+		local_irq_disable();
+		while(1);
+	}
+	return k_stop_on_error;
+}
+
+
+static void
+dumpline(void *lp, char *str1, char *str2, void *addr, void *data, int size)
+{
+	long *p;
+	int i, off;
+
+	printk("%s at 0x%lx, size %d, block starts at 0x%lx\n", str1, (long)addr, size, (long)lp);
+	p = (long*) data;
+	for (i=0; i<16; i++, p++) {
+		if (i==0) printk("%2s", str2);
+		if (i==8) printk("  ");
+		printk(" %016lx", *p);
+		if ((i&7)==7) printk("\n");
+	}
+	printk("   ");
+	off = (((long)addr) ^ size) & 63L;
+	for (i=0; i<off+size; i++) {
+		printk("%s", (i>=off) ? "--" : "  ");
+		if ((i%8) == 7)
+			printk(" ");
+	}
+
+	off = ((long)addr) & 127;
+	printk(" (line %d)\n", off/64+1);
+}
+
+
+static int
+randn(uint max, uint *seedp)
+{
+	if (max == 1)
+		return(0);
+	else
+		return((int)(zrandom(seedp)>>10) % max);
+}
+
+
+static int
+checkstop(int thread, int pass, uint lockpat)
+{
+	long	synerr;
+
+	if (k_go == ST_RUN)
+		return 0;
+	if (k_go == ST_STOP)
+		return 1;
+
+	if (errstop_enabled) {
+		local_irq_disable();
+		while(1);
+	}
+	synerr = getsynerr();
+	spin(2);
+	if (k_go == ST_STOP)
+		return 1;
+	if (synerr)
+		printk("SYNERR: Thread %d, Synerr: 0x%lx\n", thread, synerr);
+	return 1;
+}
+
+
+static void
+spin(int j)
+{
+	udelay(j * 500000);
+}
+
+static void
+capturedata(capture_t *cap, uint pass, void *blockaddr, void *shadaddr, int size)
+{
+
+	if (!selective_trigger)
+		trigger_la (pass);
+
+	memcpy (cap->blockdata, CACHEALIGN(blockaddr), 128);
+	if (shadaddr) 
+		memcpy (cap->shaddata, CACHEALIGN(shadaddr), 128);
+
+	if (k_stop_on_error) {
+		k_go = ST_ERRSTOP;
+	}
+
+ 	cap->size = size;
+	cap->blockaddr = blockaddr;
+	cap->shadaddr = shadaddr;
+
+	asm volatile ("fc %0" :: "r"(blockaddr) : "memory");
+	ia64_sync_i();
+	ia64_srlz_d();
+	memcpy (cap->blockdata_fc, CACHEALIGN(blockaddr), 128);
+
+	if (shadaddr) {
+		asm volatile ("fc %0" :: "r"(shadaddr) : "memory");
+		ia64_sync_i();
+		ia64_srlz_d();
+		memcpy (cap->shaddata_fc, CACHEALIGN(shadaddr), 128);
+	}
+}
+
+int             zranmult = 0x48c27395;
+
+static uint  
+zrandom (uint *seedp)
+{
+        *seedp = (*seedp * zranmult) & 0x7fffffff;
+        return (*seedp);
+}
+
+
+void
+set_autotest_params(void)
+{
+	static int	testnumber=-1;
+
+	if (llsctest_number >= 0) {
+		testnumber = llsctest_number;
+	} else {
+		testnumber++;
+		if (autotest_table[testnumber].passes == 0)
+			testnumber = 0;
+	}
+	k_passes = autotest_table[testnumber].passes;
+	k_linepad = autotest_table[testnumber].linepad;
+	k_linecount = autotest_table[testnumber].linecount;
+	k_testnumber = testnumber;
+
+	if (IS_RUNNING_ON_SIMULATOR()) {
+		printk ("llsc start test %ld\n", k_testnumber);
+		k_passes = 1000;
+	}
+}
+
+
+static void
+set_leds(int errs)
+{
+	unsigned char	leds=0;
+
+	/*
+	 * Leds are:
+	 * 	ppppeee-  
+	 *   where
+	 *      pppp = test number
+	 *       eee = error count but top bit is sticky
+	 */
+
+	leds =  ((errs&7)<<1) | ((k_testnumber&15)<<4) | (errs ? 0x08 : 0);
+	bringup_set_led_bits(leds, 0xfe);
+}
+
+static void
+setup_block_addresses(void)
+{
+	int		i, stride, memmapi;
+
+	stride = LINESTRIDE;
+	memmapi = 0;
+	for (i=0; i<memmapx; i++) {
+		memmap[i].nextaddr = memmap[i].vstart;
+		memmap[i].wrapcount = 0;
+	}
+
+	for (i=0; i<k_linecount; i++) {
+		blocks[i] = memmap[memmapi].nextaddr;
+		memmap[memmapi].nextaddr += stride;
+		if (memmap[memmapi].nextaddr + sizeof(dataline_t) >= memmap[memmapi].vend) {
+			memmap[memmapi].wrapcount++;
+			memmap[memmapi].nextaddr = memmap[memmapi].vstart + 
+					memmap[memmapi].wrapcount * sizeof(dataline_t);
+		}
+
+		memset((void*)blocks[i], 0, sizeof(dataline_t));
+
+		if (stride > 16384) {
+			memmapi++;
+			if (memmapi == memmapx)
+				memmapi = 0;
+		}
+	}
+
+}
+
+static void
+set_thread_state(int cpuid, int state)
+{
+	if (k_threadprivate[cpuid]->threadstate == TS_KILLED) {
+		bringup_set_led_bits(0xfe, 0xfe);
+		while(1);
+	}
+	k_threadprivate[cpuid]->threadstate = state;
+}
+
+static int
+build_mem_map(unsigned long start, unsigned long end, void *arg)
+{
+	long	lstart;
+	/*
+	 * HACK - skip the kernel on the first node 
+	 */
+
+	printk ("LLSC memmap: start 0x%lx, end 0x%lx, (0x%lx - 0x%lx)\n", 
+		start, end, (long) virt_to_page(start), (long) virt_to_page(end-PAGE_SIZE));
+
+	while (end > start && (PageReserved(virt_to_page(end-PAGE_SIZE)) || virt_to_page(end-PAGE_SIZE)->count.counter > 0))
+		end -= PAGE_SIZE;
+
+	lstart = end;
+	while (lstart > start && (!PageReserved(virt_to_page(lstart-PAGE_SIZE)) && virt_to_page(lstart-PAGE_SIZE)->count.counter == 0))
+		lstart -= PAGE_SIZE;
+
+	printk ("     memmap: start 0x%lx, end 0x%lx\n", lstart, end);
+	if (lstart >= end)
+		return 0;
+
+	memmap[memmapx].vstart = lstart;
+	memmap[memmapx].vend = end;
+	memmapx++;
+	return 0;
+}
+
+void int_test(void);
+
+int
+llsc_main (int cpuid, long mbasex)
+{
+	int		i, cpu, is_master, repeatcnt=0;
+	unsigned int	preverr=0, errs=0, pass=0;
+	int		automode=0;
+
+#ifdef INTTEST
+	if (inttest)
+		int_test();
+#endif
+
+	if (!autotest_enabled)
+		return 0;
+
+#ifdef CONFIG_SMP
+	is_master = !smp_processor_id();
+#else
+	is_master = 1;
+#endif
+
+
+	if (is_master) {
+		print_params();
+		if(!IS_RUNNING_ON_SIMULATOR())
+			spin(10);
+		mbase = (control_t*)mbasex;
+		k_currentpass = 0;
+		k_go = ST_IDLE;
+		k_passes = DEF_PASSES;
+		k_napticks = DEF_NAPTICKS;
+		k_stop_on_error = DEF_STOP_ON_ERROR;
+		k_verbose = DEF_VERBOSE;
+		k_linecount = DEF_LINECOUNT;
+		k_iter_msg = DEF_ITER_MSG;
+		k_vv = DEF_VV;
+		k_linepad = DEF_LINEPAD;
+		k_blocks = (void*)blocks;
+		efi_memmap_walk(build_mem_map, 0);
+	
+#ifdef CONFIG_IA64_SGI_AUTOTEST
+		automode = 1;
+#endif
+
+		for (i=0; i<MAXCPUS; i++) {
+			k_threadprivate[i] = THREADPRIVATE(i);
+			memset(k_threadprivate[i], 0, sizeof(*k_threadprivate[i]));
+		}
+		initialized = 1;
+	} else {
+		while (initialized == 0)
+			udelay(100);
+	}
+
+loop:
+	if (is_master) {
+		if (automode) {
+			if (!preverr || repeatcnt++ > 5) {
+				set_autotest_params();
+				repeatcnt = 0;
+			}
+		} else {
+			while (k_go == ST_IDLE);
+		}
+
+		k_go = ST_INIT;
+		if (k_linecount > MAX_LINECOUNT) k_linecount = MAX_LINECOUNT;
+		k_linecount = k_linecount & ~1;
+		setup_block_addresses();
+
+		k_currentpass = pass++;
+		k_go = ST_RUN;
+		if (fail_enabled)
+			fail_enabled--;
+
+	} else {
+		while (k_go != ST_RUN || k_currentpass != pass);
+		pass++;
+	}
+
+
+	set_leds(errs);
+	set_thread_state(cpuid, TS_RUNNING);
+
+	errs += ran_conf_llsc(cpuid);
+	preverr = (k_go == ST_ERRSTOP);
+
+	set_leds(errs);
+	set_thread_state(cpuid, TS_STOPPED);
+
+	if (is_master) {
+		Speedo();
+		for (i=0, cpu=0; cpu<MAXCPUS; cpu++) {
+			while (k_threadprivate[cpu]->threadstate == TS_RUNNING) {
+				i++;
+				if (i == 10000) { 
+					k_go = ST_STOP;
+					printk ("  llsc master stopping test number %ld\n", k_testnumber);
+				}
+				if (i > 100000) {
+					k_threadprivate[cpu]->threadstate = TS_KILLED;
+					printk ("  llsc: master killing cpuid %d, running test number %ld\n", 
+							cpu, k_testnumber);
+				}
+				udelay(1000);
+			}
+		}
+	}
+
+	goto loop;
+}
+
+
+static void
+Speedo(void)
+{
+	static int i = 0;
+
+	switch (++i%4) {
+	case 0:
+		printk("|\b");
+		break;
+	case 1:
+		printk("\\\b");
+		break;
+	case 2:
+		printk("-\b");
+		break;
+	case 3:
+		printk("/\b");
+		break;
+	}
+}
+
+#ifdef INTTEST
+
+/* ======================================================================================================== 
+ *
+ * Some test code to verify that interrupts work
+ *
+ * Add the following to the arch/ia64/kernel/smp.c after the comment "Reschedule callback"
+ * 		if (zzzprint_resched) printk("  cpu %d got interrupt\n", smp_processor_id());
+ *
+ * Enable the code in arch/ia64/sn/sn1/smp.c to print sending IPIs.
+ *
+ */
+
+static int __init set_inttest(char *str)
+{
+        inttest = 1;
+	autotest_enabled = 1;
+
+	return 1;
+}	
+
+__setup("inttest=", set_inttest);
+
+int	zzzprint_resched=0;
+
+void
+int_test() {
+	int			mycpu, cpu;
+	static volatile int	control_cpu=0;
+
+	mycpu = smp_processor_id();
+	zzzprint_resched = 2;
+
+	printk("Testing cross interrupts\n");
+	
+	while (control_cpu != smp_num_cpus) {
+		if (mycpu == cpu_logical_map(control_cpu)) {
+			for (cpu=0; cpu<smp_num_cpus; cpu++) {
+				printk("Sending interrupt from %d to %d\n", mycpu, cpu_logical_map(cpu));
+				udelay(IS_RUNNING_ON_SIMULATOR() ? 10000 : 400000);
+				smp_send_reschedule(cpu_logical_map(cpu));
+				udelay(IS_RUNNING_ON_SIMULATOR() ? 10000 : 400000);
+				smp_send_reschedule(cpu_logical_map(cpu));
+				udelay(IS_RUNNING_ON_SIMULATOR() ? 10000 : 400000);
+			}
+			control_cpu++;
+		}
+	}
+
+	zzzprint_resched = 1;
+
+	if (mycpu == cpu_logical_map(smp_num_cpus-1)) {
+		printk("\nTight loop of cpu %d sending ints to cpu 0 (every 100 us)\n", mycpu);
+		udelay(IS_RUNNING_ON_SIMULATOR() ? 1000 : 1000000);
+		__cli();
+		while (1) {
+			smp_send_reschedule(0);
+			udelay(100);
+		}
+
+	}
+
+	while(1);
+}
+#endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/llsc4.h linux/arch/ia64/sn/sn1/llsc4.h
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/llsc4.h	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/sn1/llsc4.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,104 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+#ifdef STANDALONE
+#include "lock.h"
+#endif
+
+
+#define DEF_NAPTICKS		0
+#define DEF_PASSES		0
+#define DEF_AUTO_PASSES		1000000
+#define DEF_STOP_ON_ERROR	1
+#define DEF_VERBOSE		0
+#define DEF_LINECOUNT		2
+#define DEF_ITER_MSG		0
+#define DEF_VV			0xffffffff
+#define DEF_LINEPAD		0x234
+
+
+
+#define MAXCPUS			16
+#define CACHELINE		64
+#define MAX_LINECOUNT		1024
+#define K			1024
+#define	MB			(K*K)
+
+
+#define	uint 		unsigned int
+#define	ushort		unsigned short
+#define vint		volatile int
+#define vlong		volatile long
+
+#define LOCKADDR(i)	&linep->lock[(i)]
+#define LOCK(i)		set_lock(LOCKADDR(i), lockpat)
+#define UNLOCK(i)	clr_lock(LOCKADDR(i), lockpat)
+#define GETLOCK(i)	*LOCKADDR(i)
+#define ZEROLOCK(i)	zero_lock(LOCKADDR(i))
+
+#define CACHEALIGN(a)	((void*)((long)(a) & ~127L))
+
+typedef uint		lock_t;
+typedef uint		share_t;
+typedef uint		private_t;
+
+typedef struct {
+	lock_t		lock[2];
+	share_t		share[2];
+	private_t	private[MAXCPUS];
+	share_t		share0;
+	share_t		share1;
+} dataline_t ;
+
+
+#define LINEPAD			k_linepad
+#define LINESTRIDE		(((sizeof(dataline_t)+CACHELINE-1)/CACHELINE)*CACHELINE + LINEPAD)
+
+
+typedef struct {
+	vint		threadstate;
+	uint		threadpasses;
+	private_t	private[MAX_LINECOUNT];
+} threadprivate_t;
+
+typedef struct {
+	vlong		sk_go;		/* 0=idle, 1=init, 2=run */
+	long		sk_linecount;
+	long		sk_passes;
+	long		sk_napticks;
+	long		sk_stop_on_error;
+	long		sk_verbose;
+	long		sk_iter_msg;
+	long		sk_vv;
+	long		sk_linepad;
+	long		sk_options;
+	long		sk_testnumber;
+	vlong		sk_currentpass;
+	void 		*sk_blocks;
+	threadprivate_t	*sk_threadprivate[MAXCPUS];
+} control_t;
+
+/* Run state (k_go) constants */
+#define ST_IDLE		0
+#define ST_INIT		1
+#define ST_RUN		2
+#define ST_STOP		3
+#define ST_ERRSTOP	4
+
+
+/* Threadstate constants */
+#define TS_STOPPED	0
+#define	TS_RUNNING	1
+#define TS_KILLED	2
+
+
+
+int llsc_main (int cpuid, long mbasex);
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/mm.c linux/arch/ia64/sn/sn1/mm.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/mm.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/sn1/mm.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,398 @@
+/*
+ * Copyright, 2000, Silicon Graphics.
+ * Copyright Srinivasa Thirumalachar (sprasad@engr.sgi.com)
+ * Copyright 2000 Kanoj Sarcar (kanoj@sgi.com)
+ */
+
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/efi.h>
+#include <asm/sn/mmzone_sn1.h>
+
+#       define MIN(a,b)         ((a) < (b) ? (a) : (b))
+#       define MAX(a,b)         ((a) > (b) ? (a) : (b))
+
+/*
+ * Note that the nodemem[] data structure does not support arbitrary
+ * memory types and memory descriptors inside the node. For example, 
+ * you can not have multiple efi-mem-type segments in the node and
+ * expect the OS not to use specific mem-types. Currently, the 
+ * assumption is that "start" is the start of virtual/physical memory 
+ * on the node. PROM can reserve some memory _only_ at the beginning. 
+ * This is tracked via the "usable" field, that maintains where the 
+ * os can start using memory from on a node (ie end of PROM memory).
+ * setup_node_bootmem() is passed the above "usable" value, and is
+ * expected to make bootmem calls that ensure lower memory is not used.
+ * Note that the bootmem for a node is initialized on the entire node, 
+ * without regards to any holes - then we reserve the holes in 
+ * setup_sn1_bootmem(), to make sure the holes are not handed out by
+ * alloc_bootmem, as well as the corresponding mem_map entries are not
+ * considered allocatable by the page_alloc routines.
+ */
+struct nodemem_s {
+        u64     start ;
+        u64     end   ;
+        u64 	hole[SN1_MAX_BANK_PER_NODE] ;
+	u64	usable;
+} nodemem[MAXNODES] ;
+static int nodemem_valid = 0;
+
+static int __init
+free_unused_memmap_hole(int nid, unsigned long start, unsigned long end)
+{
+        struct page * page, *pageend;
+        unsigned long count = 0;
+
+	if (start >= end)
+		return 0 ;
+
+	/*
+	 * Get the memmap ptrs to the start and end of the holes.
+	 * virt_to_page(start) will panic, if start is in hole.
+	 * Can we do virt_to_page(end), if end is on the next node?
+	 */
+
+	page = virt_to_page(start-1);
+	page++ ;
+	pageend = virt_to_page(end) ;
+
+	printk("hpage=0x%lx, hpageend=0x%lx\n", (u64)page, (u64)pageend) ;
+	free_bootmem_node(NODE_DATA(nid), __pa(page), (u64)pageend - (u64)page);
+
+	return count ;
+}
+
+void
+free_unused_memmap_node(int nid)
+{
+	u64	i = 0 ;
+	u64	holestart = -1 ;
+
+	do {
+		holestart = nodemem[nid].hole[i] ;
+		i++ ;
+		while ((i < SN1_MAX_BANK_PER_NODE) && 
+			(nodemem[nid].hole[i] == (u64)-1))
+			i++ ;
+		if (i < SN1_MAX_BANK_PER_NODE)
+			free_unused_memmap_hole(nid, holestart, 
+				nodemem[nid].start + (i<<SN1_BANK_ADDR_SHIFT));
+	} while (i<SN1_MAX_BANK_PER_NODE);
+}
+
+/*
+ * Since efi_memmap_walk merges contiguous banks, this code will need
+ * to find all the nasid/banks covered by the input memory descriptor.
+ */
+static int __init
+build_nodemem_map(unsigned long start, unsigned long end, void *arg)
+{
+	unsigned long vaddr = start;
+	unsigned long nvaddr;
+	int nasid = GetNasId(__pa(vaddr));
+	int cnodeid, bankid;
+
+	while (vaddr < end) {
+		cnodeid = NASID_TO_CNODEID(nasid);
+		bankid = GetBankId(__pa(vaddr));
+		nodemem[cnodeid].start = MIN(nodemem[cnodeid].start, vaddr);
+		nodemem[cnodeid].usable = MIN(nodemem[cnodeid].usable, vaddr);
+		nvaddr = (unsigned long)__va((unsigned long)(++nasid) << 
+							SN1_NODE_ADDR_SHIFT);
+		nodemem[cnodeid].end = MAX(nodemem[cnodeid].end, MIN(end, nvaddr));
+		while ((bankid < SN1_MAX_BANK_PER_NODE) && 
+					(vaddr < nodemem[cnodeid].end)) {
+			nvaddr = nodemem[cnodeid].start + 
+			  ((unsigned long)(bankid + 1) << SN1_BANK_ADDR_SHIFT);
+			nodemem[cnodeid].hole[bankid++] = MIN(nvaddr, end);
+			vaddr = nvaddr;
+		}
+	}
+
+	return 0;
+}
+
+static int __init
+pgtbl_size_ok(int nid)
+{
+	unsigned long numpfn, bank0size, nodesize ;
+	
+	nodesize 	= nodemem[nid].end - nodemem[nid].start ;
+	numpfn 		= nodesize >> PAGE_SHIFT;
+
+	bank0size 	= nodemem[nid].hole[0] - nodemem[nid].start ;
+	/* If nid == master node && no kernel text replication */
+	bank0size      -= 0xA00000 ;	/* Kernel text + stuff */
+	bank0size      -= ((numpfn + 7) >> 3);
+
+	if ((numpfn * sizeof(mem_map_t)) > bank0size) {
+		printk("nid = %d, ns=0x%lx, npfn=0x%lx, bank0size=0x%lx\n", 
+			nid, nodesize, numpfn, bank0size) ;
+		return 0 ;
+	}
+
+	return 1 ;
+}
+
+static void __init
+check_pgtbl_size(int nid)
+{
+	int	bank = SN1_MAX_BANK_PER_NODE - 1 ;
+
+	/* Find highest bank with valid memory */
+        while ((nodemem[nid].hole[bank] == -1) && (bank))
+               bank-- ;
+
+	while (!pgtbl_size_ok(nid)) {
+		/* Remove that bank of memory */
+		/* Collect some numbers later */
+		printk("Ignoring node %d bank %d\n", nid, bank) ;
+		nodemem[nid].hole[bank--] = -1 ;
+		/* Get to the next populated bank */
+		while ((nodemem[nid].hole[bank] == -1) && (bank))
+			bank-- ;
+		printk("Using only upto bank %d on node %d\n", bank,nid) ;
+		nodemem[nid].end = nodemem[nid].hole[bank] ; 
+		if (!bank) break ;
+	}
+}
+
+void dump_nodemem_map(int) ;
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern bootmem_data_t 	bdata[] ;
+static int	 	curnodeid ;
+
+static int __init
+setup_node_bootmem(unsigned long start, unsigned long end, unsigned long nodefree)
+{
+	extern char _end;
+	int i;
+	unsigned long kernelend = PAGE_ALIGN((unsigned long)(&_end));
+	unsigned long pkernelend = __pa(kernelend);
+	unsigned long bootmap_start, bootmap_size;
+	unsigned long pstart, pend;
+
+	pstart = __pa(start) ;
+	pend   = __pa(end) ;
+
+	/* If we are past a node mem boundary, on simulated dig numa
+	 * increment current node id. */
+
+	curnodeid = NASID_TO_CNODEID(GetNasId(pstart)) ;
+
+       /*
+        * Make sure we are being passed page aligned addresses.
+        */
+	if ((start & (PAGE_SIZE - 1)) || (end & (PAGE_SIZE - 1)))
+               panic("setup_node_bootmem:align");
+
+
+	/* For now, just go to the lower CHUNK alignment so that 
+	 * chunktonid of 0-8MB and other lower mem pages get initted. */
+
+	pstart &= CHUNKMASK ;
+	pend = (pend+CHUNKSZ-1) & CHUNKMASK;
+
+	/* If pend == 0, both addrs below 8 MB, special case it
+	 * FIX: CHUNKNUM(pend-1) broken if pend == 0 
+	 * both addrs within 8MB */
+
+	if (pend == 0) {
+		chunktonid[0] = 0;
+		return 0;
+	}
+
+	/* Fill up the chunktonid array first. */
+
+        for (i = PCHUNKNUM(pstart); i <= PCHUNKNUM(pend-1); i++)
+               chunktonid[i] = curnodeid;
+
+	/* This check is bogus for now till MAXCHUNKS is properly
+	 * defined to say if it includes holes or not. */
+
+	if ((CHUNKTONID(PCHUNKNUM(pend)) > MAXCHUNKS) || 
+		(PCHUNKNUM(pstart) >= PCHUNKNUM(pend))) {
+		printk("Ign 0x%lx-0x%lx, ", __pa(start), __pa(end));
+		return(0);
+	}
+
+	/* This routine gets called many times in node 0.
+	 * The first one to reach here would be the one after
+	 * kernelend to end of first node. */
+
+	NODE_DATA(curnodeid)->bdata = &(bdata[curnodeid]);
+
+	if (curnodeid == 0) {
+		/* for master node, forcibly assign these values
+		 * This gets called many times on dig but we
+		 * want these exact values 
+		 * Also on softsdv, the memdesc for 0 is missing */
+		NODE_START(curnodeid) = PAGE_OFFSET;
+		NODE_SIZE(curnodeid) = (end - PAGE_OFFSET);
+	} else {
+		/* This gets called only once for non zero nodes
+		 * If it does not, then NODE_START should be 
+		 * LOCAL_BASE(nid) */
+
+		NODE_START(curnodeid) = start;
+		NODE_SIZE(curnodeid) = (end - start);
+	}
+
+	/* if end < kernelend do not do anything below this */
+	if (pend < pkernelend)
+		return 0 ;
+
+       /*
+        * Handle the node that contains kernel text/data. It would
+        * be nice if the loader loads the kernel at a "chunk", ie
+        * not in memory that the kernel will ignore (else free_initmem
+        * has to worry about not freeing memory that the kernel ignores).
+        * Note that we assume the space from the node start to
+        * KERNEL_START can not hold all the bootmem data, but from kernel
+        * end to node end can.
+        */
+
+	/* TBD: This may be bogus in light of the above check. */
+
+	if ((pstart < pkernelend) && (pend >= pkernelend)) {
+               bootmap_start = pkernelend;
+	} else {
+               bootmap_start = __pa(start);    /* chunk & page aligned */
+	}
+
+	/*
+	 * Low memory is reserved for PROM use on SN1. The current node
+	 * memory model is [PROM mem ... kernel ... free], where the 
+	 * first two components are optional on a node.
+	 */
+	if (bootmap_start < __pa(nodefree))
+		bootmap_start = __pa(nodefree);
+
+/* XXX TBD */
+/* For curnodeid of 0, this gets called many times because of many
+ * < 8MB segments. start gets bumped each time. We want to fix it
+ * to 0 now. 
+ */
+	if (curnodeid == 0)
+		start=PAGE_OFFSET;
+/*
+ * This makes sure that in free_area_init_core - paging_init
+ * idx is the entire node page range and for loop goes thro
+ * all pages. test_bit for kernel pages should remain reserved
+ * because free available mem takes care of kernel_start and end
+ */
+
+        bootmap_size = init_bootmem_node(NODE_DATA(curnodeid),
+			(bootmap_start >> PAGE_SHIFT),
+			(__pa(start) >> PAGE_SHIFT), (__pa(end) >> PAGE_SHIFT));
+
+	free_bootmem_node(NODE_DATA(curnodeid), bootmap_start + bootmap_size,
+				__pa(end) - (bootmap_start + bootmap_size));
+
+	return(0);
+}
+
+void
+setup_sn1_bootmem(int maxnodes)
+{
+        int     i;
+
+        for (i=0;i<MAXNODES;i++) {
+                nodemem[i].usable = nodemem[i].start = -1 ;
+                nodemem[i].end   = 0 ;
+		memset(&nodemem[i].hole, -1, sizeof(nodemem[i].hole)) ;
+        }
+        efi_memmap_walk(build_nodemem_map, 0) ;
+
+	/*
+	 * Run thru all the nodes, adjusting their starts. This is needed
+	 * because efi_memmap_walk() might not process certain mds that 
+	 * are marked reserved for PROM at node low memory.
+	 */
+	for (i = 0; i < maxnodes; i++)
+		nodemem[i].start = ((nodemem[i].start >> SN1_NODE_ADDR_SHIFT) <<
+					SN1_NODE_ADDR_SHIFT);
+	nodemem_valid = 1 ;
+
+	/* After building the nodemem map, check if the page table
+	 * will fit in the first bank of each node. If not change
+	 * the node end addr till it fits. We dont want to do this
+	 * in mm/page_alloc.c
+ 	 */
+
+        for (i=0;i<maxnodes;i++)
+		check_pgtbl_size(i) ;
+
+        for (i=0;i<maxnodes;i++)
+                setup_node_bootmem(nodemem[i].start, nodemem[i].end, nodemem[i].usable);
+
+	/*
+	 * Mark the holes as reserved, so the corresponding mem_map
+	 * entries will not be marked allocatable in free_all_bootmem*().
+	 */
+	for (i = 0; i < maxnodes; i++) {
+		int j = 0 ;
+		u64 holestart = -1 ;
+
+		do {
+			holestart = nodemem[i].hole[j++];
+			while ((j < SN1_MAX_BANK_PER_NODE) && 
+					(nodemem[i].hole[j] == (u64)-1))
+				j++;
+			if (j < SN1_MAX_BANK_PER_NODE)
+				reserve_bootmem_node(NODE_DATA(i), 
+					__pa(holestart), (nodemem[i].start + 
+					((long)j <<  SN1_BANK_ADDR_SHIFT) - 
+					 holestart));
+		} while (j < SN1_MAX_BANK_PER_NODE);
+	}
+
+	dump_nodemem_map(maxnodes) ;
+}
+#endif
+
+/*
+ * This used to be invoked from an SN1 specific hack in efi_memmap_walk.
+ * It tries to ignore banks which the kernel is ignoring because bank 0 
+ * is too small to hold the memmap entries for this bank.
+ * The current SN1 efi_memmap_walk callbacks do not need this. That 
+ * leaves the generic ia64 callbacks find_max_pfn, count_pages and
+ * count_reserved_pages, of which the first can probably get by without
+ * this, the last two probably need this, although they also can probably
+ * get by. 
+ */
+int
+sn1_bank_ignore(u64 start, u64 end)
+{
+	int 	nid = NASID_TO_CNODEID(GetNasId(__pa(end))) ;
+	int	bank = GetBankId(__pa(end)) ;
+
+	if (!nodemem_valid)
+		return 0 ;
+
+	if (nodemem[nid].hole[bank] == -1)
+		return 1 ;
+	else
+		return 0 ;
+}
+
+void
+dump_nodemem_map(int maxnodes)
+{
+	int	i,j;
+
+        printk("NODEMEM_S info ....\n") ;
+        printk("Node         start                end                 usable\n");
+        for (i=0;i<maxnodes;i++) {
+                printk("%d      0x%lx   0x%lx   0x%lx\n",
+                       i, nodemem[i].start, nodemem[i].end, nodemem[i].usable);
+                printk("Holes -> ") ;
+                for (j=0;j<SN1_MAX_BANK_PER_NODE;j++)
+                        printk("0x%lx ", nodemem[i].hole[j]) ;
+		printk("\n");
+        }
+}
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/setup.c linux/arch/ia64/sn/sn1/setup.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/setup.c	Fri Aug 11 19:09:06 2000
+++ linux/arch/ia64/sn/sn1/setup.c	Thu Jan  4 13:00:15 2001
@@ -14,13 +14,14 @@
 #include <linux/timex.h>
 #include <linux/sched.h>
 #include <linux/ioport.h>
+#include <linux/mm.h>
 
+#include <asm/sn/mmzone_sn1.h>
 #include <asm/io.h>
 #include <asm/machvec.h>
 #include <asm/system.h>
 #include <asm/processor.h>
 
-
 /*
  * The format of "screen_info" is strange, and due to early i386-setup
  * code. This is just enough to make the console code think we're on a
@@ -50,29 +51,48 @@
 unsigned long
 sn1_map_nr (unsigned long addr)
 {
+#ifdef CONFIG_DISCONTIGMEM
 	return MAP_NR_SN1(addr);
+#else
+	return MAP_NR_DENSE(addr);
+#endif
 }
 
-void
+void __init
 sn1_setup(char **cmdline_p)
 {
-
+	extern void init_sn1_smp_config(void);
 	ROOT_DEV = to_kdev_t(0x0301);		/* default to first IDE drive */
 
+	init_sn1_smp_config();
+#ifdef ZZZ
 #if !defined (CONFIG_IA64_SOFTSDV_HACKS)
-	/* 
-	 * Program the timer to deliver timer ticks.  0x40 is the I/O port
-	 * address of PIT counter 0, 0x43 is the I/O port address of the 
-	 * PIT control word. 
-	 */
-	request_region(0x40,0x20,"timer");
-	outb(0x34, 0x43);            /* Control word */
-	outb(LATCH & 0xff , 0x40);   /* LSB */
-	outb(LATCH >> 8, 0x40);	     /* MSB */
-	printk("PIT: LATCH at 0x%x%x for %d HZ\n", LATCH >> 8, LATCH & 0xff, HZ);
+        /*
+         * Program the timer to deliver timer ticks.  0x40 is the I/O port
+         * address of PIT counter 0, 0x43 is the I/O port address of the
+         * PIT control word.
+         */
+        request_region(0x40,0x20,"timer");
+        outb(0x34, 0x43);            /* Control word */
+        outb(LATCH & 0xff , 0x40);   /* LSB */
+        outb(LATCH >> 8, 0x40);      /* MSB */
+        printk("PIT: LATCH at 0x%x%x for %d HZ\n", LATCH >> 8, LATCH & 0xff, HZ);
+#endif
 #endif
 #ifdef CONFIG_SMP
 	init_smp_config();
 #endif
 	screen_info = sn1_screen_info;
+}
+
+int
+IS_RUNNING_ON_SIMULATOR(void)
+{
+#ifdef CONFIG_IA64_SGI_SN1_SIM
+	long sn;
+	asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2));
+	return(sn == SNMAGIC);
+#else
+	return(0);
+#endif
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/smp.c linux/arch/ia64/sn/sn1/smp.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/smp.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/sn1/smp.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,186 @@
+/*
+ * SN1 Platform specific SMP Support
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 Jack Steiner <steiner@sgi.com>
+ */
+
+
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/sn/mmzone_sn1.h>
+#include <asm/sal.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/current.h>
+#include <asm/sn/sn_cpuid.h>
+
+
+
+
+/*
+ * The following structure is used to pass params thru smp_call_function
+ * to other cpus for flushing TLB ranges.
+ */
+typedef struct {
+	unsigned long	start;
+	unsigned long	end;
+	unsigned long	nbits;
+} ptc_params_t;
+
+
+/*
+ * The following table/struct is for remembering PTC coherency domains. It
+ * is also used to translate sapicid into cpuids. We dont want to start 
+ * cpus unless we know their cache domain.
+ */
+#ifdef PTC_NOTYET
+sn_sapicid_info_t	sn_sapicid_info[NR_CPUS];
+#endif
+
+
+
+#ifdef PTC_NOTYET
+/*
+ * NOTE: This is probably not good enough, but I dont want to try to make
+ * it better until I get some statistics on a running system. 
+ * At a minimum, we should only send IPIs to 1 processor in each TLB domain
+ * & have it issue a ptc.g on it's own FSB. Also, serialize per FSB, not 
+ * globally.
+ *
+ * More likely, we will have to do some work to reduce the frequency of calls to
+ * this routine.
+ */
+
+static void
+sn1_ptc_local(void *arg)
+{
+	ptc_params_t	*params = arg;
+	unsigned long	start, end, nbits;
+
+	start = params->start;
+	end = params->end;
+	nbits = params->nbits;
+
+	do {
+		__asm__ __volatile__ ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
+		start += (1UL << nbits);
+	} while (start < end);
+}
+
+
+void
+sn1_ptc_global (unsigned long start, unsigned long end, unsigned long nbits)
+{
+	ptc_params_t	params;
+
+	params.start = start;
+	params.end = end;
+	params.nbits = nbits;
+
+	if (smp_call_function(sn1_ptc_local, &params, 1, 0) != 0)
+		panic("Unable to do ptc_global - timed out");
+
+	sn1_ptc_local(&params);
+}
+#endif
+
+
+
+
+void
+sn1_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
+{
+	long		*p, nasid, slice;
+	static int 	off[4] = {0x1800080, 0x1800088, 0x1a00080, 0x1a00088};
+
+	/*
+	 * ZZZ - Replace with standard macros when available.
+	 */
+	nasid = cpuid_to_nasid(cpuid);
+	slice = cpuid_to_slice(cpuid);
+	p = (long*)(0xc0000a0000000000LL | (nasid<<33) | off[slice]);
+
+#if defined(ZZZBRINGUP)
+	{
+	static int count=0;
+	if (count++ < 10) printk("ZZ sendIPI 0x%x->0x%x, vec %d, nasid 0x%lx, slice %ld, adr 0x%lx\n",
+		smp_processor_id(), cpuid, vector, nasid, slice, (long)p);
+	}
+#endif
+	mb();
+	*p = (delivery_mode << 8) | (vector & 0xff);
+	
+}
+
+
+#ifdef CONFIG_SMP
+
+static void __init
+process_sal_ptc_domain_info(ia64_sal_ptc_domain_info_t *di, int domain)
+{
+#ifdef PTC_NOTYET
+	ia64_sal_ptc_domain_proc_entry_t	*pe;
+	int 					i, sapicid, cpuid;
+
+	pe = __va(di->proc_list);
+	for (i=0; i<di->proc_count; i++, pe++) {
+		sapicid = id_eid_to_sapicid(pe->id, pe->eid);
+		cpuid = cpu_logical_id(sapicid);
+		sn_sapicid_info[cpuid].domain = domain;
+		sn_sapicid_info[cpuid].sapicid = sapicid;
+	}
+#endif
+}
+
+
+static void __init
+process_sal_desc_ptc(ia64_sal_desc_ptc_t *ptc)
+{
+	ia64_sal_ptc_domain_info_t	*di;
+	int i;
+
+	di = __va(ptc->domain_info);
+	for (i=0; i<ptc->num_domains; i++, di++) {
+		process_sal_ptc_domain_info(di, i);	
+	}
+}
+
+
+void __init
+init_sn1_smp_config(void)
+{
+
+	if (!ia64_ptc_domain_info)  {
+		printk("SMP: Can't find PTC domain info. Forcing UP mode\n");
+		smp_num_cpus = 1;
+		return;
+	}
+
+#ifdef PTC_NOTYET
+	memset (sn_sapicid_info, -1, sizeof(sn_sapicid_info));
+	process_sal_desc_ptc(ia64_ptc_domain_info);
+#endif
+
+}
+
+#else /* CONFIG_SMP */
+
+void __init
+init_sn1_smp_config(void)
+{
+
+#ifdef PTC_NOTYET
+	sn_sapicid_info[0].sapicid = hard_processor_sapicid();
+#endif
+}
+
+#endif /* CONFIG_SMP */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/sn1_asm.S linux/arch/ia64/sn/sn1/sn1_asm.S
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/sn1_asm.S	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/sn1/sn1_asm.S	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,8 @@
+
+/*
+ * Copyright (C) 2000 Silicon Graphics
+ * Copyright (C) 2000 Jack Steiner (steiner@sgi.com)
+ */
+
+#include <linux/config.h>
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/sn1/synergy.c linux/arch/ia64/sn/sn1/synergy.c
--- v2.4.0-prerelease/linux/arch/ia64/sn/sn1/synergy.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/sn1/synergy.c	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,205 @@
+
+/*
+ * SN1 Platform specific synergy Support
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 Alan Mayer (ajm@sgi.com)
+ */
+
+
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/ptrace.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/smp.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/sn1/bedrock.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/synergy.h>
+
+int bit_pos_to_irq(int bit);
+void setclear_mask_b(int irq, int cpuid, int set);
+void setclear_mask_a(int irq, int cpuid, int set);
+void * kmalloc(size_t size, int flags);
+
+extern struct sn1_cnode_action_list *sn1_node_actions[];
+
+
+void
+synergy_intr_alloc(int bit, int cpuid) {
+	return;
+}
+
+int
+synergy_intr_connect(int bit, 
+		int cpuid)
+{
+	int irq;
+	unsigned is_b;
+int nasid;
+
+nasid = cpuid_to_nasid(cpuid);
+	irq = bit_pos_to_irq(bit);
+
+	is_b = (cpuid_to_slice(cpuid)) & 1;
+	if (is_b) {
+		setclear_mask_b(irq,cpuid,1);
+		setclear_mask_a(irq,cpuid, 0);
+	} else {
+		setclear_mask_a(irq, cpuid, 1);
+		setclear_mask_b(irq, cpuid, 0);
+	}
+	return 0;
+}
+void
+setclear_mask_a(int irq, int cpuid, int set)
+{
+	int synergy;
+	int nasid;
+	int reg_num;
+	unsigned long mask;
+	unsigned long addr;
+	unsigned long reg;
+	unsigned long val;
+	int my_cnode, my_synergy;
+	int target_cnode, target_synergy;
+
+        /*
+         * Perform some idiot checks ..
+         */
+        if ( (irq < 0) || (irq > 255) ||
+                (cpuid < 0) || (cpuid > 512) ) {
+                printk("clear_mask_a: Invalid parameter irq %d cpuid %d\n", irq, cpuid);
+		return;
+	}
+
+	target_cnode = cpuid_to_cnodeid(cpuid);
+	target_synergy = cpuid_to_synergy(cpuid);
+	my_cnode = cpuid_to_cnodeid(smp_processor_id());
+	my_synergy = cpuid_to_synergy(smp_processor_id());
+
+	reg_num = irq / 64;
+	mask = 1;
+	mask <<= (irq % 64);
+	switch (reg_num) {
+		case 0: 
+			reg = VEC_MASK0A;
+			addr = VEC_MASK0A_ADDR;
+			break;
+		case 1: 
+			reg = VEC_MASK1A;
+			addr = VEC_MASK1A_ADDR;
+			break;
+		case 2: 
+			reg = VEC_MASK2A;
+			addr = VEC_MASK2A_ADDR;
+			break;
+		case 3: 
+			reg = VEC_MASK3A;
+			addr = VEC_MASK3A_ADDR;
+			break;
+		default:
+			reg = addr = 0;
+			break;
+	}
+	if (my_cnode == target_cnode && my_synergy == target_synergy) {
+		// local synergy
+		val = READ_LOCAL_SYNERGY_REG(addr);
+		if (set) {
+			val |= mask;
+		} else {
+			val &= ~mask;
+		}
+		WRITE_LOCAL_SYNERGY_REG(addr, val);
+		val = READ_LOCAL_SYNERGY_REG(addr);
+	} else { /* remote synergy */
+		synergy = cpuid_to_synergy(cpuid);
+		nasid = cpuid_to_nasid(cpuid);
+		val = REMOTE_SYNERGY_LOAD(nasid, synergy, reg);
+		if (set) {
+			val |= mask;
+		} else {
+			val &= ~mask;
+		}
+		REMOTE_SYNERGY_STORE(nasid, synergy, reg, val);
+	}
+}
+
+void
+setclear_mask_b(int irq, int cpuid, int set)
+{
+	int synergy;
+	int nasid;
+	int reg_num;
+	unsigned long mask;
+	unsigned long addr;
+	unsigned long reg;
+	unsigned long val;
+	int my_cnode, my_synergy;
+	int target_cnode, target_synergy;
+
+	/*
+	 * Perform some idiot checks ..
+	 */
+	if ( (irq < 0) || (irq > 255) ||
+		(cpuid < 0) || (cpuid > 512) ) {
+		printk("clear_mask_b: Invalid parameter irq %d cpuid %d\n", irq, cpuid);
+		return;
+	}
+
+	target_cnode = cpuid_to_cnodeid(cpuid);
+	target_synergy = cpuid_to_synergy(cpuid);
+	my_cnode = cpuid_to_cnodeid(smp_processor_id());
+	my_synergy = cpuid_to_synergy(smp_processor_id());
+
+	reg_num = irq / 64;
+	mask = 1;
+	mask <<= (irq % 64);
+	switch (reg_num) {
+		case 0: 
+			reg = VEC_MASK0B;
+			addr = VEC_MASK0B_ADDR;
+			break;
+		case 1: 
+			reg = VEC_MASK1B;
+			addr = VEC_MASK1B_ADDR;
+			break;
+		case 2: 
+			reg = VEC_MASK2B;
+			addr = VEC_MASK2B_ADDR;
+			break;
+		case 3: 
+			reg = VEC_MASK3B;
+			addr = VEC_MASK3B_ADDR;
+			break;
+		default:
+			reg = addr = 0;
+			break;
+	}
+	if (my_cnode == target_cnode && my_synergy == target_synergy) {
+		// local synergy
+		val = READ_LOCAL_SYNERGY_REG(addr);
+		if (set) {
+			val |= mask;
+		} else {
+			val &= ~mask;
+		}
+		WRITE_LOCAL_SYNERGY_REG(addr, val);
+		val = READ_LOCAL_SYNERGY_REG(addr);
+	} else { /* remote synergy */
+		synergy = cpuid_to_synergy(cpuid);
+		nasid = cpuid_to_nasid(cpuid);
+		val = REMOTE_SYNERGY_LOAD(nasid, synergy, reg);
+		if (set) {
+			val |= mask;
+		} else {
+			val &= ~mask;
+		}
+		REMOTE_SYNERGY_STORE(nasid, synergy, reg, val);
+	}
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/sn/tools/make_textsym linux/arch/ia64/sn/tools/make_textsym
--- v2.4.0-prerelease/linux/arch/ia64/sn/tools/make_textsym	Wed Dec 31 16:00:00 1969
+++ linux/arch/ia64/sn/tools/make_textsym	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,138 @@
+#!/bin/sh
+# Build a textsym file for use in the Arium ITP probe.
+
+help() {
+cat <<END
+Build a WinDD "symtxt" file for use with the Arium ECM-30 probe.
+
+	Usage: $0 [<vmlinux file> [<output file>]]
+		If no input file is specified, it defaults to vmlinux.
+		If no output file name is specified, it defaults to "textsym".
+END
+exit 1
+}
+
+err () {
+	echo "ERROR - $*" >&2
+	exit 1
+}
+
+
+OPTS="H"
+while getopts "$OPTS" c ; do
+	case $c in
+		H)  help;;
+		\?) help;;
+	esac
+
+done
+shift `expr $OPTIND - 1`
+
+LINUX=${1:-vmlinux}
+TEXTSYM=${2:-${LINUX}.sym}
+
+[ -f $VMLINUX ] || help
+
+
+# pipe everything thru sort
+echo "TEXTSYM V1.0"
+(cat <<END 
+GLOBAL | e000000000500000 | CODE | VEC_VHPT_Translation_0000
+GLOBAL | e000000000500400 | CODE | VEC_ITLB_0400
+GLOBAL | e000000000500800 | CODE | VEC_DTLB_0800
+GLOBAL | e000000000500c00 | CODE | VEC_Alt_ITLB_0c00
+GLOBAL | e000000000501000 | CODE | VEC_Alt_DTLB_1000
+GLOBAL | e000000000501400 | CODE | VEC_Data_nested_TLB_1400
+GLOBAL | e000000000501800 | CODE | VEC_Instruction_Key_Miss_1800
+GLOBAL | e000000000501c00 | CODE | VEC_Data_Key_Miss_1c00
+GLOBAL | e000000000502000 | CODE | VEC_Dirty-bit_2000
+GLOBAL | e000000000502400 | CODE | VEC_Instruction_Access-bit_2400
+GLOBAL | e000000000502800 | CODE | VEC_Data_Access-bit_2800
+GLOBAL | e000000000502c00 | CODE | VEC_Break_instruction_2c00
+GLOBAL | e000000000503000 | CODE | VEC_External_Interrupt_3000
+GLOBAL | e000000000503400 | CODE | VEC_Reserved_3400
+GLOBAL | e000000000503800 | CODE | VEC_Reserved_3800
+GLOBAL | e000000000503c00 | CODE | VEC_Reserved_3c00
+GLOBAL | e000000000504000 | CODE | VEC_Reserved_4000
+GLOBAL | e000000000504400 | CODE | VEC_Reserved_4400
+GLOBAL | e000000000504800 | CODE | VEC_Reserved_4800
+GLOBAL | e000000000504c00 | CODE | VEC_Reserved_4c00
+GLOBAL | e000000000505000 | CODE | VEC_Page_Not_Present_5000
+GLOBAL | e000000000505100 | CODE | VEC_Key_Permission_5100
+GLOBAL | e000000000505200 | CODE | VEC_Instruction_Access_Rights_5200
+GLOBAL | e000000000505300 | CODE | VEC_Data_Access_Rights_5300
+GLOBAL | e000000000505400 | CODE | VEC_General_Exception_5400
+GLOBAL | e000000000505500 | CODE | VEC_Disabled_FP-Register_5500
+GLOBAL | e000000000505600 | CODE | VEC_Nat_Consumption_5600
+GLOBAL | e000000000505700 | CODE | VEC_Speculation_5700
+GLOBAL | e000000000505800 | CODE | VEC_Reserved_5800
+GLOBAL | e000000000505900 | CODE | VEC_Debug_5900
+GLOBAL | e000000000505a00 | CODE | VEC_Unaligned_Reference_5a00
+GLOBAL | e000000000505b00 | CODE | VEC_Unsupported_Data_Reference_5b00
+GLOBAL | e000000000505c00 | CODE | VEC_Floating-Point_Fault_5c00
+GLOBAL | e000000000505d00 | CODE | VEC_Floating_Point_Trap_5d00
+GLOBAL | e000000000505e00 | CODE | VEC_Lower_Privilege_Tranfer_Trap_5e00
+GLOBAL | e000000000505f00 | CODE | VEC_Taken_Branch_Trap_5f00
+GLOBAL | e000000000506000 | CODE | VEC_Single_Step_Trap_6000
+GLOBAL | e000000000506100 | CODE | VEC_Reserved_6100
+GLOBAL | e000000000506200 | CODE | VEC_Reserved_6200
+GLOBAL | e000000000506300 | CODE | VEC_Reserved_6300
+GLOBAL | e000000000506400 | CODE | VEC_Reserved_6400
+GLOBAL | e000000000506500 | CODE | VEC_Reserved_6500
+GLOBAL | e000000000506600 | CODE | VEC_Reserved_6600
+GLOBAL | e000000000506700 | CODE | VEC_Reserved_6700
+GLOBAL | e000000000506800 | CODE | VEC_Reserved_6800
+GLOBAL | e000000000506900 | CODE | VEC_IA-32_Exeception_6900
+GLOBAL | e000000000506a00 | CODE | VEC_IA-32_Intercept_6a00
+GLOBAL | e000000000506b00 | CODE | VEC_IA-32_Interrupt_6b00
+GLOBAL | e000000000506c00 | CODE | VEC_Reserved_6c00
+GLOBAL | e000000000506d00 | CODE | VEC_Reserved_6d00
+GLOBAL | e000000000506e00 | CODE | VEC_Reserved_6e00
+GLOBAL | e000000000506f00 | CODE | VEC_Reserved_6f00
+GLOBAL | e000000000507000 | CODE | VEC_Reserved_7000
+GLOBAL | e000000000507100 | CODE | VEC_Reserved_7100
+GLOBAL | e000000000507200 | CODE | VEC_Reserved_7200
+GLOBAL | e000000000507300 | CODE | VEC_Reserved_7300
+GLOBAL | e000000000507400 | CODE | VEC_Reserved_7400
+GLOBAL | e000000000507500 | CODE | VEC_Reserved_7500
+GLOBAL | e000000000507600 | CODE | VEC_Reserved_7600
+GLOBAL | e000000000507700 | CODE | VEC_Reserved_7700
+GLOBAL | e000000000507800 | CODE | VEC_Reserved_7800
+GLOBAL | e000000000507900 | CODE | VEC_Reserved_7900
+GLOBAL | e000000000507a00 | CODE | VEC_Reserved_7a00
+GLOBAL | e000000000507b00 | CODE | VEC_Reserved_7b00
+GLOBAL | e000000000507c00 | CODE | VEC_Reserved_7c00
+GLOBAL | e000000000507d00 | CODE | VEC_Reserved_7d00
+GLOBAL | e000000000507e00 | CODE | VEC_Reserved_7e00
+GLOBAL | e000000000507f00 | CODE | VEC_Reserved_7f00
+END
+
+$OBJDUMP -t $LINUX | sort | awk '
+/empty_zero_page/ {start=1}
+/e0000000/ {
+	if ($4 == ".kdb")
+		next
+	if (start && substr($NF,1,1) != "0") {
+		type = substr($0,26,5)
+		if (type == ".text")
+			printf "GLOBAL | %s | CODE | %s\n", $1, $NF
+		else
+			printf "GLOBAL | %s | DATA | %s | %d\n", $1, $NF, $(NF-1)
+	}
+	if($NF == "_end") 
+		exit
+
+}
+' ) | egrep -v " __device| __vendor" | awk '
+/GLOBAL/ {
+	print $0
+	print substr($0,1,9) substr($0,18,18) "Phy_" substr($0,36)
+
+} ' | sort -k3
+
+
+
+N=`wc -l $TEXTSYM|awk '{print $1}'`
+echo "Generated TEXTSYM file" >&2
+echo "  $LINUX --> $TEXTSYM" >&2
+echo "  Found $N symbols" >&2
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/ia64/tools/print_offsets.c linux/arch/ia64/tools/print_offsets.c
--- v2.4.0-prerelease/linux/arch/ia64/tools/print_offsets.c	Fri Jul 14 16:08:12 2000
+++ linux/arch/ia64/tools/print_offsets.c	Thu Jan  4 12:50:17 2001
@@ -149,7 +149,7 @@
     { "IA64_SWITCH_STACK_AR_UNAT_OFFSET",	offsetof (struct switch_stack, ar_unat) },
     { "IA64_SWITCH_STACK_AR_RNAT_OFFSET",	offsetof (struct switch_stack, ar_rnat) },
     { "IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET",	offsetof (struct switch_stack, ar_bspstore) },
-    { "IA64_SWITCH_STACK_PR_OFFSET",	offsetof (struct switch_stack, b0) },
+    { "IA64_SWITCH_STACK_PR_OFFSET",	offsetof (struct switch_stack, pr) },
     { "IA64_SIGCONTEXT_AR_BSP_OFFSET",	offsetof (struct sigcontext, sc_ar_bsp) },
     { "IA64_SIGCONTEXT_AR_RNAT_OFFSET",	offsetof (struct sigcontext, sc_ar_rnat) },
     { "IA64_SIGCONTEXT_FLAGS_OFFSET",	offsetof (struct sigcontext, sc_flags) },
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/Makefile linux/arch/m68k/Makefile
--- v2.4.0-prerelease/linux/arch/m68k/Makefile	Thu Mar 23 08:47:44 2000
+++ linux/arch/m68k/Makefile	Thu Jan  4 13:00:55 2001
@@ -19,6 +19,10 @@
 # override top level makefile
 AS += -m68020
 LD += -m m68kelf
+ifneq ($(COMPILE_ARCH),$(ARCH))
+	# prefix for cross-compiling binaries
+	CROSS_COMPILE = m68k-linux-
+endif
 
 ifndef CONFIG_SUN3
 LINKFLAGS = -T $(TOPDIR)/arch/m68k/vmlinux.lds
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/amiga/amisound.c linux/arch/m68k/amiga/amisound.c
--- v2.4.0-prerelease/linux/arch/m68k/amiga/amisound.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/m68k/amiga/amisound.c	Thu Jan  4 13:00:55 2001
@@ -97,13 +97,10 @@
 		/* turn on DMA for audio channel 2 */
 		custom.dmacon = DMAF_SETCLR | DMAF_AUD2;
 
-		restore_flags(flags);
-		return;
-	} else {
+	} else
 		nosound( 0 );
-		restore_flags(flags);
-		return;
-	}
+
+	restore_flags(flags);
 }
 
 
@@ -113,4 +110,4 @@
 	custom.dmacon = DMAF_AUD2;
 	/* restore period to previous value after beeping */
 	custom.aud[2].audper = amiga_audio_period;
-}	
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/amiga/config.c linux/arch/m68k/amiga/config.c
--- v2.4.0-prerelease/linux/arch/m68k/amiga/config.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/m68k/amiga/config.c	Thu Jan  4 13:00:55 2001
@@ -136,11 +136,13 @@
      *  Motherboard Resources present in all Amiga models
      */
 
-static struct resource mb_resource[] = {
-    { "CIA B", 0x00bfd000, 0x00bfdfff },
-    { "CIA A", 0x00bfe000, 0x00bfefff },
-    { "Custom I/O", 0x00dff000, 0x00dfffff },
-    { "Kickstart ROM", 0x00f80000, 0x00ffffff }
+static struct {
+    struct resource _ciab, _ciaa, _custom, _kickstart;
+} mb_resources = {
+    _ciab:	{ "CIA B", 0x00bfd000, 0x00bfdfff },
+    _ciaa:	{ "CIA A", 0x00bfe000, 0x00bfefff },
+    _custom:	{ "Custom I/O", 0x00dff000, 0x00dfffff },
+    _kickstart:	{ "Kickstart ROM", 0x00f80000, 0x00ffffff }
 };
 
 static struct resource rtc_resource = {
@@ -196,7 +198,7 @@
 		dev->resource.end = dev->resource.start+cd->cd_BoardSize-1;
 	    } else
 		printk("amiga_parse_bootinfo: too many AutoConfig devices\n");
-#endif
+#endif /* CONFIG_ZORRO */
 	    break;
 
 	case BI_AMIGA_SERPER:
@@ -385,8 +387,8 @@
 
   /* Yuk, we don't have PCI memory */
   iomem_resource.name = "Memory";
-  for (i = 0; i < sizeof(mb_resource)/sizeof(mb_resource[0]); i++)
-      request_resource(&iomem_resource, &mb_resource[i]);
+  for (i = 0; i < 4; i++)
+    request_resource(&iomem_resource, &((struct resource *)&mb_resources)[i]);
 
   mach_sched_init      = amiga_sched_init;
   mach_keyb_init       = amiga_keyb_init;
@@ -430,7 +432,9 @@
   mach_floppy_setup    = amiga_floppy_setup;
 #endif
   mach_reset           = amiga_reset;
+#ifdef CONFIG_DUMMY_CONSOLE
   conswitchp           = &dummy_con;
+#endif
   kd_mksound           = amiga_mksound;
 #ifdef CONFIG_MAGIC_SYSRQ
   mach_sysrq_key = 0x5f;	     /* HELP */
@@ -515,10 +519,12 @@
 static void __init amiga_sched_init(void (*timer_routine)(int, void *,
 							  struct pt_regs *))
 {
-	static struct resource sched_res = { "timer" };
+	static struct resource sched_res = {
+	    "timer", 0x00bfd400, 0x00bfd5ff,
+	};
 	jiffy_ticks = (amiga_eclock+HZ/2)/HZ;
 
-	if (!request_mem_region(CIAB_PHYSADDR+0x400, 0x200, "timer"))
+	if (request_resource(&mb_resources._ciab, &sched_res))
 	    printk("Cannot allocate ciab.ta{lo,hi}\n");
 	ciab.cra &= 0xC0;   /* turn off timer A, continuous mode, from Eclk */
 	ciab.talo = jiffy_ticks % 256;
@@ -624,6 +630,8 @@
 			t->wday = tod->weekday;
 			t->mon  = tod->month1  * 10 + tod->month2 - 1;
 			t->year = tod->year1   * 10 + tod->year2;
+			if (t->year <= 69)
+				t->year += 100;
 		} else {
 			tod->second1 = t->sec / 10;
 			tod->second2 = t->sec % 10;
@@ -637,6 +645,8 @@
 				tod->weekday = t->wday;
 			tod->month1  = (t->mon + 1) / 10;
 			tod->month2  = (t->mon + 1) % 10;
+			if (t->year >= 100)
+				t->year -= 100;
 			tod->year1   = t->year / 10;
 			tod->year2   = t->year % 10;
 		}
@@ -658,6 +668,8 @@
 			t->wday = tod->weekday;
 			t->mon  = tod->month1      * 10 + tod->month2 - 1;
 			t->year = tod->year1       * 10 + tod->year2;
+			if (t->year <= 69)
+				t->year += 100;
 
 			if (!(tod->cntrl3 & TOD2000_CNTRL3_24HMODE)){
 				if (!(tod->hour1 & TOD2000_HOUR1_PM) && t->hour == 12)
@@ -684,6 +696,8 @@
 				tod->weekday = t->wday;
 			tod->month1  = (t->mon + 1) / 10;
 			tod->month2  = (t->mon + 1) % 10;
+			if (t->year >= 100)
+				t->year -= 100;
 			tod->year1   = t->year / 10;
 			tod->year2   = t->year % 10;
 		}
@@ -1047,7 +1061,7 @@
 				   "Device%s\n",
 		       AMIGAHW_PRESENT(ZORRO3) ? "I" : "",
 		       zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
-#endif
+#endif /* CONFIG_ZORRO */
 
 #undef AMIGAHW_ANNOUNCE
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/atari/debug.c linux/arch/m68k/atari/debug.c
--- v2.4.0-prerelease/linux/arch/m68k/atari/debug.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/m68k/atari/debug.c	Thu Jan  4 13:00:55 2001
@@ -94,7 +94,7 @@
 {
     unsigned char tmp;
     /* This a some-seconds timeout in case no printer is connected */
-    unsigned long i = loops_per_sec > 1 ? loops_per_sec : 10000000;
+    unsigned long i = loops_per_jiffy > 1 ? loops_per_jiffy : 10000000/HZ;
 
     while( (mfp.par_dt_reg & 1) && --i ) /* wait for BUSY == L */
 	;
@@ -196,7 +196,7 @@
 	MFPDELAY();					\
     } while(0)
 
-/* loops_per_sec isn't initialized yet, so we can't use udelay(). This does a
+/* loops_per_jiffy isn't initialized yet, so we can't use udelay(). This does a
  * delay of ~ 60us. */
 #define LONG_DELAY()				\
     do {					\
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/config.in linux/arch/m68k/config.in
--- v2.4.0-prerelease/linux/arch/m68k/config.in	Sun Nov 19 18:44:03 2000
+++ linux/arch/m68k/config.in	Thu Jan  4 13:00:55 2001
@@ -53,9 +53,9 @@
 if [ "$CONFIG_HP300" = "y" ]; then
    bool '  DIO bus support' CONFIG_DIO
 fi
+bool 'Sun3 support' CONFIG_SUN3
 bool 'Sun3x support' CONFIG_SUN3X
   
-define_bool CONFIG_SUN3 n
 bool 'Q40/Q60 support' CONFIG_Q40
 
 comment 'Processor type'
@@ -73,7 +73,11 @@
 bool 'Advanced configuration options' CONFIG_ADVANCED
 if [ "$CONFIG_ADVANCED" = "y" ]; then
    bool 'Use read-modify-write instructions' CONFIG_RMW_INSNS
-   bool 'Use one physical chunk of memory only' CONFIG_SINGLE_MEMORY_CHUNK
+   if [ "$CONFIG_SUN3" = "y" ]; then
+      define_bool CONFIG_SINGLE_MEMORY_CHUNK n
+   else
+      bool 'Use one physical chunk of memory only' CONFIG_SINGLE_MEMORY_CHUNK
+   fi
    if [ "$CONFIG_M68060" = "y" ]; then
       bool 'Use write-through caching for 68060 supervisor accesses' CONFIG_060_WRITETHROUGH
    fi
@@ -120,13 +124,12 @@
    tristate 'Parallel port support (EXPERIMENTAL)' CONFIG_PARPORT
    if [ "$CONFIG_PARPORT" != "n" ]; then
       if [ "$CONFIG_AMIGA" != "n" ]; then
-         dep_tristate '   Amiga builtin port' CONFIG_PARPORT_AMIGA $CONFIG_PARPORT
+         dep_tristate '  Amiga builtin port' CONFIG_PARPORT_AMIGA $CONFIG_PARPORT
 	 if [ "$CONFIG_ZORRO" != "n" ]; then
-            dep_tristate '    Multiface III parallel port' CONFIG_PARPORT_MFC3 $CONFIG_PARPORT
+            dep_tristate '  Multiface III parallel port' CONFIG_PARPORT_MFC3 $CONFIG_PARPORT
 	 fi
       fi
       if [ "$CONFIG_Q40" != "n" ]; then
-	 tristate '    Q40 Parallel port' CONFIG_PARPORT
 	 if [ "$CONFIG_PARPORT" != "n" ]; then
 	    define_bool CONFIG_PARPORT_PC y
 	 fi
@@ -156,6 +159,10 @@
    source net/Config.in
 fi
 
+if [ "$CONFIG_MAC" = "y" ]; then
+   source drivers/input/Config.in
+fi
+
 mainmenu_option next_comment
 comment 'ATA/IDE/MFM/RLL support'
 
@@ -254,6 +261,10 @@
       bool 'NCR53C710 SCSI driver for BVME6000' CONFIG_BVME6000_SCSI
    fi
 
+   if [ "$CONFIG_SUN3" = "y" ]; then
+	dep_tristate 'Sun3 NCR5380 SCSI' CONFIG_SUN3_SCSI $CONFIG_SCSI
+   fi
+
    if [ "$CONFIG_SUN3X" = "y" ]; then
       bool 'ESP SCSI driver' CONFIG_SUN3X_ESP
    fi
@@ -322,16 +333,17 @@
 	    tristate '  PAMsNet support' CONFIG_ATARI_PAMSNET
 	 fi
       fi
-      if [ "$CONFIG_SUN3X" = "y" ]; then
-	 bool '  Sun3x Lance support' CONFIG_SUNLANCE
+      if [ "$CONFIG_SUN3" = "y" -o "$CONFIG_SUN3X" = "y" ]; then
+	tristate '  Sun3/Sun3x on-board LANCE support' CONFIG_SUN3LANCE
       fi
       if [ "$CONFIG_HP300" = "y" ]; then
 	 bool '  HP on-board LANCE support' CONFIG_HPLANCE
       fi
       if [ "$CONFIG_Q40" = "y" ]; then
-	 if [ ! "$CONFIG_PARPORT" = "n" ]; then
+         if [  "$CONFIG_PARPORT" != "n" ]; then
 	    dep_tristate '  PLIP (parallel port) support' CONFIG_PLIP $CONFIG_PARPORT
 	 fi
+         tristate 'NE2000/NE1000 support' CONFIG_NE2000
       fi
    fi
    endmenu
@@ -404,29 +416,39 @@
       dep_tristate '  GVP IO-Extender parallel printer support' CONFIG_GVPIOEXT_LP $CONFIG_GVPIOEXT
       dep_tristate '  GVP IO-Extender PLIP support' CONFIG_GVPIOEXT_PLIP $CONFIG_GVPIOEXT
       tristate 'Multiface Card III serial support' CONFIG_MULTIFACE_III_TTY
+      if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+        tristate 'Commodore A2232 serial support (EXPERIMENTAL)' CONFIG_A2232
+      fi
    fi
 fi
 if [ "$CONFIG_MAC" = "y" ]; then
    tristate 'Macintosh serial support' CONFIG_MAC_SCC
    bool 'Apple Desktop Bus (ADB) support' CONFIG_ADB
    if [ "$CONFIG_ADB" = "y" ]; then
-      bool '   Support for ADB keyboard' CONFIG_ADB_KEYBOARD 
-      bool '   Support for ADB mouse' CONFIG_ADBMOUSE
       bool '   Include Mac II ADB driver' CONFIG_ADB_MACII
       bool '   Include Mac IIsi ADB driver' CONFIG_ADB_MACIISI
       bool '   Include CUDA ADB driver' CONFIG_ADB_CUDA
       bool '   Include IOP (IIfx/Quadra 9x0) ADB driver' CONFIG_ADB_IOP
       bool '   Include PMU (Powerbook) ADB driver' CONFIG_ADB_PMU68K
    fi
-   if [ "$CONFIG_ADBMOUSE" = "y" ]; then
-      define_bool CONFIG_BUSMOUSE y
+   dep_bool '   Use input layer for ADB devices' CONFIG_INPUT_ADBHID $CONFIG_INPUT
+   if [ "$CONFIG_INPUT_ADBHID" = "y" ]; then
+      define_bool CONFIG_MAC_HID y
+      bool '     Support for ADB raw keycodes' CONFIG_MAC_ADBKEYCODES
+      bool '     Support for mouse button 2+3 emulation' CONFIG_MAC_EMUMOUSEBTN
+   else
+      bool '   Support for ADB keyboard (old driver)' CONFIG_ADB_KEYBOARD
    fi
 fi
 if [ "$CONFIG_HP300" = "y" -a "$CONFIG_DIO" = "y" ]; then
    tristate 'HP DCA serial support' CONFIG_HPDCA
 fi
 
-dep_bool 'Sun3x builtin serial support' CONFIG_SUN3X_ZS $CONFIG_SUN3X
+if [ "$CONFIG_SUN3" = "y" -o "$CONFIG_SUN3X" = "y" ]; then
+   bool 'Sun3/3x builtin serial support' CONFIG_SUN3X_ZS
+else
+   define_bool CONFIG_SUN3X_ZS n
+fi
 dep_bool '  Sun keyboard support' CONFIG_SUN_KEYBOARD $CONFIG_SUN3X_ZS
 dep_bool '  Sun mouse support' CONFIG_SUN_MOUSE $CONFIG_SUN3X_ZS
 if [ "$CONFIG_SUN_MOUSE" = "y" ]; then
@@ -442,7 +464,7 @@
 
 if [ "$CONFIG_AMIGA" = "y" -o "$CONFIG_ATARI" = "y" -o \
      "$CONFIG_MAC" = "y" -o "$CONFIG_HP300" = "y" -o \
-     "$CONFIG_SUN3X" = "y" ]; then
+     "$CONFIG_SUN3" = "y" -o "$CONFIG_SUN3X" = "y" ]; then
    if [ "$CONFIG_ATARI_MFPSER" = "y" -o "$CONFIG_ATARI_SCC" = "y" -o \
         "$CONFIG_ATARI_MIDI" = "y" -o "$CONFIG_MAC_SCC" = "y" -o \
         "$CONFIG_AMIGA_BUILTIN_SERIAL" = "y" -o \
@@ -476,6 +498,15 @@
 fi
 if [ "$CONFIG_ATARI" = "y" ]; then
    bool 'Enhanced Real Time Clock Support' CONFIG_RTC
+else
+   if [ "$CONFIG_SUN3" = "y" ]; then
+      define_bool CONFIG_GEN_RTC y
+   else
+      bool 'Generic /dev/rtc emulation' CONFIG_GEN_RTC
+   fi
+fi
+if [ "$CONFIG_Q40" = "y" ]; then
+  bool 'Q40 Real Time Clock Support' CONFIG_Q40RTC
 fi
 bool 'Unix98 PTY support' CONFIG_UNIX98_PTYS
 if [ "$CONFIG_UNIX98_PTYS" = "y" ]; then
@@ -498,11 +529,6 @@
 if [ "$CONFIG_VME" = "n" ]; then
    mainmenu_option next_comment
    comment 'Console drivers'
-   if [ "$CONFIG_HP300" = "y" ]; then
-      bool 'Frame buffer support' CONFIG_FB
-   else
-      define_bool CONFIG_FB y
-   fi
    source drivers/video/Config.in
    endmenu
 fi
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/kernel/m68k_defs.h linux/arch/m68k/kernel/m68k_defs.h
--- v2.4.0-prerelease/linux/arch/m68k/kernel/m68k_defs.h	Sun Aug 15 11:47:29 1999
+++ linux/arch/m68k/kernel/m68k_defs.h	Wed Dec 31 16:00:00 1969
@@ -1,63 +0,0 @@
-/*
- * WARNING! This file is automatically generated - DO NOT EDIT!
- */
-
-#define TS_MAGICKEY	0x5a5a5a5a
-#define TASK_STATE 0
-#define TASK_FLAGS 4
-#define TASK_SIGPENDING 8
-#define TASK_NEEDRESCHED 20
-#define TASK_THREAD 482
-#define TASK_MM 634
-#define TASK_ACTIVE_MM 638
-#define THREAD_KSP 0
-#define THREAD_USP 4
-#define THREAD_SR 8
-#define THREAD_FS 10
-#define THREAD_CRP 12
-#define THREAD_ESP0 20
-#define THREAD_FPREG 24
-#define THREAD_FPCNTL 120
-#define THREAD_FPSTATE 132
-#define PT_D0 32
-#define PT_ORIG_D0 36
-#define PT_D1 0
-#define PT_D2 4
-#define PT_D3 8
-#define PT_D4 12
-#define PT_D5 16
-#define PT_A0 20
-#define PT_A1 24
-#define PT_A2 28
-#define PT_PC 46
-#define PT_SR 44
-#define PT_VECTOR 50
-#define IRQ_HANDLER 0
-#define IRQ_DEVID 8
-#define IRQ_NEXT 16
-#define STAT_IRQ 120
-#define BIR_TAG 0
-#define BIR_SIZE 2
-#define BIR_DATA 4
-#define FBCON_FONT_DESC_IDX 0
-#define FBCON_FONT_DESC_NAME 4
-#define FBCON_FONT_DESC_WIDTH 8
-#define FBCON_FONT_DESC_HEIGHT 12
-#define FBCON_FONT_DESC_DATA 16
-#define FBCON_FONT_DESC_PREF 20
-#define SIGSEGV 11
-#define SEGV_MAPERR 1
-#define SIGTRAP 5
-#define TRAP_TRACE 2
-#define CUSTOMBASE -2132807680
-#define C_INTENAR 28
-#define C_INTREQR 30
-#define C_INTENA 154
-#define C_INTREQ 156
-#define C_SERDATR 24
-#define C_SERDAT 48
-#define C_SERPER 50
-#define CIAABASE -2134908927
-#define CIABBASE -2134913024
-#define C_PRA 0
-#define ZTWOBASE -2147483648
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/kernel/m68k_ksyms.c linux/arch/m68k/kernel/m68k_ksyms.c
--- v2.4.0-prerelease/linux/arch/m68k/kernel/m68k_ksyms.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/m68k/kernel/m68k_ksyms.c	Thu Jan  4 13:00:55 2001
@@ -19,6 +19,7 @@
 #include <asm/hardirq.h>
 #include <asm/softirq.h>
 
+asmlinkage long long __ashldi3 (long long, int);
 asmlinkage long long __ashrdi3 (long long, int);
 asmlinkage long long __lshrdi3 (long long, int);
 asmlinkage long long __muldi3 (long long, long long);
@@ -32,27 +33,28 @@
 EXPORT_SYMBOL(m68k_machtype);
 EXPORT_SYMBOL(m68k_cputype);
 EXPORT_SYMBOL(m68k_is040or060);
+EXPORT_SYMBOL(m68k_realnum_memory);
+EXPORT_SYMBOL(m68k_memory);
+#ifndef CONFIG_SUN3
 EXPORT_SYMBOL(cache_push);
 EXPORT_SYMBOL(cache_clear);
 #ifndef CONFIG_SINGLE_MEMORY_CHUNK
 EXPORT_SYMBOL(mm_vtop);
 EXPORT_SYMBOL(mm_ptov);
 EXPORT_SYMBOL(mm_end_of_chunk);
-#endif
-EXPORT_SYMBOL(m68k_realnum_memory);
-EXPORT_SYMBOL(m68k_memory);
-#ifndef CONFIG_SUN3
+#endif /* !CONFIG_SINGLE_MEMORY_CHUNK */
 EXPORT_SYMBOL(mm_vtop_fallback);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_set_cachemode);
-#endif
+#endif /* !CONFIG_SUN3 */
 EXPORT_SYMBOL(m68k_debug_device);
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(dump_thread);
 EXPORT_SYMBOL(strnlen);
 EXPORT_SYMBOL(strrchr);
 EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strtok);
 EXPORT_SYMBOL(enable_irq);
 EXPORT_SYMBOL(disable_irq);
 EXPORT_SYMBOL(kernel_thread);
@@ -67,6 +69,7 @@
    explicitly (the C compiler generates them).  Fortunately,
    their interface isn't gonna change any time soon now, so
    it's OK to leave it out of version control.  */
+EXPORT_SYMBOL_NOVERS(__ashldi3);
 EXPORT_SYMBOL_NOVERS(__ashrdi3);
 EXPORT_SYMBOL_NOVERS(__lshrdi3);
 EXPORT_SYMBOL_NOVERS(memcpy);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/kernel/setup.c linux/arch/m68k/kernel/setup.c
--- v2.4.0-prerelease/linux/arch/m68k/kernel/setup.c	Mon Jan  1 09:38:34 2001
+++ linux/arch/m68k/kernel/setup.c	Thu Jan  4 13:00:55 2001
@@ -432,7 +432,7 @@
     else
 	mmu = "unknown";
 
-    clockfreq = loops_per_sec*clockfactor;
+    clockfreq = loops_per_jiffy*HZ*clockfactor;
 
     return(sprintf(buffer, "CPU:\t\t%s\n"
 		   "MMU:\t\t%s\n"
@@ -442,8 +442,8 @@
 		   "Calibration:\t%lu loops\n",
 		   cpu, mmu, fpu,
 		   clockfreq/1000000,(clockfreq/100000)%10,
-		   loops_per_sec/500000,(loops_per_sec/5000)%100,
-		   loops_per_sec));
+		   loops_per_jiffy/(500000/HZ),(loops_per_jiffy/(5000/HZ))%100,
+		   loops_per_jiffy));
 
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/kernel/time.c linux/arch/m68k/kernel/time.c
--- v2.4.0-prerelease/linux/arch/m68k/kernel/time.c	Sun Oct  8 10:50:06 2000
+++ linux/arch/m68k/kernel/time.c	Thu Jan  4 13:00:55 2001
@@ -126,13 +126,13 @@
  */
 void do_gettimeofday(struct timeval *tv)
 {
-	extern volatile unsigned long lost_ticks;
+	extern unsigned long wall_jiffies;
 	unsigned long flags;
 	unsigned long usec, sec, lost;
 
 	read_lock_irqsave(&xtime_lock, flags);
 	usec = mach_gettimeoffset();
-	lost = lost_ticks;
+	lost = jiffies - wall_jiffies;
 	if (lost)
 		usec += lost * (1000000/HZ);
 	sec = xtime.tv_sec;
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/kernel/traps.c linux/arch/m68k/kernel/traps.c
--- v2.4.0-prerelease/linux/arch/m68k/kernel/traps.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/m68k/kernel/traps.c	Thu Jan  4 13:00:55 2001
@@ -152,13 +152,6 @@
 }
 
 
-static inline void console_verbose(void)
-{
-	extern int console_loglevel;
-	console_loglevel = 15;
-}
-
-
 static char *vec_names[] = {
 	"RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
 	"ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/lib/Makefile linux/arch/m68k/lib/Makefile
--- v2.4.0-prerelease/linux/arch/m68k/lib/Makefile	Mon Jan  1 09:38:34 2001
+++ linux/arch/m68k/lib/Makefile	Thu Jan  4 13:00:55 2001
@@ -7,7 +7,7 @@
 
 L_TARGET = lib.a
 
-obj-y		:= ashrdi3.o lshrdi3.o checksum.o memcpy.o memcmp.o memset.o \
-		    semaphore.o muldi3.o
+obj-y		:= ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
+			checksum.o memcmp.o memcpy.o memset.o semaphore.o
 
 include $(TOPDIR)/Rules.make
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/lib/ashldi3.c linux/arch/m68k/lib/ashldi3.c
--- v2.4.0-prerelease/linux/arch/m68k/lib/ashldi3.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/m68k/lib/ashldi3.c	Thu Jan  4 13:00:55 2001
@@ -0,0 +1,62 @@
+/* ashldi3.c extracted from gcc-2.95.2/libgcc2.c which is: */
+/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+#define BITS_PER_UNIT 8
+
+typedef 	 int SItype	__attribute__ ((mode (SI)));
+typedef unsigned int USItype	__attribute__ ((mode (SI)));
+typedef		 int DItype	__attribute__ ((mode (DI)));
+typedef int word_type __attribute__ ((mode (__word__)));
+
+struct DIstruct {SItype high, low;};
+
+typedef union
+{
+  struct DIstruct s;
+  DItype ll;
+} DIunion;
+
+DItype
+__ashldi3 (DItype u, word_type b)
+{
+  DIunion w;
+  word_type bm;
+  DIunion uu;
+
+  if (b == 0)
+    return u;
+
+  uu.ll = u;
+
+  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+  if (bm <= 0)
+    {
+      w.s.low = 0;
+      w.s.high = (USItype)uu.s.low << -bm;
+    }
+  else
+    {
+      USItype carries = (USItype)uu.s.low >> bm;
+      w.s.low = (USItype)uu.s.low << b;
+      w.s.high = ((USItype)uu.s.high << b) | carries;
+    }
+
+  return w.ll;
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/m68k/mm/fault.c linux/arch/m68k/mm/fault.c
--- v2.4.0-prerelease/linux/arch/m68k/mm/fault.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/m68k/mm/fault.c	Thu Jan  4 13:00:55 2001
@@ -26,7 +26,9 @@
 	siginfo.si_signo = current->thread.signo;
 	siginfo.si_code = current->thread.code;
 	siginfo.si_addr = (void *)current->thread.faddr;
+#ifdef DEBUG
 	printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code);
+#endif
 
 	if (user_mode(regs)) {
 		force_sig_info(siginfo.si_signo,
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sh/config.in linux/arch/sh/config.in
--- v2.4.0-prerelease/linux/arch/sh/config.in	Sun Nov 19 18:44:04 2000
+++ linux/arch/sh/config.in	Thu Jan  4 13:19:13 2001
@@ -28,10 +28,22 @@
 	"Generic		CONFIG_SH_GENERIC		\
 	 SolutionEngine		CONFIG_SH_SOLUTION_ENGINE	\
 	 Overdrive		CONFIG_SH_OVERDRIVE		\
-	 HP600			CONFIG_SH_HP600			\
+	 HP620			CONFIG_SH_HP620			\
+	 HP680			CONFIG_SH_HP680			\
+	 HP690			CONFIG_SH_HP690			\
 	 CqREEK			CONFIG_SH_CQREEK		\
+	 DMIDA			CONFIG_SH_DMIDA    		\
+	 EC3104			CONFIG_SH_EC3104		\
+	 Dreamcast		CONFIG_SH_DREAMCAST		\
 	 BareCPU		CONFIG_SH_UNKNOWN" Generic
 
+define_bool CONFIG_SH_RTC y
+
+if [ "$CONFIG_SH_HP620" = "y" -o "$CONFIG_SH_HP680" = "y" -o \
+     "$CONFIG_SH_HP690" = "y" ]; then
+	define_bool CONFIG_SH_HP600 y
+fi
+
 choice 'Processor type' \
    "SH7707 CONFIG_CPU_SUBTYPE_SH7707 \
     SH7708 CONFIG_CPU_SUBTYPE_SH7708 \
@@ -65,14 +77,17 @@
 mainmenu_option next_comment
 comment 'General setup'
 
-define_bool CONFIG_ISA n
+# Even on SuperH devices which don't have an ISA bus,
+# this variable helps the PCMCIA modules handle
+# IRQ requesting properly -- Greg Banks.
+define_bool CONFIG_ISA y
 define_bool CONFIG_EISA n
 define_bool CONFIG_MCA n
 define_bool CONFIG_SBUS n
 
 bool 'Networking support' CONFIG_NET
 
-if [ "$CONFIG_SH_GENERIC" = "y" -o "$CONFIG_SH_SOLUTION_ENGINE" = "y" ]; then
+if [ "$CONFIG_SH_GENERIC" = "y" -o "$CONFIG_SH_SOLUTION_ENGINE" = "y" -o "$CONFIG_SH_UNKNOWN" = "y" ]; then
   bool 'Compact Flash Enabler support' CONFIG_CF_ENABLER
 fi
 
@@ -82,6 +97,11 @@
    bool 'HD64461 PCMCIA enabler' CONFIG_HD64461_ENABLER
 fi
 
+bool 'Hitachi HD64465 companion chip support' CONFIG_HD64465
+if [ "$CONFIG_HD64465" = "y" ]; then
+   int 'HD64465 IRQ' CONFIG_HD64465_IRQ 5
+fi
+
 bool 'PCI support' CONFIG_PCI
 if [ "$CONFIG_PCI" = "y" ]; then
    choice '  PCI access mode' \
@@ -170,6 +190,11 @@
    endmenu
 fi
 
+#
+# input before char - char/joystick depends on it. As does USB.
+#
+source drivers/input/Config.in
+
 mainmenu_option next_comment
 comment 'Character devices'
 
@@ -237,7 +262,6 @@
 bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
 bool 'Use LinuxSH standard BIOS' CONFIG_SH_STANDARD_BIOS
 if [ "$CONFIG_SH_STANDARD_BIOS" = "y" ]; then
-   hex ' GDB Stub VBR' CONFIG_GDB_STUB_VBR a0000000
    bool 'GDB Stub kernel debug' CONFIG_DEBUG_KERNEL_WITH_GDB_STUB
    bool 'Early printk support' CONFIG_SH_EARLY_PRINTK
 fi
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sh/kernel/Makefile linux/arch/sh/kernel/Makefile
--- v2.4.0-prerelease/linux/arch/sh/kernel/Makefile	Mon Jan  1 09:38:35 2001
+++ linux/arch/sh/kernel/Makefile	Thu Jan  4 13:19:13 2001
@@ -21,6 +21,7 @@
 obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
 obj-$(CONFIG_CPU_SH4)		+= fpu.o
 obj-$(CONFIG_PCI)		+= pci-sh.o 
+obj-$(CONFIG_SH_RTC)            += rtc.o
 obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
 
 obj-$(CONFIG_SH_HP600)		+= mach_hp600.o
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sh/kernel/rtc.c linux/arch/sh/kernel/rtc.c
--- v2.4.0-prerelease/linux/arch/sh/kernel/rtc.c	Wed Dec 31 16:00:00 1969
+++ linux/arch/sh/kernel/rtc.c	Thu Jan  4 13:19:13 2001
@@ -0,0 +1,175 @@
+/*
+ * linux/arch/sh/kernel/rtc.c -- SH3 / SH4 on-chip RTC support
+ *
+ *  Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
+ *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+
+#include <asm/io.h>
+#include <asm/rtc.h>
+
+/* RCR1 Bits */
+#define RCR1_CF		0x80	/* Carry Flag             */
+#define RCR1_CIE	0x10	/* Carry Interrupt Enable */
+#define RCR1_AIE	0x08	/* Alarm Interrupt Enable */
+#define RCR1_AF		0x01	/* Alarm Flag             */
+
+/* RCR2 Bits */
+#define RCR2_PEF	0x80	/* PEriodic interrupt Flag */
+#define RCR2_PESMASK	0x70	/* Periodic interrupt Set  */
+#define RCR2_RTCEN	0x08	/* ENable RTC              */
+#define RCR2_ADJ	0x04	/* ADJustment (30-second)  */
+#define RCR2_RESET	0x02	/* Reset bit               */
+#define RCR2_START	0x01	/* Start bit               */
+
+#if defined(__sh3__)
+/* SH-3 RTC */
+#define R64CNT  	0xfffffec0
+#define RSECCNT 	0xfffffec2
+#define RMINCNT 	0xfffffec4
+#define RHRCNT  	0xfffffec6
+#define RWKCNT  	0xfffffec8
+#define RDAYCNT 	0xfffffeca
+#define RMONCNT 	0xfffffecc
+#define RYRCNT  	0xfffffece
+#define RSECAR  	0xfffffed0
+#define RMINAR  	0xfffffed2
+#define RHRAR   	0xfffffed4
+#define RWKAR   	0xfffffed6
+#define RDAYAR  	0xfffffed8
+#define RMONAR  	0xfffffeda
+#define RCR1    	0xfffffedc
+#define RCR2    	0xfffffede
+#elif defined(__SH4__)
+/* SH-4 RTC */
+#define R64CNT  	0xffc80000
+#define RSECCNT 	0xffc80004
+#define RMINCNT 	0xffc80008
+#define RHRCNT  	0xffc8000c
+#define RWKCNT  	0xffc80010
+#define RDAYCNT 	0xffc80014
+#define RMONCNT 	0xffc80018
+#define RYRCNT  	0xffc8001c  /* 16bit */
+#define RSECAR  	0xffc80020
+#define RMINAR  	0xffc80024
+#define RHRAR   	0xffc80028
+#define RWKAR   	0xffc8002c
+#define RDAYAR  	0xffc80030
+#define RMONAR  	0xffc80034
+#define RCR1    	0xffc80038
+#define RCR2    	0xffc8003c
+#endif
+
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
+#endif
+
+void sh_rtc_gettimeofday(struct timeval *tv)
+{
+	unsigned int sec128, sec, min, hr, wk, day, mon, yr, yr100;
+
+ again:
+	do {
+		ctrl_outb(0, RCR1);  /* Clear CF-bit */
+		sec128 = ctrl_inb(RSECCNT);
+		sec = ctrl_inb(RSECCNT);
+		min = ctrl_inb(RMINCNT);
+		hr  = ctrl_inb(RHRCNT);
+		wk  = ctrl_inb(RWKCNT);
+		day = ctrl_inb(RDAYCNT);
+		mon = ctrl_inb(RMONCNT);
+#if defined(__SH4__)
+		yr  = ctrl_inw(RYRCNT);
+		yr100 = (yr >> 8);
+		yr &= 0xff;
+#else
+		yr  = ctrl_inb(RYRCNT);
+		yr100 = (yr == 0x99) ? 0x19 : 0x20;
+#endif
+	} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);
+
+	BCD_TO_BIN(yr100);
+	BCD_TO_BIN(yr);
+	BCD_TO_BIN(mon);
+	BCD_TO_BIN(day);
+	BCD_TO_BIN(hr);
+	BCD_TO_BIN(min);
+	BCD_TO_BIN(sec);
+
+	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
+	    hr > 23 || min > 59 || sec > 59) {
+		printk(KERN_ERR
+		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
+		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
+		ctrl_outb(0, RSECCNT);
+		ctrl_outb(0, RMINCNT);
+		ctrl_outb(0, RHRCNT);
+		ctrl_outb(6, RWKCNT);
+		ctrl_outb(1, RDAYCNT);
+		ctrl_outb(1, RMONCNT);
+#if defined(__SH4__)
+		ctrl_outw(0x2000, RYRCNT);
+#else
+		ctrl_outb(0, RYRCNT);
+#endif
+		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */
+		goto again;
+	}
+
+	tv->tv_sec = mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
+	tv->tv_usec = (sec128 * 1000000) / 128;
+}
+
+static int set_rtc_time(unsigned long nowtime)
+{
+	extern int abs (int);
+	int retval = 0;
+	int real_seconds, real_minutes, cmos_minutes;
+
+	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */
+
+	cmos_minutes = ctrl_inb(RMINCNT);
+	BCD_TO_BIN(cmos_minutes);
+
+	/*
+	 * since we're only adjusting minutes and seconds,
+	 * don't interfere with hour overflow. This avoids
+	 * messing with unknown time zones but requires your
+	 * RTC not to be off by more than 15 minutes
+	 */
+	real_seconds = nowtime % 60;
+	real_minutes = nowtime / 60;
+	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+		real_minutes += 30;	/* correct for half hour time zone */
+	real_minutes %= 60;
+
+	if (abs(real_minutes - cmos_minutes) < 30) {
+		BIN_TO_BCD(real_seconds);
+		BIN_TO_BCD(real_minutes);
+		ctrl_outb(real_seconds, RSECCNT);
+		ctrl_outb(real_minutes, RMINCNT);
+	} else {
+		printk(KERN_WARNING
+		       "set_rtc_time: can't update from %d to %d\n",
+		       cmos_minutes, real_minutes);
+		retval = -1;
+	}
+
+	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */
+
+	return retval;
+}
+
+int sh_rtc_settimeofday(const struct timeval *tv)
+{
+	return set_rtc_time(tv->tv_sec);
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sh/kernel/setup.c linux/arch/sh/kernel/setup.c
--- v2.4.0-prerelease/linux/arch/sh/kernel/setup.c	Mon Jan  1 09:38:35 2001
+++ linux/arch/sh/kernel/setup.c	Thu Jan  4 13:19:13 2001
@@ -503,8 +503,8 @@
 		       "cache size\t: 8K-byte/16K-byte\n");
 #endif
 	p += sprintf(p, "bogomips\t: %lu.%02lu\n\n",
-		     (loops_per_sec+2500)/500000,
-		     ((loops_per_sec+2500)/5000) % 100);
+		     (loops_per_jiffy+2500)/(500000/HZ),
+		     ((loops_per_jiffy+2500)/(5000/HZ)) % 100);
 	p += sprintf(p, "Machine: %s\n", sh_mv.mv_name);
 
 #define PRINT_CLOCK(name, value) \
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sh/kernel/sh_ksyms.c linux/arch/sh/kernel/sh_ksyms.c
--- v2.4.0-prerelease/linux/arch/sh/kernel/sh_ksyms.c	Mon Jan  1 09:38:35 2001
+++ linux/arch/sh/kernel/sh_ksyms.c	Thu Jan  4 13:19:13 2001
@@ -37,11 +37,14 @@
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy);
 
+EXPORT_SYMBOL(simple_strtol);
+
 EXPORT_SYMBOL(strtok);
 EXPORT_SYMBOL(strpbrk);
 EXPORT_SYMBOL(strstr);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcat);
 
 /* mem exports */
 EXPORT_SYMBOL(memchr);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sh/kernel/time.c linux/arch/sh/kernel/time.c
--- v2.4.0-prerelease/linux/arch/sh/kernel/time.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/sh/kernel/time.c	Thu Jan  4 13:19:13 2001
@@ -3,6 +3,7 @@
  *  linux/arch/sh/kernel/time.c
  *
  *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
+ *  Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
  *
  *  Some code taken from i386 version.
  *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
@@ -27,6 +28,7 @@
 #include <asm/irq.h>
 #include <asm/delay.h>
 #include <asm/machvec.h>
+#include <asm/rtc.h>
 
 #include <linux/timex.h>
 #include <linux/irq.h>
@@ -35,19 +37,7 @@
 #define TMU0_TCR_INIT	0x0020
 #define TMU_TSTR_INIT	1
 
-/* RCR1 Bits */
-#define RCR1_CF		0x80	/* Carry Flag             */
-#define RCR1_CIE	0x10	/* Carry Interrupt Enable */
-#define RCR1_AIE	0x08	/* Alarm Interrupt Enable */
-#define RCR1_AF		0x01	/* Alarm Flag             */
-
-/* RCR2 Bits */
-#define RCR2_PEF	0x80	/* PEriodic interrupt Flag */
-#define RCR2_PESMASK	0x70	/* Periodic interrupt Set  */
-#define RCR2_RTCEN	0x08	/* ENable RTC              */
-#define RCR2_ADJ	0x04	/* ADJustment (30-second)  */
-#define RCR2_RESET	0x02	/* Reset bit               */
-#define RCR2_START	0x01	/* Start bit               */
+#define TMU0_TCR_CALIB	0x0000
 
 #if defined(__sh3__)
 #define TMU_TOCR	0xfffffe90	/* Byte access */
@@ -58,25 +48,6 @@
 #define TMU0_TCR	0xfffffe9c	/* Word access */
 
 #define FRQCR		0xffffff80
-
-/* SH-3 RTC */
-#define R64CNT  	0xfffffec0
-#define RSECCNT 	0xfffffec2
-#define RMINCNT 	0xfffffec4
-#define RHRCNT  	0xfffffec6
-#define RWKCNT  	0xfffffec8
-#define RDAYCNT 	0xfffffeca
-#define RMONCNT 	0xfffffecc
-#define RYRCNT  	0xfffffece
-#define RSECAR  	0xfffffed0
-#define RMINAR  	0xfffffed2
-#define RHRAR   	0xfffffed4
-#define RWKAR   	0xfffffed6
-#define RDAYAR  	0xfffffed8
-#define RMONAR  	0xfffffeda
-#define RCR1    	0xfffffedc
-#define RCR2    	0xfffffede
-
 #elif defined(__SH4__)
 #define TMU_TOCR	0xffd80000	/* Byte access */
 #define TMU_TSTR	0xffd80004	/* Byte access */
@@ -86,45 +57,68 @@
 #define TMU0_TCR	0xffd80010	/* Word access */
 
 #define FRQCR		0xffc00000
-
-/* SH-4 RTC */
-#define R64CNT  	0xffc80000
-#define RSECCNT 	0xffc80004
-#define RMINCNT 	0xffc80008
-#define RHRCNT  	0xffc8000c
-#define RWKCNT  	0xffc80010
-#define RDAYCNT 	0xffc80014
-#define RMONCNT 	0xffc80018
-#define RYRCNT  	0xffc8001c  /* 16bit */
-#define RSECAR  	0xffc80020
-#define RMINAR  	0xffc80024
-#define RHRAR   	0xffc80028
-#define RWKAR   	0xffc8002c
-#define RDAYAR  	0xffc80030
-#define RMONAR  	0xffc80034
-#define RCR1    	0xffc80038
-#define RCR2    	0xffc8003c
-#endif
-
-#ifndef BCD_TO_BIN
-#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
-#endif
-
-#ifndef BIN_TO_BCD
-#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
 #endif
 
 extern rwlock_t xtime_lock;
 extern unsigned long wall_jiffies;
 #define TICK_SIZE tick
 
+static unsigned long do_gettimeoffset(void)
+{
+	int count;
+
+	static int count_p = 0x7fffffff;    /* for the first call after boot */
+	static unsigned long jiffies_p = 0;
+
+	/*
+	 * cache volatile jiffies temporarily; we have IRQs turned off. 
+	 */
+	unsigned long jiffies_t;
+
+	/* timer count may underflow right here */
+	count = ctrl_inl(TMU0_TCNT);	/* read the latched count */
+
+ 	jiffies_t = jiffies;
+
+	/*
+	 * avoiding timer inconsistencies (they are rare, but they happen)...
+	 * there is one kind of problem that must be avoided here:
+	 *  1. the timer counter underflows
+	 */
+
+	if( jiffies_t == jiffies_p ) {
+		if( count > count_p ) {
+			/* the nutcase */
+
+			if(ctrl_inw(TMU0_TCR) & 0x100) { /* Check UNF bit */
+				/*
+				 * We cannot detect lost timer interrupts ... 
+				 * well, that's why we call them lost, don't we? :)
+				 * [hmm, on the Pentium and Alpha we can ... sort of]
+				 */
+				count -= LATCH;
+			} else {
+				printk("do_slow_gettimeoffset(): hardware timer problem?\n");
+			}
+		}
+	} else
+		jiffies_p = jiffies_t;
+
+	count_p = count;
+
+	count = ((LATCH-1) - count) * TICK_SIZE;
+	count = (count + LATCH/2) / LATCH;
+
+	return count;
+}
+
 void do_gettimeofday(struct timeval *tv)
 {
 	unsigned long flags;
 	unsigned long usec, sec;
 
 	read_lock_irqsave(&xtime_lock, flags);
-	usec = 0;
+	usec = do_gettimeoffset();
 	{
 		unsigned long lost = jiffies - wall_jiffies;
 		if (lost)
@@ -143,11 +137,6 @@
 	tv->tv_usec = usec;
 }
 
-/*
- * Could someone please implement this...
- */
-#define do_gettimeoffset() 0
-
 void do_settimeofday(struct timeval *tv)
 {
 	write_lock_irq(&xtime_lock);
@@ -173,45 +162,6 @@
 	write_unlock_irq(&xtime_lock);
 }
 
-static int set_rtc_time(unsigned long nowtime)
-{
-	int retval = 0;
-	int real_seconds, real_minutes, cmos_minutes;
-
-	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */
-
-	cmos_minutes = ctrl_inb(RMINCNT);
-	BCD_TO_BIN(cmos_minutes);
-
-	/*
-	 * since we're only adjusting minutes and seconds,
-	 * don't interfere with hour overflow. This avoids
-	 * messing with unknown time zones but requires your
-	 * RTC not to be off by more than 15 minutes
-	 */
-	real_seconds = nowtime % 60;
-	real_minutes = nowtime / 60;
-	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
-		real_minutes += 30;	/* correct for half hour time zone */
-	real_minutes %= 60;
-
-	if (abs(real_minutes - cmos_minutes) < 30) {
-		BIN_TO_BCD(real_seconds);
-		BIN_TO_BCD(real_minutes);
-		ctrl_outb(real_seconds, RSECCNT);
-		ctrl_outb(real_minutes, RMINCNT);
-	} else {
-		printk(KERN_WARNING
-		       "set_rtc_time: can't update from %d to %d\n",
-		       cmos_minutes, real_minutes);
-		retval = -1;
-	}
-
-	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */
-
-	return retval;
-}
-
 /* last time the RTC clock got updated */
 static long last_rtc_update;
 
@@ -241,7 +191,7 @@
 	    xtime.tv_sec > last_rtc_update + 660 &&
 	    xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 &&
 	    xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
-		if (set_rtc_time(xtime.tv_sec) == 0)
+		if (sh_mv.mv_rtc_settimeofday(&xtime) == 0)
 			last_rtc_update = xtime.tv_sec;
 		else
 			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
@@ -274,173 +224,114 @@
 	write_unlock(&xtime_lock);
 }
 
-static unsigned long get_rtc_time(void)
+static unsigned int __init get_timer_frequency(void)
 {
-	unsigned int sec, min, hr, wk, day, mon, yr, yr100;
+	u32 freq;
+	struct timeval tv1, tv2;
+	unsigned long diff_usec;
+	unsigned long factor;
+
+	/* Setup the timer:  We don't want to generate interrupts, just
+	 * have it count down at its natural rate.
+	 */
+	ctrl_outb(0, TMU_TSTR);
+	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
+	ctrl_outw(TMU0_TCR_CALIB, TMU0_TCR);
+	ctrl_outl(0xffffffff, TMU0_TCOR);
+	ctrl_outl(0xffffffff, TMU0_TCNT);
+
+	rtc_gettimeofday(&tv2);
 
- again:
 	do {
-		ctrl_outb(0, RCR1);  /* Clear CF-bit */
-		sec = ctrl_inb(RSECCNT);
-		min = ctrl_inb(RMINCNT);
-		hr  = ctrl_inb(RHRCNT);
-		wk  = ctrl_inb(RWKCNT);
-		day = ctrl_inb(RDAYCNT);
-		mon = ctrl_inb(RMONCNT);
-#if defined(__SH4__)
-		yr  = ctrl_inw(RYRCNT);
-		yr100 = (yr >> 8);
-		yr &= 0xff;
-#else
-		yr  = ctrl_inb(RYRCNT);
-		yr100 = (yr == 0x99) ? 0x19 : 0x20;
-#endif
-	} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);
+		rtc_gettimeofday(&tv1);
+	} while (tv1.tv_usec == tv2.tv_usec && tv1.tv_sec == tv2.tv_sec);
 
-	BCD_TO_BIN(yr100);
-	BCD_TO_BIN(yr);
-	BCD_TO_BIN(mon);
-	BCD_TO_BIN(day);
-	BCD_TO_BIN(hr);
-	BCD_TO_BIN(min);
-	BCD_TO_BIN(sec);
-
-	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
-	    hr > 23 || min > 59 || sec > 59) {
-		printk(KERN_ERR
-		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
-		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
-		ctrl_outb(0, RSECCNT);
-		ctrl_outb(0, RMINCNT);
-		ctrl_outb(0, RHRCNT);
-		ctrl_outb(6, RWKCNT);
-		ctrl_outb(1, RDAYCNT);
-		ctrl_outb(1, RMONCNT);
-#if defined(__SH4__)
-		ctrl_outw(0x2000, RYRCNT);
-#else
-		ctrl_outb(0, RYRCNT);
-#endif
-		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */
-		goto again;
-	}
+	/* actually start the timer */
+	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
 
-	return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
-}
+	do {
+		rtc_gettimeofday(&tv2);
+	} while (tv1.tv_usec == tv2.tv_usec && tv1.tv_sec == tv2.tv_sec);
 
-static __init unsigned int get_cpu_mhz(void)
-{
-	unsigned int count;
-	unsigned long __dummy;
+	freq = 0xffffffff - ctrl_inl(TMU0_TCNT);
+	if (tv2.tv_usec < tv1.tv_usec) {
+		tv2.tv_usec += 1000000;
+		tv2.tv_sec--;
+	}
 
-	sti();
-	do {} while (ctrl_inb(R64CNT) != 0);
-	ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */
-	asm volatile(
-		"1:\t"
-		"tst	%1,%1\n\t"
-		"bt/s	1b\n\t"
-		" add	#1,%0"
-		: "=r"(count), "=z" (__dummy)
-		: "0" (0), "1" (0)
-		: "t");
-	cli();
-	/*
-	 * SH-3:
-	 * CPU clock = 4 stages * loop
-	 * tst    rm,rm      if id ex
-	 * bt/s   1b            if id ex
-	 * add    #1,rd            if id ex
-         *                            (if) pipe line stole
-	 * tst    rm,rm                  if id ex
-         * ....
-	 *
-	 *
-	 * SH-4:
-	 * CPU clock = 6 stages * loop
-	 * I don't know why.
-         * ....
-	 */
-#if defined(__SH4__)
-	return count*6;
-#else
-	return count*4;
-#endif
-}
+	diff_usec = (tv2.tv_sec - tv1.tv_sec) * 1000000 + (tv2.tv_usec - tv1.tv_usec);
 
-static void rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	ctrl_outb(0, RCR1);	/* Disable Carry Interrupts */
-	regs->regs[0] = 1;
+	/* this should work well if the RTC has a precision of n Hz, where
+	 * n is an integer.  I don't think we have to worry about the other
+	 * cases. */
+	factor = (1000000 + diff_usec/2) / diff_usec;
+
+	if (factor * diff_usec > 1100000 ||
+	    factor * diff_usec <  900000)
+		panic("weird RTC (diff_usec %ld)", diff_usec);
+
+	return freq * factor;
 }
 
 static struct irqaction irq0  = { timer_interrupt, SA_INTERRUPT, 0, "timer", NULL, NULL};
-static struct irqaction irq1  = { rtc_interrupt, SA_INTERRUPT, 0, "rtc", NULL, NULL};
 
 void __init time_init(void)
 {
 	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
-	unsigned short frqcr, ifc, pfc;
+	unsigned int timer_freq;
+	unsigned short frqcr, ifc, pfc, bfc;
 	unsigned long interval;
 #if defined(__sh3__)
 	static int ifc_table[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
 	static int pfc_table[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
-	static int stc_table[] = { 1, 2, 3, 4, 6, 8, 1, 1 };
+	static int stc_table[] = { 1, 2, 4, 8, 3, 6, 1, 1 };
 #elif defined(__SH4__)
 	static int ifc_table[] = { 1, 2, 3, 4, 6, 8, 1, 1 };
 #define bfc_table ifc_table	/* Same */
 	static int pfc_table[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
 #endif
 
-	xtime.tv_sec = get_rtc_time();
-	xtime.tv_usec = 0;
+	rtc_gettimeofday(&xtime);
 
 	setup_irq(TIMER_IRQ, &irq0);
-	setup_irq(RTC_IRQ, &irq1);
 
-	/* Check how fast it is.. */
-	cpu_clock = get_cpu_mhz();
-	disable_irq(RTC_IRQ);
+	timer_freq = get_timer_frequency();
+
+	module_clock = timer_freq * 4;
 
-	printk("CPU clock: %d.%02dMHz\n",
-	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
 #if defined(__sh3__)
 	{
-		unsigned short tmp, stc;
+		unsigned short tmp;
+
 		frqcr = ctrl_inw(FRQCR);
 		tmp  = (frqcr & 0x8000) >> 13;
 		tmp |= (frqcr & 0x0030) >>  4;
-		stc = stc_table[tmp];
+		bfc = stc_table[tmp];
 		tmp  = (frqcr & 0x4000) >> 12;
 		tmp |= (frqcr & 0x000c) >> 2;
 		ifc  = ifc_table[tmp];
 		tmp  = (frqcr & 0x2000) >> 11;
 		tmp |= frqcr & 0x0003;
 		pfc = pfc_table[tmp];
-		if (MACH_HP600) {
-			master_clock = cpu_clock/6;
-		} else {
-			master_clock = cpu_clock;
-		}
-		bus_clock = master_clock/pfc;
 	}
 #elif defined(__SH4__)
 	{
-		unsigned short bfc;
 		frqcr = ctrl_inw(FRQCR);
 		ifc  = ifc_table[(frqcr>> 6) & 0x0007];
 		bfc  = bfc_table[(frqcr>> 3) & 0x0007];
 		pfc = pfc_table[frqcr & 0x0007];
-		master_clock = cpu_clock * ifc;
-		bus_clock = master_clock/bfc;
 	}
 #endif
+	master_clock = module_clock * pfc;
+	bus_clock = master_clock / bfc;
+	cpu_clock = master_clock / ifc;
+	printk("CPU clock: %d.%02dMHz\n",
+	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
 	printk("Bus clock: %d.%02dMHz\n",
 	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
-	module_clock = master_clock/pfc;
 	printk("Module clock: %d.%02dMHz\n",
 	       (module_clock/1000000), (module_clock % 1000000)/10000);
-	interval = (module_clock/(HZ*4));
+	interval = (module_clock/4 + HZ/2) / HZ;
 
 	printk("Interval = %ld\n", interval);
 
@@ -450,6 +341,7 @@
 	current_cpu_data.module_clock = module_clock;
 
 	/* Start TMU0 */
+	ctrl_outb(0, TMU_TSTR);
 	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
 	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
 	ctrl_outl(interval, TMU0_TCOR);
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sh/lib/delay.c linux/arch/sh/lib/delay.c
--- v2.4.0-prerelease/linux/arch/sh/lib/delay.c	Mon Aug 30 18:12:59 1999
+++ linux/arch/sh/lib/delay.c	Thu Jan  4 13:19:13 2001
@@ -1,21 +1,31 @@
 /*
  *	Precise Delay Loops for SuperH
  *
- *	Copyright (C) 1999 Niibe Yutaka
+ *	Copyright (C) 1999 Niibe Yutaka & Kaz Kojima
  */
 
 #include <linux/sched.h>
 #include <linux/delay.h>
 
+void __delay(unsigned long loops)
+{
+	__asm__ __volatile__(
+		"tst	%0, %0\n\t"
+		"1:\t"
+		"bf/s	1b\n\t"
+		" dt	%0"
+		: "=r" (loops)
+		: "0" (loops)
+		: "t");
+}
+
 inline void __const_udelay(unsigned long xloops)
 {
-	xloops *= current_cpu_data.loops_per_sec;
-        __delay(xloops);
+	xloops *= current_cpu_data.loops_per_jiffy;
+	__delay(xloops * HZ);
 }
 
-#if 0
 void __udelay(unsigned long usecs)
 {
 	__const_udelay(usecs * 0x000010c6);  /* 2**32 / 1000000 */
 }
-#endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sh/mm/fault.c linux/arch/sh/mm/fault.c
--- v2.4.0-prerelease/linux/arch/sh/mm/fault.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/sh/mm/fault.c	Thu Jan  4 13:19:13 2001
@@ -282,6 +282,7 @@
 	unsigned long flags;
 	unsigned long pteval;
 	unsigned long pteaddr;
+	unsigned long ptea;
 
 	save_and_cli(flags);
 
@@ -307,9 +308,17 @@
 	pteaddr = (address & MMU_VPN_MASK) | get_asid();
 	ctrl_outl(pteaddr, MMU_PTEH);
 
-	/* Set PTEL register */
+	/* Set PTEA register */
+	/* TODO: make this look less hacky */
 	pteval = pte_val(pte);
+#if defined(__SH4__)
+	ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);
+	ctrl_outl(ptea, MMU_PTEA);
+#endif
+
+	/* Set PTEL register */
 	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+	/* conveniently, we want all the software flags to be 0 anyway */
 	ctrl_outl(pteval, MMU_PTEL);
 
 	/* Load the TLB */
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sparc/kernel/entry.S linux/arch/sparc/kernel/entry.S
--- v2.4.0-prerelease/linux/arch/sparc/kernel/entry.S	Thu Sep  7 08:32:00 2000
+++ linux/arch/sparc/kernel/entry.S	Mon Jan  1 10:37:41 2001
@@ -1,4 +1,4 @@
-/* $Id: entry.S,v 1.167 2000/09/06 00:45:00 davem Exp $
+/* $Id: entry.S,v 1.168 2001/01/01 01:46:15 davem Exp $
  * arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -1810,20 +1810,22 @@
 	call	.umul
 	 or	%o1, %lo(0x10c6), %o1
 #ifndef CONFIG_SMP
-	sethi	%hi(C_LABEL(loops_per_sec)), %o3
+	sethi	%hi(C_LABEL(loops_per_jiffy)), %o3
 	call	.umul
-	 ld	[%o3 + %lo(C_LABEL(loops_per_sec))], %o1
+	 ld	[%o3 + %lo(C_LABEL(loops_per_jiffy))], %o1
 #else
 	GET_PROCESSOR_OFFSET(o4, o2)
 	set	C_LABEL(cpu_data), %o3
 	call	.umul
 	 ld	[%o3 + %o4], %o1
 #endif
+	call	.umul
+	 mov	100, %o0
 
-	cmp	%o1, 0x0
+	cmp	%o0, 0x0
 1:
 	bne	1b
-	 subcc	%o1, 1, %o1
+	 subcc	%o0, 1, %o0
 	
 	ret
 	restore
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sparc/kernel/semaphore.c linux/arch/sparc/kernel/semaphore.c
--- v2.4.0-prerelease/linux/arch/sparc/kernel/semaphore.c	Sun Nov 19 18:44:04 2000
+++ linux/arch/sparc/kernel/semaphore.c	Thu Jan  4 13:00:55 2001
@@ -1,34 +1,24 @@
-/* $Id: semaphore.c,v 1.4 2000/11/10 04:02:03 davem Exp $
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
+/* $Id: semaphore.c,v 1.5 2000/12/29 10:35:05 anton Exp $ */
+
+/* sparc32 semaphore implementation, based on i386 version */
 
 #include <linux/sched.h>
-#include <asm/semaphore-helper.h>
+
+#include <asm/semaphore.h>
 
 /*
  * Semaphores are implemented using a two-way counter:
  * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
+ * that tries to acquire the semaphore, while the "sleeping"
+ * variable is a count of such acquires.
  *
  * Notably, the inline "up()" and "down()" functions can
  * efficiently test if they need to do any extra work (up
  * needs to do something only if count was negative before
  * the increment operation.
  *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
+ * "sleeping" and the contention routine ordering is
+ * protected by the semaphore spinlock.
  *
  * Note that these functions are only called when there is
  * contention on the lock, and as such all this is the
@@ -36,95 +26,130 @@
  * critical part is the inline stuff in <asm/semaphore.h>
  * where we want to avoid any extra jumps and calls.
  */
+
+/*
+ * Logic:
+ *  - only on a boundary condition do we need to care. When we go
+ *    from a negative count to a non-negative, we wake people up.
+ *  - when we go from a non-negative count to a negative do we
+ *    (a) synchronize with the "sleeper" count and (b) make sure
+ *    that we're on the wakeup list before we synchronize so that
+ *    we cannot lose wakeup events.
+ */
+
 void __up(struct semaphore *sem)
 {
-	wake_one_more(sem);
 	wake_up(&sem->wait);
 }
 
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
+static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
 
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
+void __down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	tsk->state = TASK_UNINTERRUPTIBLE;
+	add_wait_queue_exclusive(&sem->wait, &wait);
 
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	tsk->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
+	spin_lock_irq(&semaphore_lock);
+	sem->sleepers++;
 	for (;;) {
+		int sleepers = sem->sleepers;
 
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
+		/*
+		 * Add "everybody else" into it. They aren't
+		 * playing, because we own the spinlock.
+		 */
+		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+			sem->sleepers = 0;
+			break;
+		}
+		sem->sleepers = 1;	/* us - see -1 above */
+		spin_unlock_irq(&semaphore_lock);
+
+		schedule();
+		tsk->state = TASK_UNINTERRUPTIBLE;
+		spin_lock_irq(&semaphore_lock);
+	}
+	spin_unlock_irq(&semaphore_lock);
 	remove_wait_queue(&sem->wait, &wait);
-
-void __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+	tsk->state = TASK_RUNNING;
+	wake_up(&sem->wait);
 }
 
 int __down_interruptible(struct semaphore * sem)
 {
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	tsk->state = TASK_INTERRUPTIBLE;
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	spin_lock_irq(&semaphore_lock);
+	sem->sleepers ++;
+	for (;;) {
+		int sleepers = sem->sleepers;
+
+		/*
+		 * With signals pending, this turns into
+		 * the trylock failure case - we won't be
+		 * sleeping, and we can't get the lock as
+		 * it has contention. Just correct the count
+		 * and exit.
+		 */
+		if (signal_pending(current)) {
+			retval = -EINTR;
+			sem->sleepers = 0;
+			atomic_add(sleepers, &sem->count);
+			break;
+		}
+
+		/*
+		 * Add "everybody else" into it. They aren't
+		 * playing, because we own the spinlock. The
+		 * "-1" is because we're still hoping to get
+		 * the lock.
+		 */
+		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+			sem->sleepers = 0;
+			break;
+		}
+		sem->sleepers = 1;	/* us - see -1 above */
+		spin_unlock_irq(&semaphore_lock);
+
+		schedule();
+		tsk->state = TASK_INTERRUPTIBLE;
+		spin_lock_irq(&semaphore_lock);
 	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
+	spin_unlock_irq(&semaphore_lock);
+	tsk->state = TASK_RUNNING;
+	remove_wait_queue(&sem->wait, &wait);
+	wake_up(&sem->wait);
+	return retval;
 }
 
+/*
+ * Trylock failed - make sure we correct for
+ * having decremented the count.
+ */
 int __down_trylock(struct semaphore * sem)
 {
-	return waking_non_zero_trylock(sem);
+	int sleepers;
+	unsigned long flags;
+
+	spin_lock_irqsave(&semaphore_lock, flags);
+	sleepers = sem->sleepers + 1;
+	sem->sleepers = 0;
+
+	/*
+	 * Add "everybody else" and us into it. They aren't
+	 * playing, because we own the spinlock.
+	 */
+	if (!atomic_add_negative(sleepers, &sem->count))
+		wake_up(&sem->wait);
+
+	spin_unlock_irqrestore(&semaphore_lock, flags);
+	return 1;
 }
 
 /* rw mutexes
@@ -138,6 +163,10 @@
 	asm volatile("ldstub %1, %0" : "=r" (ret) : "m" (*p) : "memory");
 	return ret;
 }
+
+#define DOWN_VAR				\
+	struct task_struct *tsk = current;	\
+	DECLARE_WAITQUEUE(wait, tsk);
 
 void down_read_failed_biased(struct rw_semaphore *sem)
 {
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sparc/kernel/setup.c linux/arch/sparc/kernel/setup.c
--- v2.4.0-prerelease/linux/arch/sparc/kernel/setup.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/sparc/kernel/setup.c	Mon Jan  1 10:37:41 2001
@@ -1,7 +1,8 @@
-/*  $Id: setup.c,v 1.120 2000/10/14 10:09:00 davem Exp $
+/*  $Id: setup.c,v 1.122 2001/01/01 01:46:15 davem Exp $
  *  linux/arch/sparc/kernel/setup.c
  *
  *  Copyright (C) 1995  David S. Miller (davem@caip.rutgers.edu)
+ *  Copyright (C) 2000  Anton Blanchard (anton@linuxcare.com)
  */
 
 #include <linux/errno.h>
@@ -59,8 +60,6 @@
 	16                      /* orig-video-points */
 };
 
-unsigned int phys_bytes_of_ram, end_of_phys_memory;
-
 /* Typing sync at the prom prompt calls the function pointed to by
  * romvec->pv_synchook which I set to the following function.
  * This should sync all filesystems and return, for now it just
@@ -80,6 +79,7 @@
 {
 	unsigned long prom_tbr, flags;
 
+	/* XXX Badly broken. FIX! - Anton */
 	save_and_cli(flags);
 	__asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
 	__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
@@ -111,7 +111,7 @@
 
 extern void rs_kgdb_hook(int tty_num); /* sparc/serial.c */
 
-unsigned int boot_flags;
+unsigned int boot_flags __initdata = 0;
 #define BOOTME_DEBUG  0x1
 #define BOOTME_SINGLE 0x2
 #define BOOTME_KGDBA  0x4
@@ -119,7 +119,7 @@
 #define BOOTME_KGDB   0xc
 
 #ifdef CONFIG_SUN_CONSOLE
-static int console_fb = 0;
+static int console_fb __initdata = 0;
 #endif
 
 /* Exported for mm/init.c:paging_init. */
@@ -163,7 +163,7 @@
 		break;
 	case 'h':
 		prom_printf("boot_flags_init: Halt!\n");
-		halt();
+		prom_halt();
 		break;
 	default:
 		printk("Unknown boot switch (-%c)\n", c);
@@ -277,19 +277,20 @@
 
 struct tt_entry *sparc_ttable;
 
-struct pt_regs fake_swapper_regs = { 0, 0, 0, 0, { 0, } };
+struct pt_regs fake_swapper_regs;
 
 #ifdef PROM_DEBUG_CONSOLE
-static void prom_cons_write(struct console *con, const char *str, unsigned count)
+static void
+prom_console_write(struct console *con, const char *s, unsigned n)
 {
-	while (count--)
-		prom_printf("%c", *str++);
+	prom_printf("%s", s);
 }
 
 static struct console prom_console = {
-	name:		"PROM",
-	write:		prom_cons_write,
+	name:		"debug",
+	write:		prom_console_write,
 	flags:		CON_PRINTBUFFER,
+	index:		-1,
 };
 #endif
 
@@ -485,7 +486,7 @@
             &cputypval,
 	    linux_num_cpus, smp_num_cpus
 #ifndef CONFIG_SMP
-	    , loops_per_sec/500000, (loops_per_sec/5000) % 100
+	    , loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ)) % 100
 #endif
 	    );
 #ifdef CONFIG_SMP
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sparc/kernel/smp.c linux/arch/sparc/kernel/smp.c
--- v2.4.0-prerelease/linux/arch/sparc/kernel/smp.c	Mon Dec 11 17:59:43 2000
+++ linux/arch/sparc/kernel/smp.c	Mon Jan  1 10:37:41 2001
@@ -86,7 +86,7 @@
 
 void __init smp_store_cpu_info(int id)
 {
-	cpu_data[id].udelay_val = loops_per_sec; /* this is it on sparc. */
+	cpu_data[id].udelay_val = loops_per_jiffy; /* this is it on sparc. */
 }
 
 void __init smp_commence(void)
@@ -284,8 +284,8 @@
 		if (cpu_present_map & (1 << i))
 			len += sprintf(buf + len, "Cpu%dBogo\t: %lu.%02lu\n", 
 					i,
-					cpu_data[i].udelay_val/500000,
-					(cpu_data[i].udelay_val/5000)%100);
+					cpu_data[i].udelay_val/(500000/HZ),
+					(cpu_data[i].udelay_val/(5000/HZ))%100);
 	return len;
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sparc/mm/fault.c linux/arch/sparc/mm/fault.c
--- v2.4.0-prerelease/linux/arch/sparc/mm/fault.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/sparc/mm/fault.c	Mon Jan  1 10:37:41 2001
@@ -1,4 +1,4 @@
-/* $Id: fault.c,v 1.117 2000/10/16 14:32:50 anton Exp $
+/* $Id: fault.c,v 1.118 2000/12/29 07:52:41 anton Exp $
  * fault.c:  Page fault handlers for the Sparc.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -371,7 +371,7 @@
 		if (!pgd_present(*pgd)) {
 			if (!pgd_present(*pgd_k))
 				goto bad_area_nosemaphore;
-			pgd_set(pgd, *pgd_k);
+			pgd_val(*pgd) = pgd_val(*pgd_k);
 			return;
 		}
 
@@ -380,7 +380,7 @@
 
 		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
 			goto bad_area_nosemaphore;
-		pmd_set(pmd, *pmd_k);
+		pmd_val(*pmd) = pmd_val(*pmd_k);
 		return;
 	}
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sparc64/kernel/ioctl32.c linux/arch/sparc64/kernel/ioctl32.c
--- v2.4.0-prerelease/linux/arch/sparc64/kernel/ioctl32.c	Sun Nov 19 18:44:05 2000
+++ linux/arch/sparc64/kernel/ioctl32.c	Thu Jan  4 12:50:17 2001
@@ -1,4 +1,4 @@
-/* $Id: ioctl32.c,v 1.103 2000/11/10 05:44:33 davem Exp $
+/* $Id: ioctl32.c,v 1.104 2001/01/03 09:28:19 anton Exp $
  * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
  *
  * Copyright (C) 1997-2000  Jakub Jelinek  (jakub@redhat.com)
@@ -2274,7 +2274,8 @@
 		}
 		karg = v;
 		memset(v->pv, 0, sizeof(v->pv) + sizeof(v->lv));
-		if (v->pv_max > ABS_MAX_PV || v->lv_max == ABS_MAX_LV) return -EPERM;
+		if (v->pv_max > ABS_MAX_PV || v->lv_max > ABS_MAX_LV)
+			return -EPERM;
 		for (i = 0; i < v->pv_max; i++) {
 			err = __get_user(ptr, &((vg32_t *)arg)->pv[i]);
 			if (err) break;
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sparc64/kernel/setup.c linux/arch/sparc64/kernel/setup.c
--- v2.4.0-prerelease/linux/arch/sparc64/kernel/setup.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/sparc64/kernel/setup.c	Mon Jan  1 10:37:41 2001
@@ -1,4 +1,4 @@
-/*  $Id: setup.c,v 1.57 2000/10/14 10:09:00 davem Exp $
+/*  $Id: setup.c,v 1.58 2001/01/01 01:46:15 davem Exp $
  *  linux/arch/sparc64/kernel/setup.c
  *
  *  Copyright (C) 1995,1996  David S. Miller (davem@caip.rutgers.edu)
@@ -600,7 +600,7 @@
             prom_rev, prom_prev >> 16, (prom_prev >> 8) & 0xff, prom_prev & 0xff,
 	    linux_num_cpus, smp_num_cpus
 #ifndef CONFIG_SMP
-            , loops_per_sec/500000, (loops_per_sec/5000) % 100
+            , loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ)) % 100
 #endif
 	    );
 #ifdef CONFIG_SMP
diff -u --recursive --new-file v2.4.0-prerelease/linux/arch/sparc64/kernel/smp.c linux/arch/sparc64/kernel/smp.c
--- v2.4.0-prerelease/linux/arch/sparc64/kernel/smp.c	Tue Oct 31 12:42:26 2000
+++ linux/arch/sparc64/kernel/smp.c	Mon Jan  1 10:37:41 2001
@@ -79,8 +79,8 @@
 		if(cpu_present_map & (1UL << i))
 			len += sprintf(buf + len,
 				       "Cpu%dBogo\t: %lu.%02lu\n",
-				       i, cpu_data[i].udelay_val / 500000,
-				       (cpu_data[i].udelay_val / 5000) % 100);
+				       i, cpu_data[i].udelay_val / (500000/HZ),
+				       (cpu_data[i].udelay_val / (5000/HZ)) % 100);
 	return len;
 }
 
@@ -90,7 +90,7 @@
 
 	/* multiplier and counter set by
 	   smp_setup_percpu_timer()  */
-	cpu_data[id].udelay_val			= loops_per_sec;
+	cpu_data[id].udelay_val			= loops_per_jiffy;
 
 	cpu_data[id].pgcache_size		= 0;
 	cpu_data[id].pte_cache[0]		= NULL;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/acpi/hardware/hwcpu32.c linux/drivers/acpi/hardware/hwcpu32.c
--- v2.4.0-prerelease/linux/drivers/acpi/hardware/hwcpu32.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/acpi/hardware/hwcpu32.c	Mon Jan  1 10:23:20 2001
@@ -708,4 +708,4 @@
 	return;
 }
 
- 
\ No newline at end of file
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/acpi/namespace/nsxfobj.c linux/drivers/acpi/namespace/nsxfobj.c
--- v2.4.0-prerelease/linux/drivers/acpi/namespace/nsxfobj.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/acpi/namespace/nsxfobj.c	Mon Jan  1 10:23:21 2001
@@ -694,4 +694,5 @@
 	acpi_cm_release_mutex (ACPI_MTX_NAMESPACE);
 
 	return (status);
-}
\ No newline at end of file
+}
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/block/ataflop.c linux/drivers/block/ataflop.c
--- v2.4.0-prerelease/linux/drivers/block/ataflop.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/block/ataflop.c	Thu Jan  4 13:00:55 2001
@@ -355,7 +355,7 @@
 static void fd_select_drive( int drive );
 static void fd_deselect( void );
 static void fd_motor_off_timer( unsigned long dummy );
-static void check_change( unsigned long dummy );
+static void check_change( void );
 static __inline__ void set_head_settle_flag( void );
 static __inline__ int get_head_settle_flag( void );
 static void floppy_irq (int irq, void *dummy, struct pt_regs *fp);
@@ -409,7 +409,7 @@
 }
 
 static inline void
-start_check_change_timer(unsigned long dummy)
+start_check_change_timer( void )
 {
 	mod_timer(&fd_timer, jiffies + CHECK_CHANGE_DELAY);
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/block/cciss.c linux/drivers/block/cciss.c
--- v2.4.0-prerelease/linux/drivers/block/cciss.c	Tue Oct 31 12:42:26 2000
+++ linux/drivers/block/cciss.c	Thu Jan  4 12:50:12 2001
@@ -113,7 +113,7 @@
 static void cciss_procinit(int i);
 #else
 static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 
-		int length, int *eof, void *data) {}
+		int length, int *eof, void *data) { return 0;}
 static void cciss_procinit(int i) {}
 #endif /* CONFIG_PROC_FS */
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/Makefile linux/drivers/char/Makefile
--- v2.4.0-prerelease/linux/drivers/char/Makefile	Mon Jan  1 09:38:35 2001
+++ linux/drivers/char/Makefile	Thu Jan  4 13:00:55 2001
@@ -119,6 +119,7 @@
 obj-$(CONFIG_RIO) += rio/rio.o generic_serial.o
 obj-$(CONFIG_SH_SCI) += sh-sci.o generic_serial.o
 obj-$(CONFIG_SERIAL167) += serial167.o
+obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
 obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
 obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/amiserial.c linux/drivers/char/amiserial.c
--- v2.4.0-prerelease/linux/drivers/char/amiserial.c	Tue Jul 18 15:56:55 2000
+++ linux/drivers/char/amiserial.c	Thu Jan  4 13:00:55 2001
@@ -73,10 +73,12 @@
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/interrupt.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
+#include <linux/console.h>
 #include <linux/major.h>
 #include <linux/string.h>
 #include <linux/fcntl.h>
@@ -87,20 +89,20 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 
-#ifdef CONFIG_AMIGA
-#include <asm/amigahw.h>
-#include <asm/amigaints.h>
-#endif
+#include <asm/setup.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/bitops.h>
 
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+
 #ifdef SERIAL_INLINE
 #define _INLINE_ inline
 #endif
-	
+
 static char *serial_name = "Amiga-builtin serial driver";
 
 static DECLARE_TASK_QUEUE(tq_serial);
@@ -213,7 +215,7 @@
 
 	if (serial_paranoia_check(info, tty->device, "rs_stop"))
 		return;
-	
+
 	save_flags(flags); cli();
 	if (info->IER & UART_IER_THRI) {
 		info->IER &= ~UART_IER_THRI;
@@ -230,12 +232,14 @@
 {
 	struct async_struct *info = (struct async_struct *)tty->driver_data;
 	unsigned long flags;
-	
+
 	if (serial_paranoia_check(info, tty->device, "rs_start"))
 		return;
-	
+
 	save_flags(flags); cli();
-	if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) {
+	if (info->xmit.head != info->xmit.tail
+	    && info->xmit.buf
+	    && !(info->IER & UART_IER_THRI)) {
 		info->IER |= UART_IER_THRI;
 		custom.intena = IF_SETCLR | IF_TBE;
 		mb();
@@ -299,13 +303,13 @@
 	    status |= UART_LSR_BI;
 	if(serdatr & SDR_OVRUN)
 	    status |= UART_LSR_OE;
-		  
+
 	ch = serdatr & 0xff;
 	if (tty->flip.count >= TTY_FLIPBUF_SIZE)
 	  goto ignore_char;
 	*tty->flip.char_buf_ptr = ch;
 	icount->rx++;
-		
+
 #ifdef SERIAL_DEBUG_INTR
 	printk("DR%02x:%02x...", ch, status);
 #endif
@@ -386,27 +390,29 @@
 		info->x_char = 0;
 		return;
 	}
-	if ((info->xmit_cnt <= 0) || info->tty->stopped ||
-	    info->tty->hw_stopped) {
+	if (info->xmit.head == info->xmit.tail
+	    || info->tty->stopped
+	    || info->tty->hw_stopped) {
 		info->IER &= ~UART_IER_THRI;
 	        custom.intena = IF_TBE;
 		mb();
 		return;
 	}
-	
-	custom.serdat = info->xmit_buf[info->xmit_tail++] | 0x100;
+
+	custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
 	mb();
-	info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
+	info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1);
 	info->state->icount.tx++;
-	--info->xmit_cnt;
-	
-	if (info->xmit_cnt < WAKEUP_CHARS)
+
+	if (CIRC_CNT(info->xmit.head,
+		     info->xmit.tail,
+		     SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
 		rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
 
 #ifdef SERIAL_DEBUG_INTR
 	printk("THRE...");
 #endif
-	if (info->xmit_cnt <= 0) {
+	if (info->xmit.head == info->xmit.tail) {
 	        custom.intena = IF_TBE;
 		mb();
 		info->IER &= ~UART_IER_THRI;
@@ -445,7 +451,7 @@
 #if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
 		printk("ttyS%02d CD now %s...", info->line,
 		       (!(status & SER_DCD)) ? "on" : "off");
-#endif		
+#endif
 		if (!(status & SER_DCD))
 			wake_up_interruptible(&info->open_wait);
 		else if (!((info->flags & ASYNC_CALLOUT_ACTIVE) &&
@@ -505,11 +511,11 @@
 static void ser_rx_int(int irq, void *dev_id, struct pt_regs * regs)
 {
 	struct async_struct * info;
-	
+
 #ifdef SERIAL_DEBUG_INTR
 	printk("ser_rx_int...");
 #endif
-	
+
 	info = IRQ_ports;
 	if (!info || !info->tty)
 		return;
@@ -524,12 +530,12 @@
 static void ser_tx_int(int irq, void *dev_id, struct pt_regs * regs)
 {
 	struct async_struct * info;
-	
+
 	if (custom.serdatr & SDR_TBE) {
 #ifdef SERIAL_DEBUG_INTR
 	  printk("ser_tx_int...");
 #endif
-	
+
 	  info = IRQ_ports;
 	  if (!info || !info->tty)
 	    return;
@@ -566,7 +572,7 @@
 {
 	struct async_struct	*info = (struct async_struct *) private_;
 	struct tty_struct	*tty;
-	
+
 	tty = info->tty;
 	if (!tty)
 		return;
@@ -605,10 +611,10 @@
 		goto errout;
 	}
 
-	if (info->xmit_buf)
+	if (info->xmit.buf)
 		free_page(page);
 	else
-		info->xmit_buf = (unsigned char *) page;
+		info->xmit.buf = (unsigned char *) page;
 
 #ifdef SERIAL_DEBUG_OPEN
 	printk("starting up ttys%d ...", info->line);
@@ -647,7 +653,7 @@
 
 	if (info->tty)
 		clear_bit(TTY_IO_ERROR, &info->tty->flags);
-	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+	info->xmit.head = info->xmit.tail = 0;
 
 	/*
 	 * Set up the tty->alt_speed kludge
@@ -662,7 +668,7 @@
 		if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
 			info->tty->alt_speed = 460800;
 	}
-	
+
 	/*
 	 * and set the speed of the serial port
 	 */
@@ -671,7 +677,7 @@
 	info->flags |= ASYNC_INITIALIZED;
 	restore_flags(flags);
 	return 0;
-	
+
 errout:
 	restore_flags(flags);
 	return retval;
@@ -694,7 +700,7 @@
 #ifdef SERIAL_DEBUG_OPEN
 	printk("Shutting down serial port %d ....\n", info->line);
 #endif
-	
+
 	save_flags(flags); cli(); /* Disable interrupts */
 
 	/*
@@ -702,17 +708,17 @@
 	 * here so the queue might never be waken up
 	 */
 	wake_up_interruptible(&info->delta_msr_wait);
-	
+
 	IRQ_ports = NULL;
-	
+
 	/*
 	 * Free the IRQ, if necessary
 	 */
 	free_irq(IRQ_AMIGA_VERTB, info);
 
-	if (info->xmit_buf) {
-		free_page((unsigned long) info->xmit_buf);
-		info->xmit_buf = 0;
+	if (info->xmit.buf) {
+		free_page((unsigned long) info->xmit.buf);
+		info->xmit.buf = 0;
 	}
 
 	info->IER = 0;
@@ -722,7 +728,7 @@
 	/* disable break condition */
 	custom.adkcon = AC_UARTBRK;
 	mb();
-	
+
 	if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
 		info->MCR &= ~(SER_DTR|SER_RTS);
 	rtsdtr_ctrl(info->MCR);
@@ -838,7 +844,7 @@
 		info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
 	if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
 		info->read_status_mask |= UART_LSR_BI;
-	
+
 	/*
 	 * Characters to ignore
 	 */
@@ -888,18 +894,19 @@
 	if (serial_paranoia_check(info, tty->device, "rs_put_char"))
 		return;
 
-	if (!tty || !info->xmit_buf)
+	if (!tty || !info->xmit.buf)
 		return;
 
 	save_flags(flags); cli();
-	if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1) {
+	if (CIRC_SPACE(info->xmit.head,
+		       info->xmit.tail,
+		       SERIAL_XMIT_SIZE) == 0) {
 		restore_flags(flags);
 		return;
 	}
 
-	info->xmit_buf[info->xmit_head++] = ch;
-	info->xmit_head &= SERIAL_XMIT_SIZE-1;
-	info->xmit_cnt++;
+	info->xmit.buf[info->xmit.head++] = ch;
+	info->xmit.head &= SERIAL_XMIT_SIZE-1;
 	restore_flags(flags);
 }
 
@@ -907,12 +914,14 @@
 {
 	struct async_struct *info = (struct async_struct *)tty->driver_data;
 	unsigned long flags;
-				
+
 	if (serial_paranoia_check(info, tty->device, "rs_flush_chars"))
 		return;
 
-	if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
-	    !info->xmit_buf)
+	if (info->xmit.head == info->xmit.tail
+	    || tty->stopped
+	    || tty->hw_stopped
+	    || !info->xmit.buf)
 		return;
 
 	save_flags(flags); cli();
@@ -935,18 +944,19 @@
 	if (serial_paranoia_check(info, tty->device, "rs_write"))
 		return 0;
 
-	if (!tty || !info->xmit_buf || !tmp_buf)
+	if (!tty || !info->xmit.buf || !tmp_buf)
 		return 0;
 
 	save_flags(flags);
 	if (from_user) {
 		down(&tmp_buf_sem);
 		while (1) {
-			c = MIN(count,
-				MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
-				    SERIAL_XMIT_SIZE - info->xmit_head));
-			if (c <= 0)
-				break;
+			int c1;
+			c = CIRC_SPACE_TO_END(info->xmit.head,
+					      info->xmit.tail,
+					      SERIAL_XMIT_SIZE);
+			if (count < c)
+				c = count;
 
 			c -= copy_from_user(tmp_buf, buf, c);
 			if (!c) {
@@ -955,12 +965,14 @@
 				break;
 			}
 			cli();
-			c = MIN(c, MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
-				       SERIAL_XMIT_SIZE - info->xmit_head));
-			memcpy(info->xmit_buf + info->xmit_head, tmp_buf, c);
-			info->xmit_head = ((info->xmit_head + c) &
+			c1 = CIRC_SPACE_TO_END(info->xmit.head,
+					       info->xmit.tail,
+					       SERIAL_XMIT_SIZE);
+			if (c1 < c)
+				c = c1;
+			memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c);
+			info->xmit.head = ((info->xmit.head + c) &
 					   (SERIAL_XMIT_SIZE-1));
-			info->xmit_cnt += c;
 			restore_flags(flags);
 			buf += c;
 			count -= c;
@@ -968,27 +980,29 @@
 		}
 		up(&tmp_buf_sem);
 	} else {
+		cli();
 		while (1) {
-			cli();		
-			c = MIN(count,
-				MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
-				    SERIAL_XMIT_SIZE - info->xmit_head));
+			c = CIRC_SPACE_TO_END(info->xmit.head,
+					      info->xmit.tail,
+					      SERIAL_XMIT_SIZE);
+			if (count < c)
+				c = count;
 			if (c <= 0) {
-				restore_flags(flags);
 				break;
 			}
-			memcpy(info->xmit_buf + info->xmit_head, buf, c);
-			info->xmit_head = ((info->xmit_head + c) &
+			memcpy(info->xmit.buf + info->xmit.head, buf, c);
+			info->xmit.head = ((info->xmit.head + c) &
 					   (SERIAL_XMIT_SIZE-1));
-			info->xmit_cnt += c;
-			restore_flags(flags);
 			buf += c;
 			count -= c;
 			ret += c;
 		}
+		restore_flags(flags);
 	}
-	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped &&
-	    !(info->IER & UART_IER_THRI)) {
+	if (info->xmit.head != info->xmit.tail
+	    && !tty->stopped
+	    && !tty->hw_stopped
+	    && !(info->IER & UART_IER_THRI)) {
 		info->IER |= UART_IER_THRI;
 		cli();
 		custom.intena = IF_SETCLR | IF_TBE;
@@ -1004,34 +1018,30 @@
 static int rs_write_room(struct tty_struct *tty)
 {
 	struct async_struct *info = (struct async_struct *)tty->driver_data;
-	int	ret;
-				
+
 	if (serial_paranoia_check(info, tty->device, "rs_write_room"))
 		return 0;
-	ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
-	if (ret < 0)
-		ret = 0;
-	return ret;
+	return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
 }
 
 static int rs_chars_in_buffer(struct tty_struct *tty)
 {
 	struct async_struct *info = (struct async_struct *)tty->driver_data;
-				
+
 	if (serial_paranoia_check(info, tty->device, "rs_chars_in_buffer"))
 		return 0;
-	return info->xmit_cnt;
+	return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
 }
 
 static void rs_flush_buffer(struct tty_struct *tty)
 {
 	struct async_struct *info = (struct async_struct *)tty->driver_data;
 	unsigned long flags;
-	
+
 	if (serial_paranoia_check(info, tty->device, "rs_flush_buffer"))
 		return;
 	save_flags(flags); cli();
-	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+	info->xmit.head = info->xmit.tail = 0;
 	restore_flags(flags);
 	wake_up_interruptible(&tty->write_wait);
 	if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
@@ -1085,14 +1095,14 @@
 	unsigned long flags;
 #ifdef SERIAL_DEBUG_THROTTLE
 	char	buf[64];
-	
+
 	printk("throttle %s: %d....\n", tty_name(tty, buf),
 	       tty->ldisc.chars_in_buffer(tty));
 #endif
 
 	if (serial_paranoia_check(info, tty->device, "rs_throttle"))
 		return;
-	
+
 	if (I_IXOFF(tty))
 		rs_send_xchar(tty, STOP_CHAR(tty));
 
@@ -1110,14 +1120,14 @@
 	unsigned long flags;
 #ifdef SERIAL_DEBUG_THROTTLE
 	char	buf[64];
-	
+
 	printk("unthrottle %s: %d....\n", tty_name(tty, buf),
 	       tty->ldisc.chars_in_buffer(tty));
 #endif
 
 	if (serial_paranoia_check(info, tty->device, "rs_unthrottle"))
 		return;
-	
+
 	if (I_IXOFF(tty)) {
 		if (info->x_char)
 			info->x_char = 0;
@@ -1211,7 +1221,7 @@
 	state->close_delay = new_serial.close_delay * HZ/100;
 	state->closing_wait = new_serial.closing_wait * HZ/100;
 	info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
-	
+
 check_and_exit:
 	if (info->flags & ASYNC_INITIALIZED) {
 		if (((old_state.flags & ASYNC_SPD_MASK) !=
@@ -1323,7 +1333,7 @@
 {
 	struct async_struct * info = (struct async_struct *)tty->driver_data;
 	unsigned long flags;
-	
+
 	if (serial_paranoia_check(info, tty->device, "rs_break"))
 		return;
 
@@ -1344,7 +1354,7 @@
 	struct async_icount cprev, cnow;	/* kernel counter temps */
 	struct serial_icounter_struct icount;
 	unsigned long flags;
-	
+
 	if (serial_paranoia_check(info, tty->device, "rs_ioctl"))
 		return -ENODEV;
 
@@ -1354,7 +1364,7 @@
 		if (tty->flags & (1 << TTY_IO_ERROR))
 		    return -EIO;
 	}
-	
+
 	switch (cmd) {
 		case TIOCMGET:
 			return get_modem_info(info, (unsigned int *) arg);
@@ -1379,7 +1389,7 @@
 					 info, sizeof(struct async_struct)))
 				return -EFAULT;
 			return 0;
-				
+
 		/*
 		 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
 		 * - mask passed in arg for lines of interest
@@ -1433,7 +1443,7 @@
 			icount.parity = cnow.parity;
 			icount.brk = cnow.brk;
 			icount.buf_overrun = cnow.buf_overrun;
-			
+
 			if (copy_to_user((void *)arg, &icount, sizeof(icount)))
 				return -EFAULT;
 			return 0;
@@ -1454,7 +1464,7 @@
 	struct async_struct *info = (struct async_struct *)tty->driver_data;
 	unsigned long flags;
 	unsigned int cflag = tty->termios->c_cflag;
-	
+
 	if (   (cflag == old_termios->c_cflag)
 	    && (   RELEVANT_IFLAG(tty->termios->c_iflag) 
 		== RELEVANT_IFLAG(old_termios->c_iflag)))
@@ -1470,7 +1480,7 @@
 		rtsdtr_ctrl(info->MCR);
 		restore_flags(flags);
 	}
-	
+
 	/* Handle transition away from B0 status */
 	if (!(old_termios->c_cflag & CBAUD) &&
 	    (cflag & CBAUD)) {
@@ -1483,7 +1493,7 @@
 		rtsdtr_ctrl(info->MCR);
 		restore_flags(flags);
 	}
-	
+
 	/* Handle turning off CRTSCTS */
 	if ((old_termios->c_cflag & CRTSCTS) &&
 	    !(tty->termios->c_cflag & CRTSCTS)) {
@@ -1524,16 +1534,16 @@
 		return;
 
 	state = info->state;
-	
+
 	save_flags(flags); cli();
-	
+
 	if (tty_hung_up_p(filp)) {
 		DBG_CNT("before DEC-hung");
 		MOD_DEC_USE_COUNT;
 		restore_flags(flags);
 		return;
 	}
-	
+
 #ifdef SERIAL_DEBUG_OPEN
 	printk("rs_close ttys%d, count = %d\n", info->line, state->count);
 #endif
@@ -1628,7 +1638,7 @@
 	struct async_struct * info = (struct async_struct *)tty->driver_data;
 	unsigned long orig_jiffies, char_time;
 	int lsr;
-	
+
 	if (serial_paranoia_check(info, tty->device, "rs_wait_until_sent"))
 		return;
 
@@ -1689,12 +1699,12 @@
 {
 	struct async_struct * info = (struct async_struct *)tty->driver_data;
 	struct serial_state *state = info->state;
-	
+
 	if (serial_paranoia_check(info, tty->device, "rs_hangup"))
 		return;
 
 	state = info->state;
-	
+
 	rs_flush_buffer(tty);
 	shutdown(info);
 	info->event = 0;
@@ -1756,7 +1766,7 @@
 		info->flags |= ASYNC_CALLOUT_ACTIVE;
 		return 0;
 	}
-	
+
 	/*
 	 * If non-blocking mode is set, or the port is not enabled,
 	 * then make the check up front and then exit.
@@ -1776,7 +1786,7 @@
 		if (tty->termios->c_cflag & CLOCAL)
 			do_clocal = 1;
 	}
-	
+
 	/*
 	 * Block waiting for the carrier detect and the line to become
 	 * free (i.e., not in use by the callout).  While we are in
@@ -1810,7 +1820,7 @@
 			if (info->flags & ASYNC_HUP_NOTIFY)
 				retval = -EAGAIN;
 			else
-				retval = -ERESTARTSYS;	
+				retval = -ERESTARTSYS;
 #else
 			retval = -EAGAIN;
 #endif
@@ -2006,7 +2016,7 @@
 	status = ciab.pra;
 	control = info ? info->MCR : status;
 	restore_flags(flags); 
-	
+
 	stat_buf[0] = 0;
 	stat_buf[1] = 0;
 	if(!(control & SER_RTS))
@@ -2030,12 +2040,12 @@
 
 	if (state->icount.frame)
 		ret += sprintf(buf+ret, " fe:%d", state->icount.frame);
-	
+
 	if (state->icount.parity)
 		ret += sprintf(buf+ret, " pe:%d", state->icount.parity);
-	
+
 	if (state->icount.brk)
-		ret += sprintf(buf+ret, " brk:%d", state->icount.brk);	
+		ret += sprintf(buf+ret, " brk:%d", state->icount.brk);
 
 	if (state->icount.overrun)
 		ret += sprintf(buf+ret, " oe:%d", state->icount.overrun);
@@ -2096,7 +2106,7 @@
 /*
  * The serial driver boot-time initialization code!
  */
-int __init rs_init(void)
+static int __init rs_init(void)
 {
 	unsigned long flags;
 	struct serial_state * state;
@@ -2118,7 +2128,7 @@
 	show_serial_version();
 
 	/* Initialize the tty_driver structure */
-	
+
 	memset(&serial_driver, 0, sizeof(struct tty_driver));
 	serial_driver.magic = TTY_DRIVER_MAGIC;
 	serial_driver.driver_name = "amiserial";
@@ -2156,7 +2166,7 @@
 	serial_driver.send_xchar = rs_send_xchar;
 	serial_driver.wait_until_sent = rs_wait_until_sent;
 	serial_driver.read_proc = rs_read_proc;
-	
+
 	/*
 	 * The callout device is just like normal device except for
 	 * major number and the subtype code.
@@ -2172,7 +2182,7 @@
 		panic("Couldn't register serial driver\n");
 	if (tty_register_driver(&callout_driver))
 		panic("Couldn't register callout driver\n");
-	
+
 	state = rs_table;
 	state->magic = SSTATE_MAGIC;
 	state->port = (int)&custom.serdatr; /* Just to give it a value */
@@ -2227,13 +2237,7 @@
 	return 0;
 }
 
-#ifdef MODULE
-int init_module(void)
-{
-	return rs_init();
-}
-
-void cleanup_module(void) 
+static __exit void rs_exit(void) 
 {
 	unsigned long flags;
 	int e1, e2;
@@ -2261,12 +2265,94 @@
 		free_page((unsigned long) tmp_buf);
 		tmp_buf = NULL;
 	}
+
 	release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
 }
-#endif /* MODULE */
+
+module_init(rs_init)
+module_exit(rs_exit)
+
 
 /*
-  Local variables:
-  compile-command: "gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strict-aliasing -pipe -fno-strength-reduce  -march=i686 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h   -DEXPORT_SYMTAB -c amiserial.c"
-  End:
-*/
+ * ------------------------------------------------------------
+ * Serial console driver
+ * ------------------------------------------------------------
+ */
+#ifdef CONFIG_SERIAL_CONSOLE
+
+static void amiga_serial_putc(char c)
+{
+	custom.serdat = (unsigned char)c | 0x100;
+	while (!(custom.serdatr & 0x2000))
+		barrier();
+}
+
+/*
+ *	Print a string to the serial port trying not to disturb
+ *	any possible real use of the port...
+ *
+ *	The console_lock must be held when we get here.
+ */
+static void serial_console_write(struct console *co, const char *s,
+				unsigned count)
+{
+	unsigned short intena = custom.intenar;
+
+	custom.intena = IF_TBE;
+
+	while (count--) {
+		if (*s == '\n')
+			amiga_serial_putc('\r');
+		amiga_serial_putc(*s++);
+	}
+
+	custom.intena = IF_SETCLR | (intena & IF_TBE);
+}
+
+/*
+ *	Receive character from the serial port
+ */
+static int serial_console_wait_key(struct console *co)
+{
+	unsigned short intena = custom.intenar;
+	int ch;
+
+	custom.intena = IF_RBF;
+
+	while (!(custom.intreqr & IF_RBF))
+		barrier();
+	ch = custom.serdatr & 0xff;
+	custom.intreq = IF_RBF;
+
+	custom.intena = IF_SETCLR | (intena & IF_RBF);
+
+	return ch;
+}
+
+static kdev_t serial_console_device(struct console *c)
+{
+	return MKDEV(TTY_MAJOR, 64);
+}
+
+static struct console sercons = {
+	"ttyS",
+	serial_console_write,
+	NULL,
+	serial_console_device,
+	serial_console_wait_key,
+	NULL,
+	NULL,
+	CON_PRINTBUFFER,
+	-1,
+	0,
+	NULL
+};
+
+/*
+ *	Register console.
+ */
+void __init serial_console_init(void)
+{
+	register_console(&sercons);
+}
+#endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/Makefile linux/drivers/char/drm/Makefile
--- v2.4.0-prerelease/linux/drivers/char/drm/Makefile	Mon Jan  1 09:38:35 2001
+++ linux/drivers/char/drm/Makefile	Thu Jan  4 13:07:01 2001
@@ -26,6 +26,11 @@
 # memory waste (in the dual-head case) for greatly improved long-term
 # maintainability.
 #
+# NOTE: lib-objs will be eliminated in future versions, thereby
+# eliminating the need to compile the .o files into every module, but
+# for now we still need them.
+#
+
 lib-objs   := init.o memory.o proc.o auth.o context.o drawable.o bufs.o
 lib-objs   += lists.o lock.o ioctl.o fops.o vm.o dma.o ctxbitmap.o
 
@@ -39,7 +44,7 @@
 
 gamma-objs := gamma_drv.o gamma_dma.o
 tdfx-objs  := tdfx_drv.o                tdfx_context.o
-r128-objs  := r128_drv.o  r128_dma.o    r128_context.o r128_bufs.o
+r128-objs  := r128_drv.o  r128_cce.o    r128_context.o r128_bufs.o r128_state.o
 ffb-objs   := ffb_drv.o                 ffb_context.o
 mga-objs   := mga_drv.o   mga_dma.o     mga_context.o  mga_bufs.o  mga_state.o
 i810-objs  := i810_drv.o  i810_dma.o    i810_context.o i810_bufs.o
@@ -55,13 +60,22 @@
 # When linking into the kernel, link the library just once. 
 # If making modules, we include the library into each module
 
+lib-objs-mod := $(patsubst %.o,%-mod.o,$(lib-objs))
+
 ifdef MAKING_MODULES
-  lib = drmlib.a
+  lib = drmlib-mod.a
 else
   obj-y += drmlib.a
 endif
 
 include $(TOPDIR)/Rules.make
+
+$(patsubst %.o,%.c,$(lib-objs-mod)): 
+	@ln -sf $(subst -mod,,$@) $@
+
+drmlib-mod.a: $(lib-objs-mod)
+	rm -f $@
+	$(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs-mod)
 
 drmlib.a: $(lib-objs)
 	rm -f $@
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/drm.h linux/drivers/char/drm/drm.h
--- v2.4.0-prerelease/linux/drivers/char/drm/drm.h	Sun Oct  8 10:50:16 2000
+++ linux/drivers/char/drm/drm.h	Thu Jan  4 13:03:20 2001
@@ -11,11 +11,11 @@
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
@@ -23,7 +23,7 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
- * 
+ *
  * Authors:
  *    Rickard E. (Rik) Faith <faith@valinux.com>
  *
@@ -82,6 +82,9 @@
 #include "mga_drm.h"
 #include "i810_drm.h"
 #include "r128_drm.h"
+#ifdef CONFIG_DRM_SIS
+#include "sis_drm.h"
+#endif
 
 typedef struct drm_version {
 	int    version_major;	  /* Major version			    */
@@ -349,6 +352,7 @@
 #define DRM_IOCTL_MGA_VERTEX  DRM_IOW( 0x44, drm_mga_vertex_t)
 #define DRM_IOCTL_MGA_FLUSH   DRM_IOW( 0x45, drm_lock_t )
 #define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_BLIT    DRM_IOW( 0x47, drm_mga_blit_t)
 
 /* I810 specific ioctls */
 #define DRM_IOCTL_I810_INIT    DRM_IOW( 0x40, drm_i810_init_t)
@@ -362,11 +366,31 @@
 #define DRM_IOCTL_I810_DOCOPY  DRM_IO ( 0x48)
 
 /* Rage 128 specific ioctls */
-#define DRM_IOCTL_R128_INIT	DRM_IOW( 0x40, drm_r128_init_t)
-#define DRM_IOCTL_R128_RESET	DRM_IO(  0x41)
-#define DRM_IOCTL_R128_FLUSH	DRM_IO(  0x42)
-#define DRM_IOCTL_R128_IDLE	DRM_IO(  0x43)
-#define DRM_IOCTL_R128_PACKET	DRM_IOW( 0x44, drm_r128_packet_t)
-#define DRM_IOCTL_R128_VERTEX	DRM_IOW( 0x45, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INIT	 DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_CCE_START DRM_IO(  0x41)
+#define DRM_IOCTL_R128_CCE_STOP	 DRM_IOW( 0x42, drm_r128_cce_stop_t)
+#define DRM_IOCTL_R128_CCE_RESET DRM_IO(  0x43)
+#define DRM_IOCTL_R128_CCE_IDLE	 DRM_IO(  0x44)
+#define DRM_IOCTL_R128_RESET	 DRM_IO(  0x46)
+#define DRM_IOCTL_R128_SWAP	 DRM_IO(  0x47)
+#define DRM_IOCTL_R128_CLEAR	 DRM_IOW( 0x48, drm_r128_clear_t)
+#define DRM_IOCTL_R128_VERTEX	 DRM_IOW( 0x49, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INDICES	 DRM_IOW( 0x4a, drm_r128_indices_t)
+#define DRM_IOCTL_R128_BLIT	 DRM_IOW( 0x4b, drm_r128_blit_t)
+#define DRM_IOCTL_R128_DEPTH	 DRM_IOW( 0x4c, drm_r128_depth_t)
+#define DRM_IOCTL_R128_STIPPLE	 DRM_IOW( 0x4d, drm_r128_stipple_t)
+#define DRM_IOCTL_R128_PACKET	 DRM_IOWR(0x4e, drm_r128_packet_t)
+
+#ifdef CONFIG_DRM_SIS
+/* SiS specific ioctls */
+#define SIS_IOCTL_FB_ALLOC     DRM_IOWR( 0x44, drm_sis_mem_t)
+#define SIS_IOCTL_FB_FREE      DRM_IOW( 0x45, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_INIT     DRM_IOWR( 0x53, drm_sis_agp_t)
+#define SIS_IOCTL_AGP_ALLOC    DRM_IOWR( 0x54, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_FREE     DRM_IOW( 0x55, drm_sis_mem_t)
+#define SIS_IOCTL_FLIP         DRM_IOW( 0x48, drm_sis_flip_t)
+#define SIS_IOCTL_FLIP_INIT    DRM_IO( 0x49)
+#define SIS_IOCTL_FLIP_FINAL   DRM_IO( 0x50)
+#endif
 
 #endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/mga_drv.h linux/drivers/char/drm/mga_drv.h
--- v2.4.0-prerelease/linux/drivers/char/drm/mga_drv.h	Mon Dec 11 17:59:44 2000
+++ linux/drivers/char/drm/mga_drv.h	Thu Jan  4 14:06:18 2001
@@ -62,7 +62,7 @@
 #define MGA_IN_GETBUF	  3
 
 typedef struct _drm_mga_private {
-   	long dispatch_status;	/* long req'd for set_bit() --RR */
+   	long dispatch_status;  /* long req'd for set_bit() --RR */
 	unsigned int next_prim_age;
 	__volatile__ unsigned int last_prim_age;
    	int reserved_map_idx;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/r128_bufs.c linux/drivers/char/drm/r128_bufs.c
--- v2.4.0-prerelease/linux/drivers/char/drm/r128_bufs.c	Tue Aug 29 14:09:15 2000
+++ linux/drivers/char/drm/r128_bufs.c	Thu Jan  4 13:03:20 2001
@@ -11,11 +11,11 @@
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
@@ -23,11 +23,11 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
- * 
+ *
  * Authors: Kevin E. Martin <martin@valinux.com>
  *          Rickard E. (Rik) Faith <faith@valinux.com>
  *	    Jeff Hartmann <jhartmann@valinux.com>
- * 
+ *
  */
 
 #define __NO_VERSION__
@@ -94,7 +94,7 @@
 	}
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
-   
+
 	down(&dev->struct_sem);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
@@ -102,7 +102,7 @@
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM; /* May only call once for each order */
 	}
-   
+
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
@@ -111,7 +111,7 @@
 		return -ENOMEM;
 	}
 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-   
+
 	entry->buf_size   = size;
 	entry->page_order = page_order;
 	offset            = 0;
@@ -243,16 +243,16 @@
 		if (dma->flags & _DRM_DMA_USE_AGP) {
 			drm_map_t *map;
 
-			map = dev_priv->agp_vertbufs;
+			map = dev_priv->buffers;
 			if (!map) {
 				retcode = -EINVAL;
 				goto done;
 			}
 
 			down(&current->mm->mmap_sem);
-			virtual = do_mmap(filp, 0, map->size, 
+			virtual = do_mmap(filp, 0, map->size,
 					  PROT_READ|PROT_WRITE,
-					  MAP_SHARED, 
+					  MAP_SHARED,
 					  (unsigned long)map->offset);
 			up(&current->mm->mmap_sem);
 		} else {
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/r128_cce.c linux/drivers/char/drm/r128_cce.c
--- v2.4.0-prerelease/linux/drivers/char/drm/r128_cce.c	Wed Dec 31 16:00:00 1969
+++ linux/drivers/char/drm/r128_cce.c	Thu Jan  4 13:03:20 2001
@@ -0,0 +1,1253 @@
+/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
+ * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *   Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "r128_drv.h"
+
+#include <linux/interrupt.h>	/* For task queue support */
+#include <linux/delay.h>
+
+
+/* FIXME: Temporary CCE packet buffer */
+u32 r128_cce_buffer[(1 << 14)] __attribute__ ((aligned (32)));
+
+/* CCE microcode (from ATI) */
+static u32 r128_cce_microcode[] = {
+	0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0,
+	1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0,
+	599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1,
+	11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11,
+	262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28,
+	1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9,
+	30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656,
+	1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1,
+	15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071,
+	12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2,
+	46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1,
+	459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1,
+	18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1,
+	15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2,
+	268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1,
+	15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82,
+	1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729,
+	3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008,
+	1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0,
+	15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1,
+	180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1,
+	114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0,
+	33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370,
+	1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1,
+	14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793,
+	1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1,
+	198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1,
+	114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1,
+	1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1,
+	1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894,
+	16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14,
+	174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1,
+	33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1,
+	33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1,
+	409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+#define DO_REMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size)
+
+#define DO_REMAPFREE(_m)                                                    \
+	do {                                                                \
+		if ((_m)->handle && (_m)->size)                             \
+			drm_ioremapfree((_m)->handle, (_m)->size);          \
+	} while (0)
+
+#define DO_FIND_MAP(_m, _o)                                                 \
+	do {                                                                \
+		int _i;                                                     \
+		for (_i = 0; _i < dev->map_count; _i++) {                   \
+			if (dev->maplist[_i]->offset == _o) {               \
+				_m = dev->maplist[_i];                      \
+				break;                                      \
+			}                                                   \
+		}                                                           \
+	} while (0)
+
+
+int R128_READ_PLL(drm_device_t *dev, int addr)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+
+	R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
+	return R128_READ(R128_CLOCK_CNTL_DATA);
+}
+
+#if 0
+static void r128_status( drm_r128_private_t *dev_priv )
+{
+	printk( "GUI_STAT           = 0x%08x\n",
+		(unsigned int)R128_READ( R128_GUI_STAT ) );
+	printk( "PM4_STAT           = 0x%08x\n",
+		(unsigned int)R128_READ( R128_PM4_STAT ) );
+	printk( "PM4_BUFFER_DL_WPTR = 0x%08x\n",
+		(unsigned int)R128_READ( R128_PM4_BUFFER_DL_WPTR ) );
+	printk( "PM4_BUFFER_DL_RPTR = 0x%08x\n",
+		(unsigned int)R128_READ( R128_PM4_BUFFER_DL_RPTR ) );
+	printk( "PM4_MICRO_CNTL     = 0x%08x\n",
+		(unsigned int)R128_READ( R128_PM4_MICRO_CNTL ) );
+	printk( "PM4_BUFFER_CNTL    = 0x%08x\n",
+		(unsigned int)R128_READ( R128_PM4_BUFFER_CNTL ) );
+}
+#endif
+
+
+/* ================================================================
+ * Engine, FIFO control
+ */
+
+static int r128_do_pixcache_flush( drm_r128_private_t *dev_priv )
+{
+	u32 tmp;
+	int i;
+
+	tmp = R128_READ( R128_PC_NGUI_CTLSTAT ) | R128_PC_FLUSH_ALL;
+	R128_WRITE( R128_PC_NGUI_CTLSTAT, tmp );
+
+	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+		if ( !(R128_READ( R128_PC_NGUI_CTLSTAT ) & R128_PC_BUSY) ) {
+			return 0;
+		}
+		udelay( 1 );
+	}
+
+	DRM_ERROR( "%s failed!\n", __FUNCTION__ );
+	return -EBUSY;
+}
+
+static int r128_do_wait_for_fifo( drm_r128_private_t *dev_priv, int entries )
+{
+	int i;
+
+	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+		int slots = R128_READ( R128_GUI_STAT ) & R128_GUI_FIFOCNT_MASK;
+		if ( slots >= entries ) return 0;
+		udelay( 1 );
+	}
+
+	DRM_ERROR( "%s failed!\n", __FUNCTION__ );
+	return -EBUSY;
+}
+
+static int r128_do_wait_for_idle( drm_r128_private_t *dev_priv )
+{
+	int i, ret;
+
+	ret = r128_do_wait_for_fifo( dev_priv, 64 );
+	if ( !ret ) return ret;
+
+	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+		if ( !(R128_READ( R128_GUI_STAT ) & R128_GUI_ACTIVE) ) {
+			r128_do_pixcache_flush( dev_priv );
+			return 0;
+		}
+		udelay( 1 );
+	}
+
+	DRM_ERROR( "%s failed!\n", __FUNCTION__ );
+	return -EBUSY;
+}
+
+
+/* ================================================================
+ * CCE control, initialization
+ */
+
+/* Load the microcode for the CCE */
+static void r128_cce_load_microcode( drm_r128_private_t *dev_priv )
+{
+	int i;
+
+	r128_do_wait_for_idle( dev_priv );
+
+	R128_WRITE( R128_PM4_MICROCODE_ADDR, 0 );
+	for ( i = 0 ; i < 256 ; i++ ) {
+		R128_WRITE( R128_PM4_MICROCODE_DATAH,
+			    r128_cce_microcode[i * 2] );
+		R128_WRITE( R128_PM4_MICROCODE_DATAL,
+			    r128_cce_microcode[i * 2 + 1] );
+	}
+}
+
+/* Flush any pending commands to the CCE.  This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+static void r128_do_cce_flush( drm_r128_private_t *dev_priv )
+{
+	u32 tmp;
+
+	tmp = R128_READ( R128_PM4_BUFFER_DL_WPTR ) | R128_PM4_BUFFER_DL_DONE;
+	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, tmp );
+}
+
+/* Wait for the CCE to go idle.
+ */
+static int r128_do_cce_idle( drm_r128_private_t *dev_priv )
+{
+	int i;
+
+	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+		if ( *dev_priv->ring.head == dev_priv->ring.tail ) {
+			int pm4stat = R128_READ( R128_PM4_STAT );
+			if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >=
+			       dev_priv->cce_fifo_size ) &&
+			     !(pm4stat & (R128_PM4_BUSY |
+					  R128_PM4_GUI_ACTIVE)) ) {
+				return r128_do_pixcache_flush( dev_priv );
+			}
+		}
+		udelay( 1 );
+	}
+
+#if 0
+	DRM_ERROR( "failed!\n" );
+	r128_status( dev_priv );
+#endif
+	return -EBUSY;
+}
+
+/* Start the Concurrent Command Engine.
+ */
+static void r128_do_cce_start( drm_r128_private_t *dev_priv )
+{
+	r128_do_wait_for_idle( dev_priv );
+
+	R128_WRITE( R128_PM4_BUFFER_CNTL,
+		    dev_priv->cce_mode | dev_priv->ring.size_l2qw );
+	R128_READ( R128_PM4_BUFFER_ADDR ); /* as per the sample code */
+	R128_WRITE( R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN );
+
+	dev_priv->cce_running = 1;
+}
+
+/* Reset the Concurrent Command Engine.  This will not flush any pending
+ * commands, so you must wait for the CCE command stream to complete
+ * before calling this routine.
+ */
+static void r128_do_cce_reset( drm_r128_private_t *dev_priv )
+{
+	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 );
+	R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 );
+	*dev_priv->ring.head = 0;
+	dev_priv->ring.tail = 0;
+}
+
+/* Stop the Concurrent Command Engine.  This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CCE
+ * to go idle before calling this routine.
+ */
+static void r128_do_cce_stop( drm_r128_private_t *dev_priv )
+{
+	R128_WRITE( R128_PM4_MICRO_CNTL, 0 );
+	R128_WRITE( R128_PM4_BUFFER_CNTL, R128_PM4_NONPM4 );
+
+	dev_priv->cce_running = 0;
+}
+
+/* Reset the engine.  This will stop the CCE if it is running.
+ */
+static int r128_do_engine_reset( drm_device_t *dev )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
+
+	r128_do_pixcache_flush( dev_priv );
+
+	clock_cntl_index = R128_READ( R128_CLOCK_CNTL_INDEX );
+	mclk_cntl = R128_READ_PLL( dev, R128_MCLK_CNTL );
+
+	R128_WRITE_PLL( R128_MCLK_CNTL,
+			mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP );
+
+	gen_reset_cntl = R128_READ( R128_GEN_RESET_CNTL );
+
+	/* Taken from the sample code - do not change */
+	R128_WRITE( R128_GEN_RESET_CNTL,
+		    gen_reset_cntl | R128_SOFT_RESET_GUI );
+	R128_READ( R128_GEN_RESET_CNTL );
+	R128_WRITE( R128_GEN_RESET_CNTL,
+		    gen_reset_cntl & ~R128_SOFT_RESET_GUI );
+	R128_READ( R128_GEN_RESET_CNTL );
+
+	R128_WRITE_PLL( R128_MCLK_CNTL, mclk_cntl );
+	R128_WRITE( R128_CLOCK_CNTL_INDEX, clock_cntl_index );
+	R128_WRITE( R128_GEN_RESET_CNTL, gen_reset_cntl );
+
+	/* Reset the CCE ring */
+	r128_do_cce_reset( dev_priv );
+
+	/* The CCE is no longer running after an engine reset */
+	dev_priv->cce_running = 0;
+
+	/* Reset any pending vertex, indirect buffers */
+	r128_freelist_reset( dev );
+
+	return 0;
+}
+
+static void r128_cce_init_ring_buffer( drm_device_t *dev )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	u32 ring_start;
+	u32 tmp;
+
+	/* The manual (p. 2) says this address is in "VM space".  This
+	 * means it's an offset from the start of AGP space.
+	 */
+	ring_start = dev_priv->cce_ring->offset - dev->agp->base;
+	R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET );
+
+	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 );
+	R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 );
+
+	/* DL_RPTR_ADDR is a physical address in AGP space. */
+	*dev_priv->ring.head = 0;
+	R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
+		    dev_priv->ring_rptr->offset );
+
+	/* Set watermark control */
+	R128_WRITE( R128_PM4_BUFFER_WM_CNTL,
+		    ((R128_WATERMARK_L/4) << R128_WMA_SHIFT)
+		    | ((R128_WATERMARK_M/4) << R128_WMB_SHIFT)
+		    | ((R128_WATERMARK_N/4) << R128_WMC_SHIFT)
+		    | ((R128_WATERMARK_K/64) << R128_WB_WM_SHIFT) );
+
+	/* Force read.  Why?  Because it's in the examples... */
+	R128_READ( R128_PM4_BUFFER_ADDR );
+
+	/* Turn on bus mastering */
+	tmp = R128_READ( R128_BUS_CNTL ) & ~R128_BUS_MASTER_DIS;
+	R128_WRITE( R128_BUS_CNTL, tmp );
+}
+
+static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
+{
+	drm_r128_private_t *dev_priv;
+        int i;
+
+	dev_priv = drm_alloc( sizeof(drm_r128_private_t), DRM_MEM_DRIVER );
+	if ( dev_priv == NULL )
+		return -ENOMEM;
+	dev->dev_private = (void *)dev_priv;
+
+	memset( dev_priv, 0, sizeof(drm_r128_private_t) );
+
+	dev_priv->is_pci = init->is_pci;
+
+	/* GH: We don't support PCI cards until PCI GART is implemented.
+	 * Fail here so we can remove all checks for PCI cards around
+	 * the CCE ring code.
+	 */
+	if ( dev_priv->is_pci ) {
+		drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+		dev->dev_private = NULL;
+		return -EINVAL;
+	}
+
+	dev_priv->usec_timeout = init->usec_timeout;
+	if ( dev_priv->usec_timeout < 1 ||
+	     dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT ) {
+		drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+		dev->dev_private = NULL;
+		return -EINVAL;
+	}
+
+	dev_priv->cce_mode = init->cce_mode;
+	dev_priv->cce_secure = init->cce_secure;
+
+	/* GH: Simple idle check.
+	 */
+	atomic_set( &dev_priv->idle_count, 0 );
+
+	/* We don't support anything other than bus-mastering ring mode,
+	 * but the ring can be in either AGP or PCI space for the ring
+	 * read pointer.
+	 */
+	if ( ( init->cce_mode != R128_PM4_192BM ) &&
+	     ( init->cce_mode != R128_PM4_128BM_64INDBM ) &&
+	     ( init->cce_mode != R128_PM4_64BM_128INDBM ) &&
+	     ( init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM ) ) {
+		drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+		dev->dev_private = NULL;
+		return -EINVAL;
+	}
+
+	switch ( init->cce_mode ) {
+	case R128_PM4_NONPM4:
+		dev_priv->cce_fifo_size = 0;
+		break;
+	case R128_PM4_192PIO:
+	case R128_PM4_192BM:
+		dev_priv->cce_fifo_size = 192;
+		break;
+	case R128_PM4_128PIO_64INDBM:
+	case R128_PM4_128BM_64INDBM:
+		dev_priv->cce_fifo_size = 128;
+		break;
+	case R128_PM4_64PIO_128INDBM:
+	case R128_PM4_64BM_128INDBM:
+	case R128_PM4_64PIO_64VCBM_64INDBM:
+	case R128_PM4_64BM_64VCBM_64INDBM:
+	case R128_PM4_64PIO_64VCPIO_64INDPIO:
+		dev_priv->cce_fifo_size = 64;
+		break;
+	}
+
+	dev_priv->fb_bpp	= init->fb_bpp;
+	dev_priv->front_offset	= init->front_offset;
+	dev_priv->front_pitch	= init->front_pitch;
+	dev_priv->back_offset	= init->back_offset;
+	dev_priv->back_pitch	= init->back_pitch;
+
+	dev_priv->depth_bpp	= init->depth_bpp;
+	dev_priv->depth_offset	= init->depth_offset;
+	dev_priv->depth_pitch	= init->depth_pitch;
+	dev_priv->span_offset	= init->span_offset;
+
+	dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch/8) << 21) |
+					  (dev_priv->front_offset >> 5));
+	dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch/8) << 21) |
+					 (dev_priv->back_offset >> 5));
+	dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) |
+					  (dev_priv->depth_offset >> 5) |
+					  R128_DST_TILE);
+	dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) |
+					 (dev_priv->span_offset >> 5));
+
+	/* FIXME: We want multiple shared areas, including one shared
+	 * only by the X Server and kernel module.
+	 */
+	for ( i = 0 ; i < dev->map_count ; i++ ) {
+		if ( dev->maplist[i]->type == _DRM_SHM ) {
+			dev_priv->sarea = dev->maplist[i];
+			break;
+		}
+	}
+
+	DO_FIND_MAP( dev_priv->fb, init->fb_offset );
+	DO_FIND_MAP( dev_priv->mmio, init->mmio_offset );
+	DO_FIND_MAP( dev_priv->cce_ring, init->ring_offset );
+	DO_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
+	DO_FIND_MAP( dev_priv->buffers, init->buffers_offset );
+
+	if ( !dev_priv->is_pci ) {
+		DO_FIND_MAP( dev_priv->agp_textures,
+			     init->agp_textures_offset );
+	}
+
+	dev_priv->sarea_priv =
+		(drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle +
+				     init->sarea_priv_offset);
+
+	DO_REMAP( dev_priv->cce_ring );
+	DO_REMAP( dev_priv->ring_rptr );
+	DO_REMAP( dev_priv->buffers );
+#if 0
+	if ( !dev_priv->is_pci ) {
+		DO_REMAP( dev_priv->agp_textures );
+	}
+#endif
+
+	dev_priv->ring.head = ((__volatile__ u32 *)
+			       dev_priv->ring_rptr->handle);
+
+	dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
+	dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
+			      + init->ring_size / sizeof(u32));
+	dev_priv->ring.size = init->ring_size;
+	dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 );
+
+	dev_priv->ring.tail_mask =
+		(dev_priv->ring.size / sizeof(u32)) - 1;
+
+	dev_priv->sarea_priv->last_frame = 0;
+	R128_WRITE( R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame );
+
+	dev_priv->sarea_priv->last_dispatch = 0;
+	R128_WRITE( R128_LAST_DISPATCH_REG,
+		    dev_priv->sarea_priv->last_dispatch );
+
+	r128_cce_init_ring_buffer( dev );
+	r128_cce_load_microcode( dev_priv );
+	r128_do_engine_reset( dev );
+
+	return 0;
+}
+
+static int r128_do_cleanup_cce( drm_device_t *dev )
+{
+	if ( dev->dev_private ) {
+		drm_r128_private_t *dev_priv = dev->dev_private;
+
+		DO_REMAPFREE( dev_priv->cce_ring );
+		DO_REMAPFREE( dev_priv->ring_rptr );
+		DO_REMAPFREE( dev_priv->buffers );
+#if 0
+		if ( !dev_priv->is_pci ) {
+			DO_REMAPFREE( dev_priv->agp_textures );
+		}
+#endif
+
+		drm_free( dev->dev_private, sizeof(drm_r128_private_t),
+			  DRM_MEM_DRIVER );
+		dev->dev_private = NULL;
+	}
+
+	return 0;
+}
+
+int r128_cce_init( struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg )
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->dev;
+	drm_r128_init_t init;
+
+	if ( copy_from_user( &init, (drm_r128_init_t *)arg, sizeof(init) ) )
+		return -EFAULT;
+
+	switch ( init.func ) {
+	case R128_INIT_CCE:
+		return r128_do_init_cce( dev, &init );
+	case R128_CLEANUP_CCE:
+		return r128_do_cleanup_cce( dev );
+	}
+
+	return -EINVAL;
+}
+
+int r128_cce_start( struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg )
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+	if ( dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4 ) {
+		DRM_DEBUG( "%s while CCE running\n", __FUNCTION__ );
+		return 0;
+	}
+
+	r128_do_cce_start( dev_priv );
+
+	return 0;
+}
+
+/* Stop the CCE.  The engine must have been idled before calling this
+ * routine.
+ */
+int r128_cce_stop( struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg )
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_cce_stop_t stop;
+	int ret;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &stop, (drm_r128_init_t *)arg, sizeof(stop) ) )
+		return -EFAULT;
+
+	/* Flush any pending CCE commands.  This ensures any outstanding
+	 * commands are executed by the engine before we turn it off.
+	 */
+	if ( stop.flush ) {
+		r128_do_cce_flush( dev_priv );
+	}
+
+	/* If we fail to make the engine go idle, we return an error
+	 * code so that the DRM ioctl wrapper can try again.
+	 */
+	if ( stop.idle ) {
+		ret = r128_do_cce_idle( dev_priv );
+		if ( ret < 0 ) return ret;
+	}
+
+	/* Finally, we can turn off the CCE.  If the engine isn't idle,
+	 * we will get some dropped triangles as they won't be fully
+	 * rendered before the CCE is shut down.
+	 */
+	r128_do_cce_stop( dev_priv );
+
+	/* Reset the engine */
+	r128_do_engine_reset( dev );
+
+	return 0;
+}
+
+/* Just reset the CCE ring.  Called as part of an X Server engine reset.
+ */
+int r128_cce_reset( struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg )
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+	if ( !dev_priv ) {
+		DRM_DEBUG( "%s called before init done\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	r128_do_cce_reset( dev_priv );
+
+	/* The CCE is no longer running after an engine reset */
+	dev_priv->cce_running = 0;
+
+	return 0;
+}
+
+int r128_cce_idle( struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg )
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	if ( dev_priv->cce_running ) {
+		r128_do_cce_flush( dev_priv );
+	}
+
+	return r128_do_cce_idle( dev_priv );
+}
+
+int r128_engine_reset( struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg )
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->dev;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	return r128_do_engine_reset( dev );
+}
+
+
+/* ================================================================
+ * Freelist management
+ */
+#define R128_BUFFER_USED	0xffffffff
+#define R128_BUFFER_FREE	0
+
+#if 0
+static int r128_freelist_init( drm_device_t *dev )
+{
+	drm_device_dma_t *dma = dev->dma;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_buf_t *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_freelist_t *entry;
+	int i;
+
+	dev_priv->head = drm_alloc( sizeof(drm_r128_freelist_t),
+				    DRM_MEM_DRIVER );
+	if ( dev_priv->head == NULL )
+		return -ENOMEM;
+
+	memset( dev_priv->head, 0, sizeof(drm_r128_freelist_t) );
+	dev_priv->head->age = R128_BUFFER_USED;
+
+	for ( i = 0 ; i < dma->buf_count ; i++ ) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+
+		entry = drm_alloc( sizeof(drm_r128_freelist_t),
+				   DRM_MEM_DRIVER );
+		if ( !entry ) return -ENOMEM;
+
+		entry->age = R128_BUFFER_FREE;
+		entry->buf = buf;
+		entry->prev = dev_priv->head;
+		entry->next = dev_priv->head->next;
+		if ( !entry->next )
+			dev_priv->tail = entry;
+
+		buf_priv->discard = 0;
+		buf_priv->dispatched = 0;
+		buf_priv->list_entry = entry;
+
+		dev_priv->head->next = entry;
+
+		if ( dev_priv->head->next )
+			dev_priv->head->next->prev = entry;
+	}
+
+	return 0;
+
+}
+#endif
+
+drm_buf_t *r128_freelist_get( drm_device_t *dev )
+{
+	drm_device_dma_t *dma = dev->dma;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_buf_t *buf;
+	int i, t;
+
+	/* FIXME: Optimize -- use freelist code */
+
+	for ( i = 0 ; i < dma->buf_count ; i++ ) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+		if ( buf->pid == 0 )
+			return buf;
+	}
+
+	for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) {
+		u32 done_age = R128_READ( R128_LAST_DISPATCH_REG );
+
+		for ( i = 0 ; i < dma->buf_count ; i++ ) {
+			buf = dma->buflist[i];
+			buf_priv = buf->dev_private;
+			if ( buf->pending && buf_priv->age <= done_age ) {
+				/* The buffer has been processed, so it
+				 * can now be used.
+				 */
+				buf->pending = 0;
+				return buf;
+			}
+		}
+		udelay( 1 );
+	}
+
+	DRM_ERROR( "returning NULL!\n" );
+	return NULL;
+}
+
+void r128_freelist_reset( drm_device_t *dev )
+{
+	drm_device_dma_t *dma = dev->dma;
+	int i;
+
+	for ( i = 0 ; i < dma->buf_count ; i++ ) {
+		drm_buf_t *buf = dma->buflist[i];
+		drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+		buf_priv->age = 0;
+	}
+}
+
+
+/* ================================================================
+ * CCE packet submission
+ */
+
+int r128_wait_ring( drm_r128_private_t *dev_priv, int n )
+{
+	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
+	int i;
+
+	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+		ring->space = *ring->head - ring->tail;
+		if ( ring->space <= 0 )
+			ring->space += ring->size;
+
+		if ( ring->space >= n )
+			return 0;
+
+		udelay( 1 );
+	}
+
+	return -EBUSY;
+}
+
+void r128_update_ring_snapshot( drm_r128_private_t *dev_priv )
+{
+	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
+
+	ring->space = *ring->head - ring->tail;
+#if R128_PERFORMANCE_BOXES
+	if ( ring->space == 0 )
+		atomic_inc( &dev_priv->idle_count );
+#endif
+	if ( ring->space <= 0 )
+		ring->space += ring->size;
+}
+
+#if 0
+static int r128_verify_command( drm_r128_private_t *dev_priv,
+				u32 cmd, int *size )
+{
+	int writing = 1;
+
+	*size = 0;
+
+	switch ( cmd & R128_CCE_PACKET_MASK ) {
+	case R128_CCE_PACKET0:
+		if ( (cmd & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2) &&
+		     (cmd & R128_CCE_PACKET0_REG_MASK) !=
+		     (R128_PM4_VC_FPU_SETUP >> 2) ) {
+			writing = 0;
+		}
+		*size = ((cmd & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
+		break;
+
+	case R128_CCE_PACKET1:
+		if ( (cmd & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2) &&
+		     (cmd & R128_CCE_PACKET1_REG0_MASK) !=
+		     (R128_PM4_VC_FPU_SETUP >> 2) ) {
+			writing = 0;
+		}
+		if ( (cmd & R128_CCE_PACKET1_REG1_MASK) <= (0x1004 << 9) &&
+		     (cmd & R128_CCE_PACKET1_REG1_MASK) !=
+		     (R128_PM4_VC_FPU_SETUP << 9) ) {
+			writing = 0;
+		}
+		*size = 3;
+		break;
+
+	case R128_CCE_PACKET2:
+		break;
+
+	case R128_CCE_PACKET3:
+		*size = ((cmd & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
+		break;
+
+	}
+
+	return writing;
+}
+
+static int r128_submit_packet_ring_secure( drm_r128_private_t *dev_priv,
+					   u32 *commands, int *count )
+{
+#if 0
+	int write = dev_priv->sarea_priv->ring_write;
+	int *write_ptr = dev_priv->ring_start + write;
+	int c = *count;
+	u32 tmp = 0;
+	int psize = 0;
+	int writing = 1;
+	int timeout;
+
+	while ( c > 0 ) {
+		tmp = *commands++;
+		if ( !psize ) {
+			writing = r128_verify_command( dev_priv, tmp, &psize );
+		}
+		psize--;
+
+		if ( writing ) {
+			write++;
+			*write_ptr++ = tmp;
+		}
+		if ( write >= dev_priv->ring_entries ) {
+			write = 0;
+			write_ptr = dev_priv->ring_start;
+		}
+		timeout = 0;
+		while ( write == *dev_priv->ring_read_ptr ) {
+			R128_READ( R128_PM4_BUFFER_DL_RPTR );
+			if ( timeout++ >= dev_priv->usec_timeout )
+				return -EBUSY;
+			udelay( 1 );
+		}
+		c--;
+	}
+
+	if ( write < 32 ) {
+		memcpy( dev_priv->ring_end,
+			dev_priv->ring_start,
+			write * sizeof(u32) );
+	}
+
+	/* Make sure WC cache has been flushed */
+	r128_flush_write_combine();
+
+	dev_priv->sarea_priv->ring_write = write;
+	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, write );
+
+	*count = 0;
+#endif
+	return 0;
+}
+
+static int r128_submit_packet_ring_insecure( drm_r128_private_t *dev_priv,
+					     u32 *commands, int *count )
+{
+#if 0
+	int write = dev_priv->sarea_priv->ring_write;
+	int *write_ptr = dev_priv->ring_start + write;
+	int c = *count;
+	int timeout;
+
+	while ( c > 0 ) {
+		write++;
+		*write_ptr++ = *commands++;
+		if ( write >= dev_priv->ring_entries ) {
+			write = 0;
+			write_ptr = dev_priv->ring_start;
+		}
+
+		timeout = 0;
+		while ( write == *dev_priv->ring_read_ptr ) {
+			R128_READ( R128_PM4_BUFFER_DL_RPTR );
+			if ( timeout++ >= dev_priv->usec_timeout )
+				return -EBUSY;
+			udelay( 1 );
+		}
+		c--;
+	}
+
+	if ( write < 32 ) {
+		memcpy( dev_priv->ring_end,
+			dev_priv->ring_start,
+			write * sizeof(u32) );
+	}
+
+	/* Make sure WC cache has been flushed */
+	r128_flush_write_combine();
+
+	dev_priv->sarea_priv->ring_write = write;
+	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, write );
+
+	*count = 0;
+#endif
+	return 0;
+}
+#endif
+
+/* Internal packet submission routine.  This uses the insecure versions
+ * of the packet submission functions, and thus should only be used for
+ * packets generated inside the kernel module.
+ */
+int r128_do_submit_packet( drm_r128_private_t *dev_priv,
+			   u32 *buffer, int count )
+{
+	int c = count;
+	int ret = 0;
+
+#if 0
+	int left = 0;
+
+	if ( c >= dev_priv->ring_entries ) {
+		c = dev_priv->ring_entries - 1;
+		left = count - c;
+	}
+
+	/* Since this is only used by the kernel we can use the
+	 * insecure ring buffer submit packet routine.
+	 */
+	ret = r128_submit_packet_ring_insecure( dev_priv, buffer, &c );
+	c += left;
+#endif
+
+	return ( ret < 0 ) ? ret : c;
+}
+
+/* External packet submission routine.  This uses the secure versions
+ * by default, and can thus submit packets received from user space.
+ */
+int r128_cce_packet( struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg )
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_packet_t packet;
+	u32 *buffer;
+	int c;
+	int size;
+	int ret = 0;
+
+#if 0
+	/* GH: Disable packet submission for now.
+	 */
+	return -EINVAL;
+#endif
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "r128_submit_packet called without lock held\n" );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &packet, (drm_r128_packet_t *)arg,
+			     sizeof(packet) ) )
+		return -EFAULT;
+
+#if 0
+	c = packet.count;
+	size = c * sizeof(*buffer);
+
+	{
+		int left = 0;
+
+		if ( c >= dev_priv->ring_entries ) {
+			c = dev_priv->ring_entries - 1;
+			size = c * sizeof(*buffer);
+			left = packet.count - c;
+		}
+
+		buffer = kmalloc( size, 0 );
+		if ( buffer == NULL)
+			return -ENOMEM;
+		if ( copy_from_user( buffer, packet.buffer, size ) )
+			return -EFAULT;
+
+		if ( dev_priv->cce_secure ) {
+			ret = r128_submit_packet_ring_secure( dev_priv,
+							      buffer, &c );
+		} else {
+			ret = r128_submit_packet_ring_insecure( dev_priv,
+								buffer, &c );
+		}
+		c += left;
+	}
+
+	kfree( buffer );
+#else
+	c = 0;
+#endif
+
+	packet.count = c;
+	if ( copy_to_user( (drm_r128_packet_t *)arg, &packet,
+			   sizeof(packet) ) )
+		return -EFAULT;
+
+	if ( ret ) {
+		return ret;
+	} else if ( c > 0 ) {
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+#if 0
+static int r128_send_vertbufs( drm_device_t *dev, drm_r128_vertex_t *v )
+{
+	drm_device_dma_t    *dma      = dev->dma;
+	drm_r128_private_t  *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_buf_t           *buf;
+	int                  i, ret;
+	RING_LOCALS;
+
+	/* Make sure we have valid data */
+	for (i = 0; i < v->send_count; i++) {
+		int idx = v->send_indices[i];
+
+		if (idx < 0 || idx >= dma->buf_count) {
+			DRM_ERROR("Index %d (of %d max)\n",
+				  idx, dma->buf_count - 1);
+			return -EINVAL;
+		}
+		buf = dma->buflist[idx];
+		if (buf->pid != current->pid) {
+			DRM_ERROR("Process %d using buffer owned by %d\n",
+				  current->pid, buf->pid);
+			return -EINVAL;
+		}
+		if (buf->pending) {
+			DRM_ERROR("Sending pending buffer:"
+				  " buffer %d, offset %d\n",
+				  v->send_indices[i], i);
+			return -EINVAL;
+		}
+	}
+
+	/* Wait for idle, if we've wrapped to make sure that all pending
+           buffers have been processed */
+	if (dev_priv->submit_age == R128_MAX_VBUF_AGE) {
+		if ((ret = r128_do_cce_idle(dev)) < 0) return ret;
+		dev_priv->submit_age = 0;
+		r128_freelist_reset(dev);
+	}
+
+	/* Make sure WC cache has been flushed (if in PIO mode) */
+	if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine();
+
+	/* FIXME: Add support for sending vertex buffer to the CCE here
+	   instead of in client code.  The v->prim holds the primitive
+	   type that should be drawn.  Loop over the list buffers in
+	   send_indices[] and submit a packet for each VB.
+
+	   This will require us to loop over the clip rects here as
+	   well, which implies that we extend the kernel driver to allow
+	   cliprects to be stored here.  Note that the cliprects could
+	   possibly come from the X server instead of the client, but
+	   this will require additional changes to the DRI to allow for
+	   this optimization. */
+
+	/* Submit a CCE packet that writes submit_age to R128_VB_AGE_REG */
+#if 0
+	cce_buffer[0] = R128CCE0(R128_CCE_PACKET0, R128_VB_AGE_REG, 0);
+	cce_buffer[1] = dev_priv->submit_age;
+
+	if ((ret = r128_do_submit_packet(dev, cce_buffer, 2)) < 0) {
+		/* Until we add support for sending VBs to the CCE in
+		   this routine, we can recover from this error.  After
+		   we add that support, we won't be able to easily
+		   recover, so we will probably have to implement
+		   another mechanism for handling timeouts from packets
+		   submitted directly by the kernel. */
+		return ret;
+	}
+#else
+	BEGIN_RING( 2 );
+
+	OUT_RING( CCE_PACKET0( R128_VB_AGE_REG, 0 ) );
+	OUT_RING( dev_priv->submit_age );
+
+	ADVANCE_RING();
+#endif
+	/* Now that the submit packet request has succeeded, we can mark
+           the buffers as pending */
+	for (i = 0; i < v->send_count; i++) {
+		buf = dma->buflist[v->send_indices[i]];
+		buf->pending = 1;
+
+		buf_priv      = buf->dev_private;
+		buf_priv->age = dev_priv->submit_age;
+	}
+
+	dev_priv->submit_age++;
+
+	return 0;
+}
+#endif
+
+
+
+
+static int r128_cce_get_buffers( drm_device_t *dev, drm_dma_t *d )
+{
+	int i;
+	drm_buf_t *buf;
+
+	for ( i = d->granted_count ; i < d->request_count ; i++ ) {
+		buf = r128_freelist_get( dev );
+		if ( !buf ) return -EAGAIN;
+
+		buf->pid = current->pid;
+
+		if ( copy_to_user( &d->request_indices[i], &buf->idx,
+				   sizeof(buf->idx) ) )
+			return -EFAULT;
+		if ( copy_to_user( &d->request_sizes[i], &buf->total,
+				   sizeof(buf->total) ) )
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int r128_cce_buffers( struct inode *inode, struct file *filp,
+		      unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_device_dma_t *dma = dev->dma;
+	int ret = 0;
+	drm_dma_t d;
+
+	if ( copy_from_user( &d, (drm_dma_t *) arg, sizeof(d) ) )
+		return -EFAULT;
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	/* Please don't send us buffers.
+	 */
+	if ( d.send_count != 0 ) {
+		DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n",
+			   current->pid, d.send_count );
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if ( d.request_count < 0 || d.request_count > dma->buf_count ) {
+		DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n",
+			   current->pid, d.request_count, dma->buf_count );
+		return -EINVAL;
+	}
+
+	d.granted_count = 0;
+
+	if ( d.request_count ) {
+		ret = r128_cce_get_buffers( dev, &d );
+	}
+
+	if ( copy_to_user( (drm_dma_t *) arg, &d, sizeof(d) ) )
+		return -EFAULT;
+
+	return ret;
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/r128_context.c linux/drivers/char/drm/r128_context.c
--- v2.4.0-prerelease/linux/drivers/char/drm/r128_context.c	Tue Aug 29 14:09:15 2000
+++ linux/drivers/char/drm/r128_context.c	Thu Jan  4 13:03:20 2001
@@ -11,11 +11,11 @@
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
@@ -23,7 +23,7 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
- * 
+ *
  * Author: Rickard E. (Rik) Faith <faith@valinux.com>
  *
  */
@@ -53,21 +53,21 @@
 #if DRM_DMA_HISTOGRAM
         dev->ctx_start = get_cycles();
 #endif
-        
+
         DRM_DEBUG("Context switch from %d to %d\n", old, new);
 
         if (new == dev->last_context) {
                 clear_bit(0, &dev->context_flag);
                 return 0;
         }
-        
+
         if (drm_flags & DRM_FLAG_NOCTX) {
                 r128_context_switch_complete(dev, new);
         } else {
                 sprintf(buf, "C %d %d\n", old, new);
                 drm_write_string(dev, buf);
         }
-        
+
         return 0;
 }
 
@@ -75,7 +75,7 @@
 {
         dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
         dev->last_switch  = jiffies;
-        
+
         if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                 DRM_ERROR("Lock isn't held after context switch\n");
         }
@@ -86,11 +86,11 @@
 #if DRM_DMA_HISTOGRAM
         atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
                                                       - dev->ctx_start)]);
-                   
+
 #endif
         clear_bit(0, &dev->context_flag);
         wake_up(&dev->context_wait);
-        
+
         return 0;
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/r128_dma.c linux/drivers/char/drm/r128_dma.c
--- v2.4.0-prerelease/linux/drivers/char/drm/r128_dma.c	Sun Oct  8 10:50:16 2000
+++ linux/drivers/char/drm/r128_dma.c	Wed Dec 31 16:00:00 1969
@@ -1,909 +0,0 @@
-/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
- * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
- *
- * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Kevin E. Martin <martin@valinux.com>
- *
- */
-
-#define __NO_VERSION__
-#include "drmP.h"
-#include "r128_drv.h"
-
-#include <linux/interrupt.h>	/* For task queue support */
-#include <linux/delay.h>
-
-
-
-#define DO_REMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size)
-
-#define DO_REMAPFREE(_m)                                                    \
-	do {                                                                \
-		if ((_m)->handle && (_m)->size)                             \
-			drm_ioremapfree((_m)->handle, (_m)->size);          \
-	} while (0)
-
-#define DO_FIND_MAP(_m, _o)                                                 \
-	do {                                                                \
-		int _i;                                                     \
-		for (_i = 0; _i < dev->map_count; _i++) {                   \
-			if (dev->maplist[_i]->offset == _o) {               \
-				_m = dev->maplist[_i];                      \
-				break;                                      \
-			}                                                   \
-		}                                                           \
-	} while (0)
-
-
-#define R128_MAX_VBUF_AGE	0x10000000
-#define R128_VB_AGE_REG		R128_GUI_SCRATCH_REG0
-
-int R128_READ_PLL(drm_device_t *dev, int addr)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-
-	R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
-	return R128_READ(R128_CLOCK_CNTL_DATA);
-}
-
-#define r128_flush_write_combine()	mb()
-
-
-static void r128_status(drm_device_t *dev)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-
-	printk("GUI_STAT           = 0x%08x\n",
-	       (unsigned int)R128_READ(R128_GUI_STAT));
-	printk("PM4_STAT           = 0x%08x\n",
-	       (unsigned int)R128_READ(R128_PM4_STAT));
-	printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
-	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
-	printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
-	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
-}
-
-static int r128_do_cleanup_cce(drm_device_t *dev)
-{
-	if (dev->dev_private) {
-		drm_r128_private_t *dev_priv = dev->dev_private;
-
-		if (!dev_priv->is_pci) {
-			DO_REMAPFREE(dev_priv->agp_ring);
-			DO_REMAPFREE(dev_priv->agp_read_ptr);
-			DO_REMAPFREE(dev_priv->agp_vertbufs);
-			DO_REMAPFREE(dev_priv->agp_indbufs);
-			DO_REMAPFREE(dev_priv->agp_textures);
-		}
-
-		drm_free(dev->dev_private, sizeof(drm_r128_private_t),
-			 DRM_MEM_DRIVER);
-		dev->dev_private = NULL;
-	}
-
-	return 0;
-}
-
-static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init)
-{
-	drm_r128_private_t *dev_priv;
-        int                 i;
-
-	dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
-	if (dev_priv == NULL) return -ENOMEM;
-	dev->dev_private = (void *)dev_priv;
-
-	memset(dev_priv, 0, sizeof(drm_r128_private_t));
-
-	dev_priv->is_pci         = init->is_pci;
-
-	dev_priv->usec_timeout   = init->usec_timeout;
-	if (dev_priv->usec_timeout < 1 ||
-	    dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
-		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
-		dev->dev_private = NULL;
-		return -EINVAL;
-	}
-
-	dev_priv->cce_mode       = init->cce_mode;
-	dev_priv->cce_fifo_size  = init->cce_fifo_size;
-	dev_priv->cce_is_bm_mode =
-		((init->cce_mode == R128_PM4_192BM) ||
-		 (init->cce_mode == R128_PM4_128BM_64INDBM) ||
-		 (init->cce_mode == R128_PM4_64BM_128INDBM) ||
-		 (init->cce_mode == R128_PM4_64BM_64VCBM_64INDBM));
-	dev_priv->cce_secure     = init->cce_secure;
-
-	if (dev_priv->cce_is_bm_mode && dev_priv->is_pci) {
-		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
-		dev->dev_private = NULL;
-		return -EINVAL;
-	}
-
-	for (i = 0; i < dev->map_count; i++) {
-		if (dev->maplist[i]->type == _DRM_SHM) {
-			dev_priv->sarea = dev->maplist[i];
-			break;
-		}
-	}
-
-	DO_FIND_MAP(dev_priv->fb,           init->fb_offset);
-	if (!dev_priv->is_pci) {
-		DO_FIND_MAP(dev_priv->agp_ring,     init->agp_ring_offset);
-		DO_FIND_MAP(dev_priv->agp_read_ptr, init->agp_read_ptr_offset);
-		DO_FIND_MAP(dev_priv->agp_vertbufs, init->agp_vertbufs_offset);
-		DO_FIND_MAP(dev_priv->agp_indbufs,  init->agp_indbufs_offset);
-		DO_FIND_MAP(dev_priv->agp_textures, init->agp_textures_offset);
-	}
-	DO_FIND_MAP(dev_priv->mmio,         init->mmio_offset);
-
-	dev_priv->sarea_priv =
-		(drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle +
-				     init->sarea_priv_offset);
-
-	if (!dev_priv->is_pci) {
-		DO_REMAP(dev_priv->agp_ring);
-		DO_REMAP(dev_priv->agp_read_ptr);
-		DO_REMAP(dev_priv->agp_vertbufs);
-#if 0
-		DO_REMAP(dev_priv->agp_indirectbufs);
-		DO_REMAP(dev_priv->agp_textures);
-#endif
-
-		dev_priv->ring_size     = init->ring_size;
-		dev_priv->ring_sizel2qw = drm_order(init->ring_size/8);
-		dev_priv->ring_entries  = init->ring_size/sizeof(u32);
-		dev_priv->ring_read_ptr = ((__volatile__ u32 *)
-					   dev_priv->agp_read_ptr->handle);
-		dev_priv->ring_start    = (u32 *)dev_priv->agp_ring->handle;
-		dev_priv->ring_end      = ((u32 *)dev_priv->agp_ring->handle
-					   + dev_priv->ring_entries);
-	}
-
-	dev_priv->submit_age    = 0;
-	R128_WRITE(R128_VB_AGE_REG, dev_priv->submit_age);
-
-	return 0;
-}
-
-int r128_init_cce(struct inode *inode, struct file *filp,
-		  unsigned int cmd, unsigned long arg)
-{
-        drm_file_t        *priv   = filp->private_data;
-        drm_device_t      *dev    = priv->dev;
-	drm_r128_init_t    init;
-
-	if (copy_from_user(&init, (drm_r128_init_t *)arg, sizeof(init)))
-		return -EFAULT;
-
-	switch (init.func) {
-	case R128_INIT_CCE:
-		return r128_do_init_cce(dev, &init);
-	case R128_CLEANUP_CCE:
-		return r128_do_cleanup_cce(dev);
-	}
-
-	return -EINVAL;
-}
-
-static void r128_mark_vertbufs_done(drm_device_t *dev)
-{
-	drm_device_dma_t   *dma      = dev->dma;
-	int                 i;
-
-	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t           *buf      = dma->buflist[i];
-		drm_r128_buf_priv_t *buf_priv = buf->dev_private;
-		buf_priv->age = 0;
-	}
-}
-
-static int r128_do_pixcache_flush(drm_device_t *dev)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	u32                 tmp;
-	int                 i;
-
-	tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
-	R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
-
-	for (i = 0; i < dev_priv->usec_timeout; i++) {
-		if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
-			return 0;
-		udelay(1);
-	}
-
-	return -EBUSY;
-}
-
-static int r128_do_wait_for_fifo(drm_device_t *dev, int entries)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	int                 i;
-
-	for (i = 0; i < dev_priv->usec_timeout; i++) {
-		int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
-		if (slots >= entries) return 0;
-		udelay(1);
-	}
-	return -EBUSY;
-}
-
-static int r128_do_wait_for_idle(drm_device_t *dev)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	int                 i, ret;
-
-	if (!(ret = r128_do_wait_for_fifo(dev, 64))) return ret;
-
-	for (i = 0; i < dev_priv->usec_timeout; i++) {
-		if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
-			(void)r128_do_pixcache_flush(dev);
-			return 0;
-		}
-		udelay(1);
-	}
-	return -EBUSY;
-}
-
-int r128_do_engine_reset(drm_device_t *dev)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	u32                 clock_cntl_index, mclk_cntl, gen_reset_cntl;
-
-	(void)r128_do_pixcache_flush(dev);
-
-	clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
-	mclk_cntl        = R128_READ_PLL(dev, R128_MCLK_CNTL);
-
-	R128_WRITE_PLL(R128_MCLK_CNTL,
-		       mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
-
-	gen_reset_cntl   = R128_READ(R128_GEN_RESET_CNTL);
-
-	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
-	(void)R128_READ(R128_GEN_RESET_CNTL);
-	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
-	(void)R128_READ(R128_GEN_RESET_CNTL);
-
-	R128_WRITE_PLL(R128_MCLK_CNTL,    mclk_cntl);
-	R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
-	R128_WRITE(R128_GEN_RESET_CNTL,   gen_reset_cntl);
-
-	/* For CCE ring buffer only */
-	if (dev_priv->cce_is_bm_mode) {
-		R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
-		R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
-		*dev_priv->ring_read_ptr = 0;
-		dev_priv->sarea_priv->ring_write = 0;
-	}
-
-	/* Reset the CCE mode */
-	(void)r128_do_wait_for_idle(dev);
-	R128_WRITE(R128_PM4_BUFFER_CNTL,
-		   dev_priv->cce_mode | dev_priv->ring_sizel2qw);
-	(void)R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */
-	R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
-
-	r128_mark_vertbufs_done(dev);
-	return 0;
-}
-
-int r128_eng_reset(struct inode *inode, struct file *filp,
-		   unsigned int cmd, unsigned long arg)
-{
-        drm_file_t        *priv   = filp->private_data;
-        drm_device_t      *dev    = priv->dev;
-
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
-	    dev->lock.pid != current->pid) {
-		DRM_ERROR("r128_eng_reset called without holding the lock\n");
-		return -EINVAL;
-	}
-
-	return r128_do_engine_reset(dev);
-}
-
-static int r128_do_engine_flush(drm_device_t *dev)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	u32                 tmp;
-
-	tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR);
-	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp | R128_PM4_BUFFER_DL_DONE);
-
-	return 0;
-}
-
-int r128_eng_flush(struct inode *inode, struct file *filp,
-		   unsigned int cmd, unsigned long arg)
-{
-        drm_file_t        *priv   = filp->private_data;
-        drm_device_t      *dev    = priv->dev;
-
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
-	    dev->lock.pid != current->pid) {
-		DRM_ERROR("r128_eng_flush called without holding the lock\n");
-		return -EINVAL;
-	}
-
-	return r128_do_engine_flush(dev);
-}
-
-static int r128_do_cce_wait_for_fifo(drm_device_t *dev, int entries)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	int                 i;
-
-	for (i = 0; i < dev_priv->usec_timeout; i++) {
-		int slots = R128_READ(R128_PM4_STAT) & R128_PM4_FIFOCNT_MASK;
-		if (slots >= entries) return 0;
-		udelay(1);
-	}
-	return -EBUSY;
-}
-
-int r128_do_cce_wait_for_idle(drm_device_t *dev)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	int                 i;
-
-	if (dev_priv->cce_is_bm_mode) {
-		for (i = 0; i < dev_priv->usec_timeout; i++) {
-			if (*dev_priv->ring_read_ptr == dev_priv->sarea_priv->ring_write) {
-				int pm4stat = R128_READ(R128_PM4_STAT);
-				if ((pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size &&
-				    !(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) {
-					return r128_do_pixcache_flush(dev);
-				}
-			}
-			udelay(1);
-		}
-		return -EBUSY;
-	} else {
-		int ret = r128_do_cce_wait_for_fifo(dev, dev_priv->cce_fifo_size);
-		if (ret < 0) return ret;
-
-		for (i = 0; i < dev_priv->usec_timeout; i++) {
-			int pm4stat = R128_READ(R128_PM4_STAT);
-			if (!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) {
-				return r128_do_pixcache_flush(dev);
-			}
-			udelay(1);
-		}
-		return -EBUSY;
-	}
-}
-
-int r128_cce_idle(struct inode *inode, struct file *filp,
-		  unsigned int cmd, unsigned long arg)
-{
-        drm_file_t         *priv     = filp->private_data;
-        drm_device_t       *dev      = priv->dev;
-
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
-	    dev->lock.pid != current->pid) {
-		DRM_ERROR("r128_wait_idle called without holding the lock\n");
-		return -EINVAL;
-	}
-
-	return r128_do_cce_wait_for_idle(dev);
-}
-
-static int r128_submit_packets_ring_secure(drm_device_t *dev,
-					   u32 *commands, int *count)
-{
-	drm_r128_private_t *dev_priv  = dev->dev_private;
-	int                 write     = dev_priv->sarea_priv->ring_write;
-	int                *write_ptr = dev_priv->ring_start + write;
-	int                 c         = *count;
-	u32                 tmp       = 0;
-	int                 psize     = 0;
-	int                 writing   = 1;
-	int                 timeout;
-
-	while (c > 0) {
-		tmp = *commands++;
-		if (!psize) {
-			writing = 1;
-
-			if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) {
-				if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) {
-					if ((tmp & R128_CCE_PACKET0_REG_MASK) !=
-					    (R128_PM4_VC_FPU_SETUP >> 2)) {
-						writing = 0;
-					}
-				}
-				psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
-			} else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) {
-				if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) {
-					if ((tmp & R128_CCE_PACKET1_REG0_MASK) !=
-					    (R128_PM4_VC_FPU_SETUP >> 2)) {
-						writing = 0;
-					}
-				} else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <=
-					   (0x1004 << 9)) {
-					if ((tmp & R128_CCE_PACKET1_REG1_MASK) !=
-					    (R128_PM4_VC_FPU_SETUP << 9)) {
-						writing = 0;
-					}
-				}
-				psize = 3;
-			} else {
-				psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
-			}
-		}
-		psize--;
-
-		if (writing) {
-			write++;
-			*write_ptr++ = tmp;
-		}
-		if (write >= dev_priv->ring_entries) {
-			write     = 0;
-			write_ptr = dev_priv->ring_start;
-		}
-		timeout = 0;
-		while (write == *dev_priv->ring_read_ptr) {
-			(void)R128_READ(R128_PM4_BUFFER_DL_RPTR);
-			if (timeout++ >= dev_priv->usec_timeout)
-				return -EBUSY;
-			udelay(1);
-		}
-		c--;
-	}
-
-	if (write < 32)
-	    memcpy(dev_priv->ring_end,
-		   dev_priv->ring_start,
-		   write * sizeof(u32));
-
-	/* Make sure WC cache has been flushed */
-	r128_flush_write_combine();
-
-	dev_priv->sarea_priv->ring_write = write;
-	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
-
-	*count = 0;
-
-	return 0;
-}
-
-static int r128_submit_packets_pio_secure(drm_device_t *dev,
-					  u32 *commands, int *count)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	u32                 tmp      = 0;
-	int                 psize    = 0;
-	int                 writing  = 1;
-	int                 addr     = R128_PM4_FIFO_DATA_EVEN;
-	int                 ret;
-
-	while (*count > 0) {
-		tmp = *commands++;
-		if (!psize) {
-			writing = 1;
-
-			if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) {
-				if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) {
-					if ((tmp & R128_CCE_PACKET0_REG_MASK) !=
-					    (R128_PM4_VC_FPU_SETUP >> 2)) {
-						writing = 0;
-					}
-				}
-				psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
-			} else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) {
-				if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) {
-					if ((tmp & R128_CCE_PACKET1_REG0_MASK) !=
-					    (R128_PM4_VC_FPU_SETUP >> 2)) {
-						writing = 0;
-					}
-				} else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <=
-					   (0x1004 << 9)) {
-					if ((tmp & R128_CCE_PACKET1_REG1_MASK) !=
-					    (R128_PM4_VC_FPU_SETUP << 9)) {
-						writing = 0;
-					}
-				}
-				psize = 3;
-			} else {
-				psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
-			}
-		}
-		psize--;
-
-		if (writing) {
-			if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0)
-				return ret;
-			R128_WRITE(addr, tmp);
-			addr ^= 0x0004;
-		}
-
-		*count -= 1;
-	}
-
-	if (addr == R128_PM4_FIFO_DATA_ODD) {
-		if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0) return ret;
-		R128_WRITE(addr, R128_CCE_PACKET2);
-	}
-
-	return 0;
-}
-
-static int r128_submit_packets_ring(drm_device_t *dev,
-				    u32 *commands, int *count)
-{
-	drm_r128_private_t *dev_priv  = dev->dev_private;
-	int                 write     = dev_priv->sarea_priv->ring_write;
-	int                *write_ptr = dev_priv->ring_start + write;
-	int                 c         = *count;
-	int                 timeout;
-
-	while (c > 0) {
-		write++;
-		*write_ptr++ = *commands++;
-		if (write >= dev_priv->ring_entries) {
-			write     = 0;
-			write_ptr = dev_priv->ring_start;
-		}
-		timeout = 0;
-		while (write == *dev_priv->ring_read_ptr) {
-			(void)R128_READ(R128_PM4_BUFFER_DL_RPTR);
-			if (timeout++ >= dev_priv->usec_timeout)
-				return -EBUSY;
-			udelay(1);
-		}
-		c--;
-	}
-
-	if (write < 32)
-	    memcpy(dev_priv->ring_end,
-		   dev_priv->ring_start,
-		   write * sizeof(u32));
-
-	/* Make sure WC cache has been flushed */
-	r128_flush_write_combine();
-
-	dev_priv->sarea_priv->ring_write = write;
-	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
-
-	*count = 0;
-
-	return 0;
-}
-
-static int r128_submit_packets_pio(drm_device_t *dev,
-				   u32 *commands, int *count)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	int                 ret;
-
-	while (*count > 1) {
-		if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret;
-		R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++);
-		R128_WRITE(R128_PM4_FIFO_DATA_ODD,  *commands++);
-		*count -= 2;
-	}
-
-	if (*count) {
-		if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret;
-		R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++);
-		R128_WRITE(R128_PM4_FIFO_DATA_ODD,  R128_CCE_PACKET2);
-		*count = 0;
-	}
-
-	return 0;
-}
-
-static int r128_do_submit_packets(drm_device_t *dev, u32 *buffer, int count)
-{
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	int                 c = count;
-	int                 ret;
-
-	if (dev_priv->cce_is_bm_mode) {
-		int left = 0;
-
-		if (c >= dev_priv->ring_entries) {
-			c    = dev_priv->ring_entries-1;
-			left = count - c;
-		}
-
-		/* Since this is only used by the kernel we can use the
-                   insecure ring buffer submit packet routine */
-		ret = r128_submit_packets_ring(dev, buffer, &c);
-
-		c += left;
-	} else {
-		/* Since this is only used by the kernel we can use the
-                   insecure PIO submit packet routine */
-		ret = r128_submit_packets_pio(dev, buffer, &c);
-	}
-
-	if (ret < 0) return ret;
-	else         return c;
-}
-
-int r128_submit_pkt(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg)
-{
-        drm_file_t         *priv     = filp->private_data;
-        drm_device_t       *dev      = priv->dev;
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	drm_r128_packet_t   packet;
-	u32                *buffer;
-	int                 c;
-	int                 size;
-	int                 ret = 0;
-
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
-	    dev->lock.pid != current->pid) {
-		DRM_ERROR("r128_submit_pkt called without holding the lock\n");
-		return -EINVAL;
-	}
-
-	if (copy_from_user(&packet, (drm_r128_packet_t *)arg, sizeof(packet)))
-		return -EFAULT;
-
-	c    = packet.count;
-	size = c * sizeof(*buffer);
-
-	if (dev_priv->cce_is_bm_mode) {
-		int left = 0;
-
-		if (c >= dev_priv->ring_entries) {
-			c    = dev_priv->ring_entries-1;
-			size = c * sizeof(*buffer);
-			left = packet.count - c;
-		}
-
-		if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
-		if (copy_from_user(buffer, packet.buffer, size))
-			return -EFAULT;
-
-		if (dev_priv->cce_secure)
-			ret = r128_submit_packets_ring_secure(dev, buffer, &c);
-		else
-			ret = r128_submit_packets_ring(dev, buffer, &c);
-
-		c += left;
-	} else {
-		if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
-		if (copy_from_user(buffer, packet.buffer, size))
-			return -EFAULT;
-
-		if (dev_priv->cce_secure)
-			ret = r128_submit_packets_pio_secure(dev, buffer, &c);
-		else
-			ret = r128_submit_packets_pio(dev, buffer, &c);
-	}
-
-	kfree(buffer);
-
-	packet.count = c;
-	if (copy_to_user((drm_r128_packet_t *)arg, &packet, sizeof(packet)))
-		return -EFAULT;
-
-	if (ret)        return ret;
-	else if (c > 0) return -EAGAIN;
-
-	return 0;
-}
-
-static int r128_send_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
-{
-	drm_device_dma_t    *dma      = dev->dma;
-	drm_r128_private_t  *dev_priv = dev->dev_private;
-	drm_r128_buf_priv_t *buf_priv;
-	drm_buf_t           *buf;
-	int                  i, ret;
-	u32                  cce[2];
-
-	/* Make sure we have valid data */
-	for (i = 0; i < v->send_count; i++) {
-		int idx = v->send_indices[i];
-
-		if (idx < 0 || idx >= dma->buf_count) {
-			DRM_ERROR("Index %d (of %d max)\n",
-				  idx, dma->buf_count - 1);
-			return -EINVAL;
-		}
-		buf = dma->buflist[idx];
-		if (buf->pid != current->pid) {
-			DRM_ERROR("Process %d using buffer owned by %d\n",
-				  current->pid, buf->pid);
-			return -EINVAL;
-		}
-		if (buf->pending) {
-			DRM_ERROR("Sending pending buffer:"
-				  " buffer %d, offset %d\n",
-				  v->send_indices[i], i);
-			return -EINVAL;
-		}
-	}
-
-	/* Wait for idle, if we've wrapped to make sure that all pending
-           buffers have been processed */
-	if (dev_priv->submit_age == R128_MAX_VBUF_AGE) {
-		if ((ret = r128_do_cce_wait_for_idle(dev)) < 0) return ret;
-		dev_priv->submit_age = 0;
-		r128_mark_vertbufs_done(dev);
-	}
-
-	/* Make sure WC cache has been flushed (if in PIO mode) */
-	if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine();
-
-	/* FIXME: Add support for sending vertex buffer to the CCE here
-	   instead of in client code.  The v->prim holds the primitive
-	   type that should be drawn.  Loop over the list buffers in
-	   send_indices[] and submit a packet for each VB.
-
-	   This will require us to loop over the clip rects here as
-	   well, which implies that we extend the kernel driver to allow
-	   cliprects to be stored here.  Note that the cliprects could
-	   possibly come from the X server instead of the client, but
-	   this will require additional changes to the DRI to allow for
-	   this optimization. */
-
-	/* Submit a CCE packet that writes submit_age to R128_VB_AGE_REG */
-	cce[0] = R128CCE0(R128_CCE_PACKET0, R128_VB_AGE_REG, 0);
-	cce[1] = dev_priv->submit_age;
-	if ((ret = r128_do_submit_packets(dev, cce, 2)) < 0) {
-		/* Until we add support for sending VBs to the CCE in
-		   this routine, we can recover from this error.  After
-		   we add that support, we won't be able to easily
-		   recover, so we will probably have to implement
-		   another mechanism for handling timeouts from packets
-		   submitted directly by the kernel. */
-		return ret;
-	}
-
-	/* Now that the submit packet request has succeeded, we can mark
-           the buffers as pending */
-	for (i = 0; i < v->send_count; i++) {
-		buf = dma->buflist[v->send_indices[i]];
-		buf->pending = 1;
-
-		buf_priv      = buf->dev_private;
-		buf_priv->age = dev_priv->submit_age;
-	}
-
-	dev_priv->submit_age++;
-
-	return 0;
-}
-
-static drm_buf_t *r128_freelist_get(drm_device_t *dev)
-{
-	drm_device_dma_t    *dma      = dev->dma;
-	drm_r128_private_t  *dev_priv = dev->dev_private;
-	drm_r128_buf_priv_t *buf_priv;
-	drm_buf_t           *buf;
-	int                  i, t;
-
-	/* FIXME: Optimize -- use freelist code */
-
-	for (i = 0; i < dma->buf_count; i++) {
-		buf = dma->buflist[i];
-		buf_priv = buf->dev_private;
-		if (buf->pid == 0) return buf;
-	}
-
-	for (t = 0; t < dev_priv->usec_timeout; t++) {
-		u32 done_age = R128_READ(R128_VB_AGE_REG);
-
-		for (i = 0; i < dma->buf_count; i++) {
-			buf = dma->buflist[i];
-			buf_priv = buf->dev_private;
-			if (buf->pending && buf_priv->age <= done_age) {
-				/* The buffer has been processed, so it
-                                   can now be used */
-				buf->pending = 0;
-				return buf;
-			}
-		}
-		udelay(1);
-	}
-
-	r128_status(dev);
-	return NULL;
-}
-
-
-static int r128_get_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
-{
-	drm_buf_t *buf;
-	int        i;
-
-	for (i = v->granted_count; i < v->request_count; i++) {
-		buf = r128_freelist_get(dev);
-		if (!buf) break;
-		buf->pid = current->pid;
-		if (copy_to_user(&v->request_indices[i],
-				 &buf->idx,
-				 sizeof(buf->idx)) ||
-		    copy_to_user(&v->request_sizes[i],
-				 &buf->total,
-				 sizeof(buf->total)))
-			return -EFAULT;
-		++v->granted_count;
-	}
-	return 0;
-}
-
-int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd,
-		    unsigned long arg)
-{
-	drm_file_t	   *priv     = filp->private_data;
-	drm_device_t	   *dev	     = priv->dev;
-	drm_r128_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t   *dma	     = dev->dma;
-	int		    retcode  = 0;
-	drm_r128_vertex_t   v;
-
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
-	    dev->lock.pid != current->pid) {
-		DRM_ERROR("r128_vertex_buf called without holding the lock\n");
-		return -EINVAL;
-	}
-
-	if (!dev_priv || dev_priv->is_pci) {
-		DRM_ERROR("r128_vertex_buf called with a PCI card\n");
-		return -EINVAL;
-	}
-
-	if (copy_from_user(&v, (drm_r128_vertex_t *)arg, sizeof(v)))
-		return -EFAULT;
-	DRM_DEBUG("%d: %d send, %d req\n",
-		  current->pid, v.send_count, v.request_count);
-
-	if (v.send_count < 0 || v.send_count > dma->buf_count) {
-		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
-			  current->pid, v.send_count, dma->buf_count);
-		return -EINVAL;
-	}
-	if (v.request_count < 0 || v.request_count > dma->buf_count) {
-		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
-			  current->pid, v.request_count, dma->buf_count);
-		return -EINVAL;
-	}
-
-	if (v.send_count) {
-		retcode = r128_send_vertbufs(dev, &v);
-	}
-
-	v.granted_count = 0;
-
-	if (!retcode && v.request_count) {
-		retcode = r128_get_vertbufs(dev, &v);
-	}
-
-	DRM_DEBUG("%d returning, granted = %d\n",
-		  current->pid, v.granted_count);
-	if (copy_to_user((drm_r128_vertex_t *)arg, &v, sizeof(v)))
-		return -EFAULT;
-
-	return retcode;
-}
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/r128_drm.h linux/drivers/char/drm/r128_drm.h
--- v2.4.0-prerelease/linux/drivers/char/drm/r128_drm.h	Sun Nov 19 18:44:06 2000
+++ linux/drivers/char/drm/r128_drm.h	Thu Jan  4 13:03:20 2001
@@ -11,11 +11,11 @@
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
@@ -24,7 +24,9 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  *
- * Authors: Kevin E. Martin <martin@valinux.com>
+ * Authors:
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
  *
  */
 
@@ -32,80 +34,239 @@
 #define _R128_DRM_H_
 
 /* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (r128_sarea.h)
+ */
+#ifndef __R128_SAREA_DEFINES__
+#define __R128_SAREA_DEFINES__
+
+/* What needs to be changed for the current vertex buffer?
+ */
+#define R128_UPLOAD_CONTEXT		0x001
+#define R128_UPLOAD_SETUP		0x002
+#define R128_UPLOAD_TEX0		0x004
+#define R128_UPLOAD_TEX1		0x008
+#define R128_UPLOAD_TEX0IMAGES		0x010
+#define R128_UPLOAD_TEX1IMAGES		0x020
+#define R128_UPLOAD_CORE		0x040
+#define R128_UPLOAD_MASKS		0x080
+#define R128_UPLOAD_WINDOW		0x100
+#define R128_UPLOAD_CLIPRECTS		0x200	/* handled client-side */
+#define R128_REQUIRE_QUIESCENCE		0x400
+#define R128_UPLOAD_ALL			0x7ff
+
+#define R128_FRONT			0x1
+#define R128_BACK			0x2
+#define R128_DEPTH			0x4
+
+/* Primitive types
+ */
+#define R128_POINTS			0x1
+#define R128_LINES			0x2
+#define R128_LINE_STRIP			0x3
+#define R128_TRIANGLES			0x4
+#define R128_TRIANGLE_FAN		0x5
+#define R128_TRIANGLE_STRIP		0x6
+
+/* Vertex/indirect buffer size
+ */
+#if 1
+#define R128_BUFFER_SIZE		16384
+#else
+#define R128_BUFFER_SIZE		(128 * 1024)
+#endif
+
+/* Byte offsets for indirect buffer data
+ */
+#define R128_INDEX_PRIM_OFFSET		20
+#define R128_HOSTDATA_BLIT_OFFSET	32
+
+/* 2048x2048 @ 32bpp texture requires this many indirect buffers
+ */
+#define R128_MAX_BLIT_BUFFERS		((2048 * 2048 * 4) / R128_BUFFER_SIZE)
+
+/* Keep these small for testing.
+ */
+#define R128_NR_SAREA_CLIPRECTS		12
+
+/* There are 2 heaps (local/AGP).  Each region within a heap is a
+ *  minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define R128_LOCAL_TEX_HEAP		0
+#define R128_AGP_TEX_HEAP		1
+#define R128_NR_TEX_HEAPS		2
+#define R128_NR_TEX_REGIONS		64
+#define R128_LOG_TEX_GRANULARITY	16
+
+#define R128_NR_CONTEXT_REGS		12
+#define R128_TEX_MAXLEVELS		11
+
+#endif /* __R128_SAREA_DEFINES__ */
+
+typedef struct {
+	/* Context state - can be written in one large chunk */
+	unsigned int dst_pitch_offset_c;
+	unsigned int dp_gui_master_cntl_c;
+	unsigned int sc_top_left_c;
+	unsigned int sc_bottom_right_c;
+	unsigned int z_offset_c;
+	unsigned int z_pitch_c;
+	unsigned int z_sten_cntl_c;
+	unsigned int tex_cntl_c;
+	unsigned int misc_3d_state_cntl_reg;
+	unsigned int texture_clr_cmp_clr_c;
+	unsigned int texture_clr_cmp_msk_c;
+	unsigned int fog_color_c;
+
+	/* Texture state */
+	unsigned int tex_size_pitch_c;
+	unsigned int constant_color_c;
+
+	/* Setup state */
+	unsigned int pm4_vc_fpu_setup;
+	unsigned int setup_cntl;
+
+	/* Mask state */
+	unsigned int dp_write_mask;
+	unsigned int sten_ref_mask_c;
+	unsigned int plane_3d_mask_c;
+
+	/* Window state */
+	unsigned int window_xy_offset;
+
+	/* Core state */
+	unsigned int scale_3d_cntl;
+} drm_r128_context_regs_t;
+
+/* Setup registers for each texture unit */
+typedef struct {
+	unsigned int tex_cntl;
+	unsigned int tex_combine_cntl;
+	unsigned int tex_size_pitch;
+	unsigned int tex_offset[R128_TEX_MAXLEVELS];
+	unsigned int tex_border_color;
+} drm_r128_texture_regs_t;
+
+
+typedef struct drm_tex_region {
+	unsigned char next, prev;
+	unsigned char in_use;
+	int age;
+} drm_tex_region_t;
+
+typedef struct drm_r128_sarea {
+	/* The channel for communication of state information to the kernel
+	 * on firing a vertex buffer.
+	 */
+	drm_r128_context_regs_t context_state;
+	drm_r128_texture_regs_t tex_state[R128_NR_TEX_HEAPS];
+	unsigned int dirty;
+	unsigned int vertsize;
+	unsigned int vc_format;
+
+	/* The current cliprects, or a subset thereof.
+	 */
+	drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS];
+	unsigned int nbox;
+
+	/* Counters for client-side throttling of rendering clients.
+	 */
+	unsigned int last_frame;
+	unsigned int last_dispatch;
+
+	drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
+	int tex_age[R128_NR_TEX_HEAPS];
+	int ctx_owner;
+} drm_r128_sarea_t;
+
+
+/* WARNING: If you change any of these defines, make sure to change the
  * defines in the Xserver file (xf86drmR128.h)
  */
 typedef struct drm_r128_init {
-	enum { 
+	enum {
 		R128_INIT_CCE    = 0x01,
 		R128_CLEANUP_CCE = 0x02
 	} func;
 	int sarea_priv_offset;
 	int is_pci;
 	int cce_mode;
-	int cce_fifo_size;
 	int cce_secure;
 	int ring_size;
 	int usec_timeout;
 
-	int fb_offset;
-	int agp_ring_offset;
-	int agp_read_ptr_offset;
-	int agp_vertbufs_offset;
-	int agp_indbufs_offset;
-	int agp_textures_offset;
-	int mmio_offset;
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+	unsigned int span_offset;
+
+	unsigned int fb_offset;
+	unsigned int mmio_offset;
+	unsigned int ring_offset;
+	unsigned int ring_rptr_offset;
+	unsigned int buffers_offset;
+	unsigned int agp_textures_offset;
 } drm_r128_init_t;
 
-typedef struct drm_r128_packet {
-	unsigned int *buffer;
-	int          count;
-	int          flags;
-} drm_r128_packet_t;
+typedef struct drm_r128_cce_stop {
+	int flush;
+	int idle;
+} drm_r128_cce_stop_t;
 
-typedef enum drm_r128_prim {
-	_DRM_R128_PRIM_NONE		= 0x0001,
-	_DRM_R128_PRIM_POINT		= 0x0002,
-	_DRM_R128_PRIM_LINE		= 0x0004,
-	_DRM_R128_PRIM_POLY_LINE	= 0x0008,
-	_DRM_R128_PRIM_TRI_LIST		= 0x0010,
-	_DRM_R128_PRIM_TRI_FAN		= 0x0020,
-	_DRM_R128_PRIM_TRI_STRIP	= 0x0040,
-	_DRM_R128_PRIM_TRI_TYPE2	= 0x0080
-} drm_r128_prim_t;
+typedef struct drm_r128_clear {
+	unsigned int flags;
+	int x, y, w, h;
+	unsigned int clear_color;
+	unsigned int clear_depth;
+} drm_r128_clear_t;
 
 typedef struct drm_r128_vertex {
-				/* Indices here refer to the offset into
-				   buflist in drm_buf_get_t.  */
-	int		send_count;	  /* Number of buffers to send	    */
-	int		*send_indices;	  /* List of handles to buffers	    */
-	int		*send_sizes;	  /* Lengths of data to send	    */
-	drm_r128_prim_t	prim;		  /* Primitive type		    */
-	int		request_count;	  /* Number of buffers requested    */
-	int		*request_indices; /* Buffer information		    */
-	int		*request_sizes;
-	int		granted_count;	  /* Number of buffers granted	    */
+	int prim;
+	int idx;			/* Index of vertex buffer */
+	int count;			/* Number of vertices in buffer */
+	int discard;			/* Client finished with buffer? */
 } drm_r128_vertex_t;
 
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (r128_sarea.h)
- */
-#define R128_LOCAL_TEX_HEAP       0
-#define R128_AGP_TEX_HEAP         1
-#define R128_NR_TEX_HEAPS         2
-#define R128_NR_TEX_REGIONS      64
-#define R128_LOG_TEX_GRANULARITY 16
+typedef struct drm_r128_indices {
+	int prim;
+	int idx;
+	int start;
+	int end;
+	int discard;			/* Client finished with buffer? */
+} drm_r128_indices_t;
 
-typedef struct drm_tex_region {
-	unsigned char next, prev;       
-	unsigned char in_use;   
-	int age;                        
-} drm_tex_region_t;
+typedef struct drm_r128_blit {
+	int idx;
+	int pitch;
+	int offset;
+	int format;
+	unsigned short x, y;
+	unsigned short width, height;
+} drm_r128_blit_t;
 
-typedef struct drm_r128_sarea {
-	drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
-	int              tex_age[R128_NR_TEX_HEAPS];
-	int              ctx_owner;
-	int              ring_write;
-} drm_r128_sarea_t;
+typedef struct drm_r128_depth {
+	enum {
+		R128_WRITE_SPAN		= 0x01,
+		R128_WRITE_PIXELS	= 0x02,
+		R128_READ_SPAN		= 0x03,
+		R128_READ_PIXELS	= 0x04
+	} func;
+	int n;
+	int *x;
+	int *y;
+	unsigned int *buffer;
+	unsigned char *mask;
+} drm_r128_depth_t;
+
+typedef struct drm_r128_stipple {
+	unsigned int *mask;
+} drm_r128_stipple_t;
+
+typedef struct drm_r128_packet {
+	unsigned int *buffer;
+	int count;
+	int flags;
+} drm_r128_packet_t;
 
 #endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/r128_drv.c linux/drivers/char/drm/r128_drv.c
--- v2.4.0-prerelease/linux/drivers/char/drm/r128_drv.c	Sun Nov 19 18:44:06 2000
+++ linux/drivers/char/drm/r128_drv.c	Thu Jan  4 13:03:20 2001
@@ -24,8 +24,10 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- *          Kevin E. Martin <martin@valinux.com>
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
  *
  */
 
@@ -33,15 +35,15 @@
 #include "drmP.h"
 #include "r128_drv.h"
 
-#define R128_NAME	 "r128"
-#define R128_DESC	 "ATI Rage 128"
-#define R128_DATE	 "20000928"
-#define R128_MAJOR	 1
-#define R128_MINOR	 0
-#define R128_PATCHLEVEL  0
+#define R128_NAME		"r128"
+#define R128_DESC		"ATI Rage 128"
+#define R128_DATE		"20001215"
+#define R128_MAJOR		2
+#define R128_MINOR		1
+#define R128_PATCHLEVEL		2
 
-static drm_device_t	      r128_device;
-drm_ctx_t	              r128_res_ctx;
+static drm_device_t	r128_device;
+drm_ctx_t		r128_res_ctx;
 
 static struct file_operations r128_fops = {
 #if LINUX_VERSION_CODE >= 0x020400
@@ -65,52 +67,61 @@
 };
 
 static drm_ioctl_desc_t	      r128_ioctls[] = {
-	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]     = { r128_version,	   0, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]  = { drm_getunique,   0, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]   = { drm_getmagic,	   0, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]   = { drm_irq_busid,   0, 1 },
-
-	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]  = { drm_setunique,   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	      = { drm_block,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]     = { drm_unblock,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]  = { drm_authmagic,   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]     = { drm_addmap,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]    = { r128_addbufs,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]   = { drm_markbufs,    1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]   = { drm_infobufs,    1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]    = { r128_mapbufs,	   1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]   = { drm_freebufs,    1, 0 },
-
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]     = { r128_addctx,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]      = { r128_rmctx,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]     = { r128_modctx,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]     = { r128_getctx,	   1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]  = { r128_switchctx,  1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]     = { r128_newctx,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]     = { r128_resctx,	   1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]    = { drm_adddraw,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]     = { drm_rmdraw,	   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	      = { r128_lock,	   1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]      = { r128_unlock,	   1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]      = { drm_finish,	   1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]      = { r128_version,      0, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]   = { drm_getunique,     0, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]    = { drm_getmagic,      0, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]    = { drm_irq_busid,     0, 1 },
+
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]   = { drm_setunique,     1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	       = { drm_block,         1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]      = { drm_unblock,       1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]   = { drm_authmagic,     1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]      = { drm_addmap,        1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]     = { r128_addbufs,      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]    = { drm_markbufs,      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]    = { drm_infobufs,      1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]     = { r128_mapbufs,      1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]    = { drm_freebufs,      1, 0 },
+
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]      = { r128_addctx,	      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]       = { r128_rmctx,	      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]      = { r128_modctx,	      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]      = { r128_getctx,	      1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]   = { r128_switchctx,    1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]      = { r128_newctx,	      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]      = { r128_resctx,	      1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]     = { drm_adddraw,	      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]      = { drm_rmdraw,	      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_DMA)]	       = { r128_cce_buffers,  1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	       = { r128_lock,	      1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]       = { r128_unlock,	      1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]       = { drm_finish,	      1, 0 },
 
 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]  = { drm_agp_enable,  1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]    = { drm_agp_info,    1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]   = { drm_agp_alloc,   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]    = { drm_agp_free,    1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]    = { drm_agp_bind,    1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]  = { drm_agp_unbind,  1, 1 },
-#endif
-
-	[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)]   = { r128_init_cce,   1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)]  = { r128_eng_reset,  1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)]  = { r128_eng_flush,  1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_R128_IDLE)]   = { r128_cce_idle,   1, 0 },
-	[DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]  = { drm_agp_acquire,   1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]  = { drm_agp_release,   1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]   = { drm_agp_enable,    1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]     = { drm_agp_info,      1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]    = { drm_agp_alloc,     1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]     = { drm_agp_free,      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]     = { drm_agp_bind,      1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]   = { drm_agp_unbind,    1, 1 },
+#endif
+
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)]    = { r128_cce_init,     1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start,  1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)]  = { r128_cce_stop,   1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset,  1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)]  = { r128_cce_idle,   1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)]   = { r128_engine_reset, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)]    = { r128_cce_swap,     1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)]   = { r128_cce_clear,    1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)]  = { r128_cce_vertex,   1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices,  1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)]    = { r128_cce_blit,     1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)]   = { r128_cce_depth,    1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple,  1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)]  = { r128_cce_packet,   1, 0 },
 };
 #define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls)
 
@@ -349,12 +360,12 @@
 
 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
 	dev->agp    = drm_agp_init();
-      	if (dev->agp == NULL) {
-	   	DRM_ERROR("Cannot initialize agpgart module.\n");
-	   	drm_proc_cleanup();
-	   	misc_deregister(&r128_misc);
-	   	r128_takedown(dev);
-	   	return -ENOMEM;
+	if (dev->agp == NULL) {
+		DRM_ERROR("Cannot initialize agpgart module.\n");
+		drm_proc_cleanup();
+		misc_deregister(&r128_misc);
+		r128_takedown(dev);
+		return -ENOMEM;
 	}
 
 #ifdef CONFIG_MTRR
@@ -413,8 +424,8 @@
 module_exit(r128_cleanup);
 
 
-int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
-		  unsigned long arg)
+int r128_version(struct inode *inode, struct file *filp,
+		 unsigned int cmd, unsigned long arg)
 {
 	drm_version_t version;
 	int	      len;
@@ -424,13 +435,13 @@
 			   sizeof(version)))
 		return -EFAULT;
 
-#define DRM_COPY(name,value)				     \
-	len = strlen(value);				     \
-	if (len > name##_len) len = name##_len;		     \
-	name##_len = strlen(value);			     \
-	if (len && name) {				     \
-		if (copy_to_user(name, value, len))	     \
-			return -EFAULT;			     \
+#define DRM_COPY(name,value)					\
+	len = strlen(value);					\
+	if (len > name##_len) len = name##_len;			\
+	name##_len = strlen(value);				\
+	if (len && name) {					\
+		if (copy_to_user(name, value, len))		\
+			return -EFAULT;				\
 	}
 
 	version.version_major	   = R128_MAJOR;
@@ -506,9 +517,8 @@
 }
 
 /* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. */
-
-int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-		unsigned long arg)
+int r128_ioctl(struct inode *inode, struct file *filp,
+	       unsigned int cmd, unsigned long arg)
 {
 	int		 nr	 = DRM_IOCTL_NR(cmd);
 	drm_file_t	 *priv	 = filp->private_data;
@@ -534,19 +544,25 @@
 			DRM_DEBUG("no function\n");
 			retcode = -EINVAL;
 		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
-			    || (ioctl->auth_needed && !priv->authenticated)) {
+			   || (ioctl->auth_needed && !priv->authenticated)) {
 			retcode = -EACCES;
 		} else {
 			retcode = (func)(inode, filp, cmd, arg);
 		}
 	}
 
+#if 0
+	if ( retcode ) {
+		DRM_INFO( "%s 0x%x ret = %d\n", __FUNCTION__, nr, retcode );
+	}
+#endif
+
 	atomic_dec(&dev->ioctl_count);
 	return retcode;
 }
 
-int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
-	      unsigned long arg)
+int r128_lock(struct inode *inode, struct file *filp,
+	      unsigned int cmd, unsigned long arg)
 {
         drm_file_t        *priv   = filp->private_data;
         drm_device_t      *dev    = priv->dev;
@@ -572,33 +588,10 @@
                   lock.context, current->pid, dev->lock.hw_lock->lock,
                   lock.flags);
 
-#if 0
-				/* dev->queue_count == 0 right now for
-                                   r128.  FIXME? */
-        if (lock.context < 0 || lock.context >= dev->queue_count)
+        if (lock.context < 0)
                 return -EINVAL;
-#endif
 
         if (!ret) {
-#if 0
-                if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
-                    != lock.context) {
-                        long j = jiffies - dev->lock.lock_time;
-
-                        if (lock.context == r128_res_ctx.handle &&
-				j >= 0 && j < DRM_LOCK_SLICE) {
-                                /* Can't take lock if we just had it and
-                                   there is contention. */
-                                DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
-					lock.context, current->pid, j,
-					dev->lock.lock_time, jiffies);
-                                current->state = TASK_INTERRUPTIBLE;
-				current->policy |= SCHED_YIELD;
-                                schedule_timeout(DRM_LOCK_SLICE-j);
-				DRM_DEBUG("jiffies=%d\n", jiffies);
-                        }
-                }
-#endif
                 add_wait_queue(&dev->lock.lock_queue, &entry);
                 for (;;) {
                         current->state = TASK_INTERRUPTIBLE;
@@ -617,9 +610,6 @@
 
                                 /* Contention */
                         atomic_inc(&dev->total_sleeps);
-#if 1
-			current->policy |= SCHED_YIELD;
-#endif
                         schedule();
                         if (signal_pending(current)) {
                                 ret = -ERESTARTSYS;
@@ -630,32 +620,6 @@
                 remove_wait_queue(&dev->lock.lock_queue, &entry);
         }
 
-#if 0
-	if (!ret && dev->last_context != lock.context &&
-		lock.context != r128_res_ctx.handle &&
-		dev->last_context != r128_res_ctx.handle) {
-		add_wait_queue(&dev->context_wait, &entry);
-	        current->state = TASK_INTERRUPTIBLE;
-                /* PRE: dev->last_context != lock.context */
-	        r128_context_switch(dev, dev->last_context, lock.context);
-		/* POST: we will wait for the context
-                   switch and will dispatch on a later call
-                   when dev->last_context == lock.context
-                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
-                   TIME! */
-		current->policy |= SCHED_YIELD;
-	        schedule();
-	        current->state = TASK_RUNNING;
-	        remove_wait_queue(&dev->context_wait, &entry);
-	        if (signal_pending(current)) {
-	                ret = -EINTR;
-	        } else if (dev->last_context != lock.context) {
-			DRM_ERROR("Context mismatch: %d %d\n",
-                        	dev->last_context, lock.context);
-	        }
-	}
-#endif
-
         if (!ret) {
 		sigemptyset(&dev->sigmask);
 		sigaddset(&dev->sigmask, SIGSTOP);
@@ -670,6 +634,7 @@
 		}
                 if (lock.flags & _DRM_LOCK_QUIESCENT) {
 				/* Make hardware quiescent */
+			DRM_DEBUG( "not quiescent!\n" );
 #if 0
                         r128_quiescent(dev);
 #endif
@@ -692,8 +657,8 @@
 }
 
 
-int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
-		 unsigned long arg)
+int r128_unlock(struct inode *inode, struct file *filp,
+		unsigned int cmd, unsigned long arg)
 {
 	drm_file_t	  *priv	  = filp->private_data;
 	drm_device_t	  *dev	  = priv->dev;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/r128_drv.h linux/drivers/char/drm/r128_drv.h
--- v2.4.0-prerelease/linux/drivers/char/drm/r128_drv.h	Sun Oct  8 10:50:16 2000
+++ linux/drivers/char/drm/r128_drv.h	Thu Jan  4 13:03:20 2001
@@ -24,75 +24,136 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- *          Kevin E. Martin <martin@valinux.com>
+ * Authors:
+ *   Rickard E. (Rik) Faith <faith@valinux.com>
+ *   Kevin E. Martin <martin@valinux.com>
+ *   Gareth Hughes <gareth@valinux.com>
  *
  */
 
-#ifndef _R128_DRV_H_
-#define _R128_DRV_H_
+#ifndef __R128_DRV_H__
+#define __R128_DRV_H__
 
-typedef struct drm_r128_private {
-	int               is_pci;
-
-	int               cce_mode;
-	int               cce_fifo_size;
-	int               cce_is_bm_mode;
-	int               cce_secure;
+typedef struct drm_r128_freelist {
+   	unsigned int age;
+   	drm_buf_t *buf;
+   	struct drm_r128_freelist *next;
+   	struct drm_r128_freelist *prev;
+} drm_r128_freelist_t;
+
+typedef struct drm_r128_ring_buffer {
+	u32 *start;
+	u32 *end;
+	int size;
+	int size_l2qw;
+
+	volatile u32 *head;
+	u32 tail;
+	u32 tail_mask;
+	int space;
+} drm_r128_ring_buffer_t;
 
+typedef struct drm_r128_private {
+	drm_r128_ring_buffer_t ring;
 	drm_r128_sarea_t *sarea_priv;
 
-	__volatile__ u32 *ring_read_ptr;
-
-	u32              *ring_start;
-	u32              *ring_end;
-	int               ring_size;
-	int               ring_sizel2qw;
-	int               ring_entries;
-
-	int               submit_age;
-
-	int               usec_timeout;
-
-	drm_map_t        *sarea;
-	drm_map_t        *fb;
-	drm_map_t        *agp_ring;
-	drm_map_t        *agp_read_ptr;
-	drm_map_t        *agp_vertbufs;
-	drm_map_t        *agp_indbufs;
-	drm_map_t        *agp_textures;
-	drm_map_t        *mmio;
+	int cce_mode;
+	int cce_fifo_size;
+	int cce_secure;
+	int cce_running;
+
+   	drm_r128_freelist_t *head;
+   	drm_r128_freelist_t *tail;
+
+	int usec_timeout;
+	int is_pci;
+
+	atomic_t idle_count;
+
+	unsigned int fb_bpp;
+	unsigned int front_offset;
+	unsigned int front_pitch;
+	unsigned int back_offset;
+	unsigned int back_pitch;
+
+	unsigned int depth_bpp;
+	unsigned int depth_offset;
+	unsigned int depth_pitch;
+	unsigned int span_offset;
+
+	u32 front_pitch_offset_c;
+	u32 back_pitch_offset_c;
+	u32 depth_pitch_offset_c;
+	u32 span_pitch_offset_c;
+
+	drm_map_t *sarea;
+	drm_map_t *fb;
+	drm_map_t *mmio;
+	drm_map_t *cce_ring;
+	drm_map_t *ring_rptr;
+	drm_map_t *buffers;
+	drm_map_t *agp_textures;
 } drm_r128_private_t;
 
 typedef struct drm_r128_buf_priv {
-	u32               age;
+	u32 age;
+	int prim;
+	int discard;
+	int dispatched;
+   	drm_r128_freelist_t *list_entry;
 } drm_r128_buf_priv_t;
 
 				/* r128_drv.c */
-extern int  r128_version(struct inode *inode, struct file *filp,
-			  unsigned int cmd, unsigned long arg);
-extern int  r128_open(struct inode *inode, struct file *filp);
-extern int  r128_release(struct inode *inode, struct file *filp);
-extern int  r128_ioctl(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
-extern int  r128_lock(struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg);
-extern int  r128_unlock(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg);
-
-				/* r128_dma.c */
-extern int r128_init_cce(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg);
-extern int r128_eng_reset(struct inode *inode, struct file *filp,
-			  unsigned int cmd, unsigned long arg);
-extern int r128_eng_flush(struct inode *inode, struct file *filp,
-			  unsigned int cmd, unsigned long arg);
-extern int r128_submit_pkt(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-extern int r128_cce_idle(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg);
-extern int r128_vertex_buf(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
+extern int  r128_version( struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg );
+extern int  r128_open( struct inode *inode, struct file *filp );
+extern int  r128_release( struct inode *inode, struct file *filp );
+extern int  r128_ioctl( struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long arg );
+extern int  r128_lock( struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg );
+extern int  r128_unlock( struct inode *inode, struct file *filp,
+			 unsigned int cmd, unsigned long arg );
+
+				/* r128_cce.c */
+extern int r128_cce_init( struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg );
+extern int r128_cce_start( struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg );
+extern int r128_cce_stop( struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg );
+extern int r128_cce_reset( struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg );
+extern int r128_cce_idle( struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg );
+extern int r128_engine_reset( struct inode *inode, struct file *filp,
+			      unsigned int cmd, unsigned long arg );
+extern int r128_cce_packet( struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg );
+extern int r128_cce_buffers( struct inode *inode, struct file *filp,
+			     unsigned int cmd, unsigned long arg );
+
+extern void r128_freelist_reset( drm_device_t *dev );
+extern drm_buf_t *r128_freelist_get( drm_device_t *dev );
+
+extern int r128_wait_ring( drm_r128_private_t *dev_priv, int n );
+extern void r128_update_ring_snapshot( drm_r128_private_t *dev_priv );
+
+				/* r128_state.c */
+extern int r128_cce_clear( struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg );
+extern int r128_cce_swap( struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg );
+extern int r128_cce_vertex( struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg );
+extern int r128_cce_indices( struct inode *inode, struct file *filp,
+			     unsigned int cmd, unsigned long arg );
+extern int r128_cce_blit( struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg );
+extern int r128_cce_depth( struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg );
+extern int r128_cce_stipple( struct inode *inode, struct file *filp,
+			     unsigned int cmd, unsigned long arg );
 
 				/* r128_bufs.c */
 extern int r128_addbufs(struct inode *inode, struct file *filp,
@@ -124,78 +185,205 @@
  * for Rage 128 kernel driver.
  */
 
-#define R128_PC_NGUI_CTLSTAT	0x0184
-#       define R128_PC_FLUSH_ALL	0x00ff
-#       define R128_PC_BUSY		(1 << 31)
-
-#define R128_CLOCK_CNTL_INDEX	0x0008
-#define R128_CLOCK_CNTL_DATA	0x000c
-#       define R128_PLL_WR_EN		(1 << 7)
-
-#define R128_MCLK_CNTL		0x000f
-#       define R128_FORCE_GCP		(1 << 16)
-#       define R128_FORCE_PIPE3D_CP	(1 << 17)
-#       define R128_FORCE_RCP		(1 << 18)
-
-#define R128_GEN_RESET_CNTL	0x00f0
-#       define R128_SOFT_RESET_GUI	(1 <<  0)
-
-#define R128_PM4_BUFFER_CNTL	0x0704
-#       define R128_PM4_NONPM4			(0  << 28)
-#       define R128_PM4_192PIO			(1  << 28)
-#       define R128_PM4_192BM			(2  << 28)
-#       define R128_PM4_128PIO_64INDBM		(3  << 28)
-#       define R128_PM4_128BM_64INDBM		(4  << 28)
-#       define R128_PM4_64PIO_128INDBM		(5  << 28)
-#       define R128_PM4_64BM_128INDBM		(6  << 28)
-#       define R128_PM4_64PIO_64VCBM_64INDBM	(7  << 28)
-#       define R128_PM4_64BM_64VCBM_64INDBM	(8  << 28)
-#       define R128_PM4_64PIO_64VCPIO_64INDPIO	(15 << 28)
-
-
-#define R128_PM4_BUFFER_DL_RPTR	0x0710
-#define R128_PM4_BUFFER_DL_WPTR	0x0714
-#       define R128_PM4_BUFFER_DL_DONE		(1 << 31)
-
-#define R128_PM4_VC_FPU_SETUP	0x071c
-
-#define R128_PM4_STAT		0x07b8
-#       define R128_PM4_FIFOCNT_MASK		0x0fff
-#       define R128_PM4_BUSY			(1 << 16)
-#       define R128_PM4_GUI_ACTIVE		(1 << 31)
-
-#define R128_PM4_BUFFER_ADDR	0x07f0
-#define R128_PM4_MICRO_CNTL	0x07fc
-#       define R128_PM4_MICRO_FREERUN		(1 << 30)
-
-#define R128_PM4_FIFO_DATA_EVEN	0x1000
-#define R128_PM4_FIFO_DATA_ODD	0x1004
-
-#define R128_GUI_SCRATCH_REG0	0x15e0
-#define R128_GUI_SCRATCH_REG1	0x15e4
-#define R128_GUI_SCRATCH_REG2	0x15e8
-#define R128_GUI_SCRATCH_REG3	0x15ec
-#define R128_GUI_SCRATCH_REG4	0x15f0
-#define R128_GUI_SCRATCH_REG5	0x15f4
-
-#define R128_GUI_STAT		0x1740
-#       define R128_GUI_FIFOCNT_MASK		0x0fff
-#       define R128_GUI_ACTIVE			(1 << 31)
-
-
-/* CCE command packets */
-#define R128_CCE_PACKET0	0x00000000
-#define R128_CCE_PACKET1	0x40000000
-#define R128_CCE_PACKET2	0x80000000
-#       define R128_CCE_PACKET_MASK		0xC0000000
-#       define R128_CCE_PACKET_COUNT_MASK	0x3fff0000
-#       define R128_CCE_PACKET0_REG_MASK	0x000007ff
-#       define R128_CCE_PACKET1_REG0_MASK	0x000007ff
-#       define R128_CCE_PACKET1_REG1_MASK	0x003ff800
+#define R128_AUX_SC_CNTL		0x1660
+#	define R128_AUX1_SC_EN			(1 << 0)
+#	define R128_AUX1_SC_MODE_OR		(0 << 1)
+#	define R128_AUX1_SC_MODE_NAND		(1 << 1)
+#	define R128_AUX2_SC_EN			(1 << 2)
+#	define R128_AUX2_SC_MODE_OR		(0 << 3)
+#	define R128_AUX2_SC_MODE_NAND		(1 << 3)
+#	define R128_AUX3_SC_EN			(1 << 4)
+#	define R128_AUX3_SC_MODE_OR		(0 << 5)
+#	define R128_AUX3_SC_MODE_NAND		(1 << 5)
+#define R128_AUX1_SC_LEFT		0x1664
+#define R128_AUX1_SC_RIGHT		0x1668
+#define R128_AUX1_SC_TOP		0x166c
+#define R128_AUX1_SC_BOTTOM		0x1670
+#define R128_AUX2_SC_LEFT		0x1674
+#define R128_AUX2_SC_RIGHT		0x1678
+#define R128_AUX2_SC_TOP		0x167c
+#define R128_AUX2_SC_BOTTOM		0x1680
+#define R128_AUX3_SC_LEFT		0x1684
+#define R128_AUX3_SC_RIGHT		0x1688
+#define R128_AUX3_SC_TOP		0x168c
+#define R128_AUX3_SC_BOTTOM		0x1690
+
+#define R128_BRUSH_DATA0		0x1480
+#define R128_BUS_CNTL			0x0030
+#	define R128_BUS_MASTER_DIS		(1 << 6)
+
+#define R128_CLOCK_CNTL_INDEX		0x0008
+#define R128_CLOCK_CNTL_DATA		0x000c
+#	define R128_PLL_WR_EN			(1 << 7)
+
+#define R128_CONSTANT_COLOR_C		0x1d34
+
+#define R128_DP_GUI_MASTER_CNTL		0x146c
+#       define R128_GMC_SRC_PITCH_OFFSET_CNTL	(1    <<  0)
+#       define R128_GMC_DST_PITCH_OFFSET_CNTL	(1    <<  1)
+#	define R128_GMC_BRUSH_SOLID_COLOR	(13   <<  4)
+#	define R128_GMC_BRUSH_NONE		(15   <<  4)
+#	define R128_GMC_DST_16BPP		(4    <<  8)
+#	define R128_GMC_DST_24BPP		(5    <<  8)
+#	define R128_GMC_DST_32BPP		(6    <<  8)
+#       define R128_GMC_DST_DATATYPE_SHIFT	8
+#	define R128_GMC_SRC_DATATYPE_COLOR	(3    << 12)
+#	define R128_DP_SRC_SOURCE_MEMORY	(2    << 24)
+#	define R128_DP_SRC_SOURCE_HOST_DATA	(3    << 24)
+#	define R128_GMC_CLR_CMP_CNTL_DIS	(1    << 28)
+#	define R128_GMC_AUX_CLIP_DIS		(1    << 29)
+#	define R128_GMC_WR_MSK_DIS		(1    << 30)
+#	define R128_ROP3_S			0x00cc0000
+#	define R128_ROP3_P			0x00f00000
+#define R128_DP_WRITE_MASK		0x16cc
+#define R128_DST_PITCH_OFFSET_C		0x1c80
+#	define R128_DST_TILE			(1 << 31)
+
+#define R128_GEN_RESET_CNTL		0x00f0
+#	define R128_SOFT_RESET_GUI		(1 <<  0)
+
+#define R128_GUI_SCRATCH_REG0		0x15e0
+#define R128_GUI_SCRATCH_REG1		0x15e4
+#define R128_GUI_SCRATCH_REG2		0x15e8
+#define R128_GUI_SCRATCH_REG3		0x15ec
+#define R128_GUI_SCRATCH_REG4		0x15f0
+#define R128_GUI_SCRATCH_REG5		0x15f4
+
+#define R128_GUI_STAT			0x1740
+#	define R128_GUI_FIFOCNT_MASK		0x0fff
+#	define R128_GUI_ACTIVE			(1 << 31)
+
+#define R128_MCLK_CNTL			0x000f
+#	define R128_FORCE_GCP			(1 << 16)
+#	define R128_FORCE_PIPE3D_CP		(1 << 17)
+#	define R128_FORCE_RCP			(1 << 18)
+
+#define R128_PC_GUI_CTLSTAT		0x1748
+#define R128_PC_NGUI_CTLSTAT		0x0184
+#	define R128_PC_FLUSH_GUI		(3 << 0)
+#	define R128_PC_RI_GUI			(1 << 2)
+#	define R128_PC_FLUSH_ALL		0x00ff
+#	define R128_PC_BUSY			(1 << 31)
+
+#define R128_PRIM_TEX_CNTL_C		0x1cb0
+
+#define R128_SCALE_3D_CNTL		0x1a00
+#define R128_SEC_TEX_CNTL_C		0x1d00
+#define R128_SEC_TEXTURE_BORDER_COLOR_C	0x1d3c
+#define R128_SETUP_CNTL			0x1bc4
+#define R128_STEN_REF_MASK_C		0x1d40
+
+#define R128_TEX_CNTL_C			0x1c9c
+#	define R128_TEX_CACHE_FLUSH		(1 << 23)
+
+#define R128_WINDOW_XY_OFFSET		0x1bcc
+
+
+/* CCE registers
+ */
+#define R128_PM4_BUFFER_OFFSET		0x0700
+#define R128_PM4_BUFFER_CNTL		0x0704
+#	define R128_PM4_MASK			(15 << 28)
+#	define R128_PM4_NONPM4			(0  << 28)
+#	define R128_PM4_192PIO			(1  << 28)
+#	define R128_PM4_192BM			(2  << 28)
+#	define R128_PM4_128PIO_64INDBM		(3  << 28)
+#	define R128_PM4_128BM_64INDBM		(4  << 28)
+#	define R128_PM4_64PIO_128INDBM		(5  << 28)
+#	define R128_PM4_64BM_128INDBM		(6  << 28)
+#	define R128_PM4_64PIO_64VCBM_64INDBM	(7  << 28)
+#	define R128_PM4_64BM_64VCBM_64INDBM	(8  << 28)
+#	define R128_PM4_64PIO_64VCPIO_64INDPIO	(15 << 28)
+
+#define R128_PM4_BUFFER_WM_CNTL		0x0708
+#	define R128_WMA_SHIFT			0
+#	define R128_WMB_SHIFT			8
+#	define R128_WMC_SHIFT			16
+#	define R128_WB_WM_SHIFT			24
+
+#define R128_PM4_BUFFER_DL_RPTR_ADDR	0x070c
+#define R128_PM4_BUFFER_DL_RPTR		0x0710
+#define R128_PM4_BUFFER_DL_WPTR		0x0714
+#	define R128_PM4_BUFFER_DL_DONE		(1 << 31)
+
+#define R128_PM4_VC_FPU_SETUP		0x071c
+
+#define R128_PM4_IW_INDOFF		0x0738
+#define R128_PM4_IW_INDSIZE		0x073c
+
+#define R128_PM4_STAT			0x07b8
+#	define R128_PM4_FIFOCNT_MASK		0x0fff
+#	define R128_PM4_BUSY			(1 << 16)
+#	define R128_PM4_GUI_ACTIVE		(1 << 31)
+
+#define R128_PM4_MICROCODE_ADDR		0x07d4
+#define R128_PM4_MICROCODE_RADDR	0x07d8
+#define R128_PM4_MICROCODE_DATAH	0x07dc
+#define R128_PM4_MICROCODE_DATAL	0x07e0
+
+#define R128_PM4_BUFFER_ADDR		0x07f0
+#define R128_PM4_MICRO_CNTL		0x07fc
+#	define R128_PM4_MICRO_FREERUN		(1 << 30)
+
+#define R128_PM4_FIFO_DATA_EVEN		0x1000
+#define R128_PM4_FIFO_DATA_ODD		0x1004
 
 
+/* CCE command packets
+ */
+#define R128_CCE_PACKET0		0x00000000
+#define R128_CCE_PACKET1		0x40000000
+#define R128_CCE_PACKET2		0x80000000
+#define R128_CCE_PACKET3		0xC0000000
+#	define R128_CNTL_HOSTDATA_BLT		0x00009400
+#	define R128_CNTL_PAINT_MULTI		0x00009A00
+#	define R128_CNTL_BITBLT_MULTI		0x00009B00
+#	define R128_3D_RNDR_GEN_INDX_PRIM	0x00002300
+
+#define R128_CCE_PACKET_MASK		0xC0000000
+#define R128_CCE_PACKET_COUNT_MASK	0x3fff0000
+#define R128_CCE_PACKET0_REG_MASK	0x000007ff
+#define R128_CCE_PACKET1_REG0_MASK	0x000007ff
+#define R128_CCE_PACKET1_REG1_MASK	0x003ff800
+
+#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE		0x00000000
+#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT	0x00000001
+#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE		0x00000002
+#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE	0x00000003
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST	0x00000004
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN	0x00000005
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP	0x00000006
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2	0x00000007
+#define R128_CCE_VC_CNTL_PRIM_WALK_IND		0x00000010
+#define R128_CCE_VC_CNTL_PRIM_WALK_LIST		0x00000020
+#define R128_CCE_VC_CNTL_PRIM_WALK_RING		0x00000030
+#define R128_CCE_VC_CNTL_NUM_SHIFT		16
+
+#define R128_DATATYPE_CI8		2
+#define R128_DATATYPE_ARGB1555		3
+#define R128_DATATYPE_RGB565		4
+#define R128_DATATYPE_RGB888		5
+#define R128_DATATYPE_ARGB8888		6
+#define R128_DATATYPE_RGB332		7
+#define R128_DATATYPE_RGB8		9
+#define R128_DATATYPE_ARGB4444		15
+
+/* Constants */
+#define R128_AGP_OFFSET			0x02000000
+
+#define R128_WATERMARK_L		16
+#define R128_WATERMARK_M		8
+#define R128_WATERMARK_N		8
+#define R128_WATERMARK_K		128
+
 #define R128_MAX_USEC_TIMEOUT	100000	/* 100 ms */
 
+#define R128_LAST_FRAME_REG		R128_GUI_SCRATCH_REG0
+#define R128_LAST_DISPATCH_REG		R128_GUI_SCRATCH_REG1
+#define R128_MAX_VB_AGE			0xffffffff
+
+#define R128_MAX_VB_VERTS		(0xffff)
+
 
 #define R128_BASE(reg)		((u32)(dev_priv->mmio->handle))
 #define R128_ADDR(reg)		(R128_BASE(reg) + reg)
@@ -221,4 +409,58 @@
 #define R128CCE2(p)       ((p))
 #define R128CCE3(p,n)     ((p) | ((n) << 16))
 
-#endif
+
+
+
+#define CCE_PACKET0( reg, n )		(R128_CCE_PACKET0 |		\
+					 ((n) << 16) | ((reg) >> 2))
+#define CCE_PACKET1( reg0, reg1 )	(R128_CCE_PACKET1 |		\
+					 (((reg1) >> 2) << 11) | ((reg0) >> 2))
+#define CCE_PACKET2()			(R128_CCE_PACKET2)
+#define CCE_PACKET3( pkt, n )		(R128_CCE_PACKET3 |		\
+					 (pkt) | ((n) << 16))
+
+
+#define r128_flush_write_combine()		mb()
+
+
+#define R128_VERBOSE	0
+
+#define RING_LOCALS	int write; unsigned int tail_mask; volatile u32 *ring;
+
+#define BEGIN_RING( n ) do {						\
+	if ( R128_VERBOSE ) {						\
+		DRM_INFO( "BEGIN_RING( %d ) in %s\n",			\
+			   n, __FUNCTION__ );				\
+	}								\
+	if ( dev_priv->ring.space < n * sizeof(u32) ) {			\
+		r128_wait_ring( dev_priv, n * sizeof(u32) );		\
+	}								\
+	dev_priv->ring.space -= n * sizeof(u32);			\
+	ring = dev_priv->ring.start;					\
+	write = dev_priv->ring.tail;					\
+	tail_mask = dev_priv->ring.tail_mask;				\
+} while (0)
+
+#define ADVANCE_RING() do {						\
+	if ( R128_VERBOSE ) {						\
+		DRM_INFO( "ADVANCE_RING() tail=0x%06x wr=0x%06x\n",	\
+			  write, dev_priv->ring.tail );			\
+	}								\
+	r128_flush_write_combine();					\
+	dev_priv->ring.tail = write;					\
+	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, write );			\
+} while (0)
+
+#define OUT_RING( x ) do {						\
+	if ( R128_VERBOSE ) {						\
+		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",		\
+			   (unsigned int)(x), write );			\
+	}								\
+	ring[write++] = x;						\
+	write &= tail_mask;						\
+} while (0)
+
+#define R128_PERFORMANCE_BOXES	0
+
+#endif /* __R128_DRV_H__ */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/drm/r128_state.c linux/drivers/char/drm/r128_state.c
--- v2.4.0-prerelease/linux/drivers/char/drm/r128_state.c	Wed Dec 31 16:00:00 1969
+++ linux/drivers/char/drm/r128_state.c	Thu Jan  4 13:03:20 2001
@@ -0,0 +1,1605 @@
+/* r128_state.c -- State support for r128 -*- linux-c -*-
+ * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "r128_drv.h"
+#include "drm.h"
+
+
+/* ================================================================
+ * CCE hardware state programming functions
+ */
+
+static void r128_emit_clip_rects( drm_r128_private_t *dev_priv,
+				  drm_clip_rect_t *boxes, int count )
+{
+	u32 aux_sc_cntl = 0x00000000;
+	RING_LOCALS;
+	DRM_DEBUG( "    %s\n", __FUNCTION__ );
+
+	BEGIN_RING( 17 );
+
+	if ( count >= 1 ) {
+		OUT_RING( CCE_PACKET0( R128_AUX1_SC_LEFT, 3 ) );
+		OUT_RING( boxes[0].x1 );
+		OUT_RING( boxes[0].x2 - 1 );
+		OUT_RING( boxes[0].y1 );
+		OUT_RING( boxes[0].y2 - 1 );
+
+		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
+	}
+	if ( count >= 2 ) {
+		OUT_RING( CCE_PACKET0( R128_AUX2_SC_LEFT, 3 ) );
+		OUT_RING( boxes[1].x1 );
+		OUT_RING( boxes[1].x2 - 1 );
+		OUT_RING( boxes[1].y1 );
+		OUT_RING( boxes[1].y2 - 1 );
+
+		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
+	}
+	if ( count >= 3 ) {
+		OUT_RING( CCE_PACKET0( R128_AUX3_SC_LEFT, 3 ) );
+		OUT_RING( boxes[2].x1 );
+		OUT_RING( boxes[2].x2 - 1 );
+		OUT_RING( boxes[2].y1 );
+		OUT_RING( boxes[2].y2 - 1 );
+
+		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
+	}
+
+	OUT_RING( CCE_PACKET0( R128_AUX_SC_CNTL, 0 ) );
+	OUT_RING( aux_sc_cntl );
+
+	ADVANCE_RING();
+}
+
+static inline void r128_emit_core( drm_r128_private_t *dev_priv )
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG( "    %s\n", __FUNCTION__ );
+
+	BEGIN_RING( 2 );
+
+	OUT_RING( CCE_PACKET0( R128_SCALE_3D_CNTL, 0 ) );
+	OUT_RING( ctx->scale_3d_cntl );
+
+	ADVANCE_RING();
+}
+
+static inline void r128_emit_context( drm_r128_private_t *dev_priv )
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG( "    %s\n", __FUNCTION__ );
+
+	BEGIN_RING( 13 );
+
+	OUT_RING( CCE_PACKET0( R128_DST_PITCH_OFFSET_C, 11 ) );
+	OUT_RING( ctx->dst_pitch_offset_c );
+	OUT_RING( ctx->dp_gui_master_cntl_c );
+	OUT_RING( ctx->sc_top_left_c );
+	OUT_RING( ctx->sc_bottom_right_c );
+	OUT_RING( ctx->z_offset_c );
+	OUT_RING( ctx->z_pitch_c );
+	OUT_RING( ctx->z_sten_cntl_c );
+	OUT_RING( ctx->tex_cntl_c );
+	OUT_RING( ctx->misc_3d_state_cntl_reg );
+	OUT_RING( ctx->texture_clr_cmp_clr_c );
+	OUT_RING( ctx->texture_clr_cmp_msk_c );
+	OUT_RING( ctx->fog_color_c );
+
+	ADVANCE_RING();
+}
+
+static inline void r128_emit_setup( drm_r128_private_t *dev_priv )
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG( "    %s\n", __FUNCTION__ );
+
+	BEGIN_RING( 3 );
+
+	OUT_RING( CCE_PACKET1( R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP ) );
+	OUT_RING( ctx->setup_cntl );
+	OUT_RING( ctx->pm4_vc_fpu_setup );
+
+	ADVANCE_RING();
+}
+
+static inline void r128_emit_masks( drm_r128_private_t *dev_priv )
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG( "    %s\n", __FUNCTION__ );
+
+	BEGIN_RING( 5 );
+
+	OUT_RING( CCE_PACKET0( R128_DP_WRITE_MASK, 0 ) );
+	OUT_RING( ctx->dp_write_mask );
+
+	OUT_RING( CCE_PACKET0( R128_STEN_REF_MASK_C, 1 ) );
+	OUT_RING( ctx->sten_ref_mask_c );
+	OUT_RING( ctx->plane_3d_mask_c );
+
+	ADVANCE_RING();
+}
+
+static inline void r128_emit_window( drm_r128_private_t *dev_priv )
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG( "    %s\n", __FUNCTION__ );
+
+	BEGIN_RING( 2 );
+
+	OUT_RING( CCE_PACKET0( R128_WINDOW_XY_OFFSET, 0 ) );
+	OUT_RING( ctx->window_xy_offset );
+
+	ADVANCE_RING();
+}
+
+static inline void r128_emit_tex0( drm_r128_private_t *dev_priv )
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG( "    %s\n", __FUNCTION__ );
+
+	BEGIN_RING( 7 + R128_TEX_MAXLEVELS );
+
+	OUT_RING( CCE_PACKET0( R128_PRIM_TEX_CNTL_C,
+			       2 + R128_TEX_MAXLEVELS ) );
+	OUT_RING( tex->tex_cntl );
+	OUT_RING( tex->tex_combine_cntl );
+	OUT_RING( ctx->tex_size_pitch_c );
+	for ( i = 0 ; i < R128_TEX_MAXLEVELS ; i++ ) {
+		OUT_RING( tex->tex_offset[i] );
+	}
+
+	OUT_RING( CCE_PACKET0( R128_CONSTANT_COLOR_C, 1 ) );
+	OUT_RING( ctx->constant_color_c );
+	OUT_RING( tex->tex_border_color );
+
+	ADVANCE_RING();
+}
+
+static inline void r128_emit_tex1( drm_r128_private_t *dev_priv )
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG( "    %s\n", __FUNCTION__ );
+
+	BEGIN_RING( 5 + R128_TEX_MAXLEVELS );
+
+	OUT_RING( CCE_PACKET0( R128_SEC_TEX_CNTL_C,
+			       1 + R128_TEX_MAXLEVELS ) );
+	OUT_RING( tex->tex_cntl );
+	OUT_RING( tex->tex_combine_cntl );
+	for ( i = 0 ; i < R128_TEX_MAXLEVELS ; i++ ) {
+		OUT_RING( tex->tex_offset[i] );
+	}
+
+	OUT_RING( CCE_PACKET0( R128_SEC_TEXTURE_BORDER_COLOR_C, 0 ) );
+	OUT_RING( tex->tex_border_color );
+
+	ADVANCE_RING();
+}
+
+static inline void r128_emit_state( drm_r128_private_t *dev_priv )
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+
+	DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty );
+
+	if ( dirty & R128_UPLOAD_CORE ) {
+		r128_emit_core( dev_priv );
+		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
+	}
+
+	if ( dirty & R128_UPLOAD_CONTEXT ) {
+		r128_emit_context( dev_priv );
+		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
+	}
+
+	if ( dirty & R128_UPLOAD_SETUP ) {
+		r128_emit_setup( dev_priv );
+		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
+	}
+
+	if ( dirty & R128_UPLOAD_MASKS ) {
+		r128_emit_masks( dev_priv );
+		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
+	}
+
+	if ( dirty & R128_UPLOAD_WINDOW ) {
+		r128_emit_window( dev_priv );
+		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
+	}
+
+	if ( dirty & R128_UPLOAD_TEX0 ) {
+		r128_emit_tex0( dev_priv );
+		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
+	}
+
+	if ( dirty & R128_UPLOAD_TEX1 ) {
+		r128_emit_tex1( dev_priv );
+		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
+	}
+
+	/* Turn off the texture cache flushing */
+	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
+
+	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
+}
+
+
+#if R128_PERFORMANCE_BOXES
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void r128_clear_box( drm_r128_private_t *dev_priv,
+			    int x, int y, int w, int h,
+			    int r, int g, int b )
+{
+	u32 pitch, offset;
+	u32 fb_bpp, color;
+	RING_LOCALS;
+
+	switch ( dev_priv->fb_bpp ) {
+	case 16:
+		fb_bpp = R128_GMC_DST_16BPP;
+		color = (((r & 0xf8) << 8) |
+			 ((g & 0xfc) << 3) |
+			 ((b & 0xf8) >> 3));
+		break;
+	case 24:
+		fb_bpp = R128_GMC_DST_24BPP;
+		color = ((r << 16) | (g << 8) | b);
+		break;
+	case 32:
+		fb_bpp = R128_GMC_DST_32BPP;
+		color = (((0xff) << 24) | (r << 16) | (g <<  8) | b);
+		break;
+	default:
+		return;
+	}
+
+	offset = dev_priv->back_offset;
+	pitch = dev_priv->back_pitch >> 3;
+
+	BEGIN_RING( 6 );
+
+	OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
+	OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL
+		  | R128_GMC_BRUSH_SOLID_COLOR
+		  | fb_bpp
+		  | R128_GMC_SRC_DATATYPE_COLOR
+		  | R128_ROP3_P
+		  | R128_GMC_CLR_CMP_CNTL_DIS
+		  | R128_GMC_AUX_CLIP_DIS );
+
+	OUT_RING( (pitch << 21) | (offset >> 5) );
+	OUT_RING( color );
+
+	OUT_RING( (x << 16) | y );
+	OUT_RING( (w << 16) | h );
+
+	ADVANCE_RING();
+}
+
+static void r128_cce_performance_boxes( drm_r128_private_t *dev_priv )
+{
+	if ( atomic_read( &dev_priv->idle_count ) == 0 ) {
+		r128_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );
+	} else {
+		atomic_set( &dev_priv->idle_count, 0 );
+	}
+}
+
+#endif
+
+
+/* ================================================================
+ * CCE command dispatch functions
+ */
+
+static void r128_print_dirty( const char *msg, unsigned int flags )
+{
+	DRM_INFO( "%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
+		  msg,
+		  flags,
+		  (flags & R128_UPLOAD_CORE)        ? "core, " : "",
+		  (flags & R128_UPLOAD_CONTEXT)     ? "context, " : "",
+		  (flags & R128_UPLOAD_SETUP)       ? "setup, " : "",
+		  (flags & R128_UPLOAD_TEX0)        ? "tex0, " : "",
+		  (flags & R128_UPLOAD_TEX1)        ? "tex1, " : "",
+		  (flags & R128_UPLOAD_MASKS)       ? "masks, " : "",
+		  (flags & R128_UPLOAD_WINDOW)      ? "window, " : "",
+		  (flags & R128_UPLOAD_CLIPRECTS)   ? "cliprects, " : "",
+		  (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "" );
+}
+
+static void r128_cce_dispatch_clear( drm_device_t *dev,
+				     unsigned int flags,
+				     int cx, int cy, int cw, int ch,
+				     unsigned int clear_color,
+				     unsigned int clear_depth )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	u32 fb_bpp, depth_bpp;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	switch ( dev_priv->fb_bpp ) {
+	case 16:
+		fb_bpp = R128_GMC_DST_16BPP;
+		break;
+	case 32:
+		fb_bpp = R128_GMC_DST_32BPP;
+		break;
+	default:
+		return;
+	}
+	switch ( dev_priv->depth_bpp ) {
+	case 16:
+		depth_bpp = R128_GMC_DST_16BPP;
+		break;
+	case 24:
+	case 32:
+		depth_bpp = R128_GMC_DST_32BPP;
+		break;
+	default:
+		return;
+	}
+
+	for ( i = 0 ; i < nbox ; i++ ) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
+			   pbox[i].x1, pbox[i].y1, pbox[i].x2,
+			   pbox[i].y2, flags );
+
+		if ( flags & (R128_FRONT | R128_BACK) ) {
+			BEGIN_RING( 2 );
+
+			OUT_RING( CCE_PACKET0( R128_DP_WRITE_MASK, 0 ) );
+			OUT_RING( sarea_priv->context_state.plane_3d_mask_c );
+
+			ADVANCE_RING();
+		}
+
+		if ( flags & R128_FRONT ) {
+			BEGIN_RING( 6 );
+
+			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
+			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL
+				  | R128_GMC_BRUSH_SOLID_COLOR
+				  | fb_bpp
+				  | R128_GMC_SRC_DATATYPE_COLOR
+				  | R128_ROP3_P
+				  | R128_GMC_CLR_CMP_CNTL_DIS
+				  | R128_GMC_AUX_CLIP_DIS );
+
+			OUT_RING( dev_priv->front_pitch_offset_c );
+			OUT_RING( clear_color );
+
+			OUT_RING( (x << 16) | y );
+			OUT_RING( (w << 16) | h );
+
+			ADVANCE_RING();
+		}
+
+		if ( flags & R128_BACK ) {
+			BEGIN_RING( 6 );
+
+			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
+			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL
+				  | R128_GMC_BRUSH_SOLID_COLOR
+				  | fb_bpp
+				  | R128_GMC_SRC_DATATYPE_COLOR
+				  | R128_ROP3_P
+				  | R128_GMC_CLR_CMP_CNTL_DIS
+				  | R128_GMC_AUX_CLIP_DIS );
+
+			OUT_RING( dev_priv->back_pitch_offset_c );
+			OUT_RING( clear_color );
+
+			OUT_RING( (x << 16) | y );
+			OUT_RING( (w << 16) | h );
+
+			ADVANCE_RING();
+		}
+
+		if ( flags & R128_DEPTH ) {
+			BEGIN_RING( 6 );
+
+			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
+			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL
+				  | R128_GMC_BRUSH_SOLID_COLOR
+				  | depth_bpp
+				  | R128_GMC_SRC_DATATYPE_COLOR
+				  | R128_ROP3_P
+				  | R128_GMC_CLR_CMP_CNTL_DIS
+				  | R128_GMC_AUX_CLIP_DIS
+				  | R128_GMC_WR_MSK_DIS );
+
+			OUT_RING( dev_priv->depth_pitch_offset_c );
+			OUT_RING( clear_depth );
+
+			OUT_RING( (x << 16) | y );
+			OUT_RING( (w << 16) | h );
+
+			ADVANCE_RING();
+		}
+	}
+}
+
+static void r128_cce_dispatch_swap( drm_device_t *dev )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	u32 fb_bpp;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	r128_update_ring_snapshot( dev_priv );
+
+#if R128_PERFORMANCE_BOXES
+	/* Do some trivial performance monitoring...
+	 */
+	r128_cce_performance_boxes( dev_priv );
+#endif
+
+	switch ( dev_priv->fb_bpp ) {
+	case 16:
+		fb_bpp = R128_GMC_DST_16BPP;
+		break;
+	case 32:
+	default:
+		fb_bpp = R128_GMC_DST_32BPP;
+		break;
+	}
+
+	for ( i = 0 ; i < nbox ; i++ ) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		BEGIN_RING( 7 );
+
+		OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
+		OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL
+			  | R128_GMC_DST_PITCH_OFFSET_CNTL
+			  | R128_GMC_BRUSH_NONE
+			  | fb_bpp
+			  | R128_GMC_SRC_DATATYPE_COLOR
+			  | R128_ROP3_S
+			  | R128_DP_SRC_SOURCE_MEMORY
+			  | R128_GMC_CLR_CMP_CNTL_DIS
+			  | R128_GMC_AUX_CLIP_DIS
+			  | R128_GMC_WR_MSK_DIS );
+
+		OUT_RING( dev_priv->back_pitch_offset_c );
+		OUT_RING( dev_priv->front_pitch_offset_c );
+
+		OUT_RING( (x << 16) | y );
+		OUT_RING( (x << 16) | y );
+		OUT_RING( (w << 16) | h );
+
+		ADVANCE_RING();
+	}
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	dev_priv->sarea_priv->last_frame++;
+
+	BEGIN_RING( 2 );
+
+	OUT_RING( CCE_PACKET0( R128_LAST_FRAME_REG, 0 ) );
+	OUT_RING( dev_priv->sarea_priv->last_frame );
+
+	ADVANCE_RING();
+}
+
+static void r128_cce_dispatch_vertex( drm_device_t *dev,
+				      drm_buf_t *buf )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int format = sarea_priv->vc_format;
+	int offset = dev_priv->buffers->offset + buf->offset - dev->agp->base;
+	int size = buf->used;
+	int prim = buf_priv->prim;
+	int i = 0;
+	RING_LOCALS;
+	DRM_DEBUG( "%s: buf=%d nbox=%d\n",
+		   __FUNCTION__, buf->idx, sarea_priv->nbox );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	if ( 0 )
+		r128_print_dirty( "dispatch_vertex", sarea_priv->dirty );
+
+	if ( buf->used ) {
+		buf_priv->dispatched = 1;
+
+		if ( sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS ) {
+			r128_emit_state( dev_priv );
+		}
+
+		do {
+			/* Emit the next set of up to three cliprects */
+			if ( i < sarea_priv->nbox ) {
+				r128_emit_clip_rects( dev_priv,
+						      &sarea_priv->boxes[i],
+						      sarea_priv->nbox - i );
+			}
+
+			/* Emit the vertex buffer rendering commands */
+			BEGIN_RING( 5 );
+
+			OUT_RING( CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM, 3 ) );
+			OUT_RING( offset );
+			OUT_RING( size );
+			OUT_RING( format );
+			OUT_RING( prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
+				  (size << R128_CCE_VC_CNTL_NUM_SHIFT) );
+
+			ADVANCE_RING();
+
+			i += 3;
+		} while ( i < sarea_priv->nbox );
+	}
+
+	if ( buf_priv->discard ) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING( 2 );
+
+		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
+		OUT_RING( buf_priv->age );
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		buf->used = 0;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+
+#if 0
+	if ( dev_priv->submit_age == R128_MAX_VB_AGE ) {
+		ret = r128_do_cce_idle( dev_priv );
+		if ( ret < 0 ) return ret;
+		dev_priv->submit_age = 0;
+		r128_freelist_reset( dev );
+	}
+#endif
+
+	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
+	sarea_priv->nbox = 0;
+}
+
+
+
+
+static void r128_cce_dispatch_indirect( drm_device_t *dev,
+					drm_buf_t *buf,
+					int start, int end )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
+		   buf->idx, start, end );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	if ( start != end ) {
+		int offset = (dev_priv->buffers->offset - dev->agp->base
+			      + buf->offset + start);
+		int dwords = (end - start + 3) / sizeof(u32);
+
+		/* Indirect buffer data must be an even number of
+		 * dwords, so if we've been given an odd number we must
+		 * pad the data with a Type-2 CCE packet.
+		 */
+		if ( dwords & 1 ) {
+			u32 *data = (u32 *)
+				((char *)dev_priv->buffers->handle
+				 + buf->offset + start);
+			data[dwords++] = R128_CCE_PACKET2;
+		}
+
+		buf_priv->dispatched = 1;
+
+		/* Fire off the indirect buffer */
+		BEGIN_RING( 3 );
+
+		OUT_RING( CCE_PACKET0( R128_PM4_IW_INDOFF, 1 ) );
+		OUT_RING( offset );
+		OUT_RING( dwords );
+
+		ADVANCE_RING();
+	}
+
+	if ( buf_priv->discard ) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the indirect buffer age */
+		BEGIN_RING( 2 );
+
+		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
+		OUT_RING( buf_priv->age );
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		buf->used = 0;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+
+#if 0
+	if ( dev_priv->submit_age == R128_MAX_VB_AGE ) {
+		ret = r128_do_cce_idle( dev_priv );
+		if ( ret < 0 ) return ret;
+		dev_priv->submit_age = 0;
+		r128_freelist_reset( dev );
+	}
+#endif
+}
+
+static void r128_cce_dispatch_indices( drm_device_t *dev,
+				       drm_buf_t *buf,
+				       int start, int end,
+				       int count )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int format = sarea_priv->vc_format;
+	int offset = dev_priv->buffers->offset - dev->agp->base;
+	int prim = buf_priv->prim;
+	u32 *data;
+	int dwords;
+	int i = 0;
+	RING_LOCALS;
+	DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	if ( 0 )
+		r128_print_dirty( "dispatch_indices", sarea_priv->dirty );
+
+	if ( start != end ) {
+		buf_priv->dispatched = 1;
+
+		if ( sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS ) {
+			r128_emit_state( dev_priv );
+		}
+
+		dwords = (end - start + 3) / sizeof(u32);
+
+		data = (u32 *)((char *)dev_priv->buffers->handle
+			       + buf->offset + start);
+
+		data[0] = CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
+
+		data[1] = offset;
+		data[2] = R128_MAX_VB_VERTS;
+		data[3] = format;
+		data[4] = (prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
+			   (count << 16));
+
+		if ( count & 0x1 ) {
+			data[dwords-1] &= 0x0000ffff;
+		}
+
+		do {
+			/* Emit the next set of up to three cliprects */
+			if ( i < sarea_priv->nbox ) {
+				r128_emit_clip_rects( dev_priv,
+						      &sarea_priv->boxes[i],
+						      sarea_priv->nbox - i );
+			}
+
+			r128_cce_dispatch_indirect( dev, buf, start, end );
+
+			i += 3;
+		} while ( i < sarea_priv->nbox );
+	}
+
+	if ( buf_priv->discard ) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING( 2 );
+
+		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
+		OUT_RING( buf_priv->age );
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+
+#if 0
+	if ( dev_priv->submit_age == R128_MAX_VB_AGE ) {
+		ret = r128_do_cce_idle( dev_priv );
+		if ( ret < 0 ) return ret;
+		dev_priv->submit_age = 0;
+		r128_freelist_reset( dev );
+	}
+#endif
+
+	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
+	sarea_priv->nbox = 0;
+}
+
+static int r128_cce_dispatch_blit( drm_device_t *dev,
+				   drm_r128_blit_t *blit )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_t *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	u32 *data;
+	int dword_shift, dwords;
+	RING_LOCALS;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	/* The compiler won't optimize away a division by a variable,
+	 * even if the only legal values are powers of two.  Thus, we'll
+	 * use a shift instead.
+	 */
+	switch ( blit->format ) {
+	case R128_DATATYPE_ARGB1555:
+	case R128_DATATYPE_RGB565:
+	case R128_DATATYPE_ARGB4444:
+		dword_shift = 1;
+		break;
+	case R128_DATATYPE_ARGB8888:
+		dword_shift = 0;
+		break;
+	default:
+		DRM_ERROR( "invalid blit format %d\n", blit->format );
+		return -EINVAL;
+	}
+
+	/* Flush the pixel cache, and mark the contents as Read Invalid.
+	 * This ensures no pixel data gets mixed up with the texture
+	 * data from the host data blit, otherwise part of the texture
+	 * image may be corrupted.
+	 */
+	BEGIN_RING( 2 );
+
+	OUT_RING( CCE_PACKET0( R128_PC_GUI_CTLSTAT, 0 ) );
+	OUT_RING( R128_PC_RI_GUI | R128_PC_FLUSH_GUI );
+
+	ADVANCE_RING();
+
+	/* Dispatch the indirect buffer.
+	 */
+	buf = dma->buflist[blit->idx];
+	buf_priv = buf->dev_private;
+
+	if ( buf->pid != current->pid ) {
+		DRM_ERROR( "process %d using buffer owned by %d\n",
+			   current->pid, buf->pid );
+		return -EINVAL;
+	}
+	if ( buf->pending ) {
+		DRM_ERROR( "sending pending buffer %d\n", blit->idx );
+		return -EINVAL;
+	}
+
+	buf_priv->discard = 1;
+
+	dwords = (blit->width * blit->height) >> dword_shift;
+
+	data = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
+
+	data[0] = CCE_PACKET3( R128_CNTL_HOSTDATA_BLT, dwords + 6 );
+	data[1] = ( R128_GMC_DST_PITCH_OFFSET_CNTL
+		    | R128_GMC_BRUSH_NONE
+		    | (blit->format << 8)
+		    | R128_GMC_SRC_DATATYPE_COLOR
+		    | R128_ROP3_S
+		    | R128_DP_SRC_SOURCE_HOST_DATA
+		    | R128_GMC_CLR_CMP_CNTL_DIS
+		    | R128_GMC_AUX_CLIP_DIS
+		    | R128_GMC_WR_MSK_DIS );
+
+	data[2] = (blit->pitch << 21) | (blit->offset >> 5);
+	data[3] = 0xffffffff;
+	data[4] = 0xffffffff;
+	data[5] = (blit->y << 16) | blit->x;
+	data[6] = (blit->height << 16) | blit->width;
+	data[7] = dwords;
+
+	buf->used = (dwords + 8) * sizeof(u32);
+
+	r128_cce_dispatch_indirect( dev, buf, 0, buf->used );
+
+	/* Flush the pixel cache after the blit completes.  This ensures
+	 * the texture data is written out to memory before rendering
+	 * continues.
+	 */
+	BEGIN_RING( 2 );
+
+	OUT_RING( CCE_PACKET0( R128_PC_GUI_CTLSTAT, 0 ) );
+	OUT_RING( R128_PC_FLUSH_GUI );
+
+	ADVANCE_RING();
+
+	return 0;
+}
+
+
+/* ================================================================
+ * Tiled depth buffer management
+ *
+ * FIXME: These should all set the destination write mask for when we
+ * have hardware stencil support.
+ */
+
+static int r128_cce_dispatch_write_span( drm_device_t *dev,
+					 drm_r128_depth_t *depth )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, x, y;
+	u32 *buffer;
+	u8 *mask;
+	u32 depth_bpp;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	switch ( dev_priv->depth_bpp ) {
+	case 16:
+		depth_bpp = R128_GMC_DST_16BPP;
+		break;
+	case 24:
+	case 32:
+		depth_bpp = R128_GMC_DST_32BPP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	count = depth->n;
+	if ( copy_from_user( &x, depth->x, sizeof(x) ) ) {
+		return -EFAULT;
+	}
+	if ( copy_from_user( &y, depth->y, sizeof(y) ) ) {
+		return -EFAULT;
+	}
+
+	buffer = kmalloc( depth->n * sizeof(u32), 0 );
+	if ( buffer == NULL )
+		return -ENOMEM;
+	if ( copy_from_user( buffer, depth->buffer,
+			     depth->n * sizeof(u32) ) ) {
+		kfree( buffer );
+		return -EFAULT;
+	}
+
+	if ( depth->mask ) {
+		mask = kmalloc( depth->n * sizeof(u8), 0 );
+		if ( mask == NULL ) {
+			kfree( buffer );
+			return -ENOMEM;
+		}
+		if ( copy_from_user( mask, depth->mask,
+				     depth->n * sizeof(u8) ) ) {
+			kfree( buffer );
+			kfree( mask );
+			return -EFAULT;
+		}
+
+		for ( i = 0 ; i < count ; i++, x++ ) {
+			if ( mask[i] ) {
+				BEGIN_RING( 6 );
+
+				OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI,
+						       4 ) );
+				OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL
+					  | R128_GMC_BRUSH_SOLID_COLOR
+					  | depth_bpp
+					  | R128_GMC_SRC_DATATYPE_COLOR
+					  | R128_ROP3_P
+					  | R128_GMC_CLR_CMP_CNTL_DIS
+					  | R128_GMC_WR_MSK_DIS );
+
+				OUT_RING( dev_priv->depth_pitch_offset_c );
+				OUT_RING( buffer[i] );
+
+				OUT_RING( (x << 16) | y );
+				OUT_RING( (1 << 16) | 1 );
+
+				ADVANCE_RING();
+			}
+		}
+
+		kfree( mask );
+	} else {
+		for ( i = 0 ; i < count ; i++, x++ ) {
+			BEGIN_RING( 6 );
+
+			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
+			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL
+				  | R128_GMC_BRUSH_SOLID_COLOR
+				  | depth_bpp
+				  | R128_GMC_SRC_DATATYPE_COLOR
+				  | R128_ROP3_P
+				  | R128_GMC_CLR_CMP_CNTL_DIS
+				  | R128_GMC_WR_MSK_DIS );
+
+			OUT_RING( dev_priv->depth_pitch_offset_c );
+			OUT_RING( buffer[i] );
+
+			OUT_RING( (x << 16) | y );
+			OUT_RING( (1 << 16) | 1 );
+
+			ADVANCE_RING();
+		}
+	}
+
+	kfree( buffer );
+
+	return 0;
+}
+
+static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
+					   drm_r128_depth_t *depth )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, *x, *y;
+	u32 *buffer;
+	u8 *mask;
+	u32 depth_bpp;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	switch ( dev_priv->depth_bpp ) {
+	case 16:
+		depth_bpp = R128_GMC_DST_16BPP;
+		break;
+	case 24:
+	case 32:
+		depth_bpp = R128_GMC_DST_32BPP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	count = depth->n;
+
+	x = kmalloc( count * sizeof(*x), 0 );
+	if ( x == NULL ) {
+		return -ENOMEM;
+	}
+	y = kmalloc( count * sizeof(*y), 0 );
+	if ( y == NULL ) {
+		kfree( x );
+		return -ENOMEM;
+	}
+	if ( copy_from_user( x, depth->x, count * sizeof(int) ) ) {
+		kfree( x );
+		kfree( y );
+		return -EFAULT;
+	}
+	if ( copy_from_user( y, depth->y, count * sizeof(int) ) ) {
+		kfree( x );
+		kfree( y );
+		return -EFAULT;
+	}
+
+	buffer = kmalloc( depth->n * sizeof(u32), 0 );
+	if ( buffer == NULL ) {
+		kfree( x );
+		kfree( y );
+		return -ENOMEM;
+	}
+	if ( copy_from_user( buffer, depth->buffer,
+			     depth->n * sizeof(u32) ) ) {
+		kfree( x );
+		kfree( y );
+		kfree( buffer );
+		return -EFAULT;
+	}
+
+	if ( depth->mask ) {
+		mask = kmalloc( depth->n * sizeof(u8), 0 );
+		if ( mask == NULL ) {
+			kfree( x );
+			kfree( y );
+			kfree( buffer );
+			return -ENOMEM;
+		}
+		if ( copy_from_user( mask, depth->mask,
+				     depth->n * sizeof(u8) ) ) {
+			kfree( x );
+			kfree( y );
+			kfree( buffer );
+			kfree( mask );
+			return -EFAULT;
+		}
+
+		for ( i = 0 ; i < count ; i++ ) {
+			if ( mask[i] ) {
+				BEGIN_RING( 6 );
+
+				OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI,
+						       4 ) );
+				OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL
+					  | R128_GMC_BRUSH_SOLID_COLOR
+					  | depth_bpp
+					  | R128_GMC_SRC_DATATYPE_COLOR
+					  | R128_ROP3_P
+					  | R128_GMC_CLR_CMP_CNTL_DIS
+					  | R128_GMC_WR_MSK_DIS );
+
+				OUT_RING( dev_priv->depth_pitch_offset_c );
+				OUT_RING( buffer[i] );
+
+				OUT_RING( (x[i] << 16) | y[i] );
+				OUT_RING( (1 << 16) | 1 );
+
+				ADVANCE_RING();
+			}
+		}
+
+		kfree( mask );
+	} else {
+		for ( i = 0 ; i < count ; i++ ) {
+			BEGIN_RING( 6 );
+
+			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
+			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL
+				  | R128_GMC_BRUSH_SOLID_COLOR
+				  | depth_bpp
+				  | R128_GMC_SRC_DATATYPE_COLOR
+				  | R128_ROP3_P
+				  | R128_GMC_CLR_CMP_CNTL_DIS
+				  | R128_GMC_WR_MSK_DIS );
+
+			OUT_RING( dev_priv->depth_pitch_offset_c );
+			OUT_RING( buffer[i] );
+
+			OUT_RING( (x[i] << 16) | y[i] );
+			OUT_RING( (1 << 16) | 1 );
+
+			ADVANCE_RING();
+		}
+	}
+
+	kfree( x );
+	kfree( y );
+	kfree( buffer );
+
+	return 0;
+}
+
+static int r128_cce_dispatch_read_span( drm_device_t *dev,
+					drm_r128_depth_t *depth )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, x, y;
+	u32 depth_bpp;
+	RING_LOCALS;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	switch ( dev_priv->depth_bpp ) {
+	case 16:
+		depth_bpp = R128_GMC_DST_16BPP;
+		break;
+	case 24:
+	case 32:
+		depth_bpp = R128_GMC_DST_32BPP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	count = depth->n;
+	if ( copy_from_user( &x, depth->x, sizeof(x) ) ) {
+		return -EFAULT;
+	}
+	if ( copy_from_user( &y, depth->y, sizeof(y) ) ) {
+		return -EFAULT;
+	}
+
+	BEGIN_RING( 7 );
+
+	OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
+	OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL
+		  | R128_GMC_DST_PITCH_OFFSET_CNTL
+		  | R128_GMC_BRUSH_NONE
+		  | depth_bpp
+		  | R128_GMC_SRC_DATATYPE_COLOR
+		  | R128_ROP3_S
+		  | R128_DP_SRC_SOURCE_MEMORY
+		  | R128_GMC_CLR_CMP_CNTL_DIS
+		  | R128_GMC_WR_MSK_DIS );
+
+	OUT_RING( dev_priv->depth_pitch_offset_c );
+	OUT_RING( dev_priv->span_pitch_offset_c );
+
+	OUT_RING( (x << 16) | y );
+	OUT_RING( (0 << 16) | 0 );
+	OUT_RING( (count << 16) | 1 );
+
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
+					  drm_r128_depth_t *depth )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, *x, *y;
+	u32 depth_bpp;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	switch ( dev_priv->depth_bpp ) {
+	case 16:
+		depth_bpp = R128_GMC_DST_16BPP;
+		break;
+	case 24:
+	case 32:
+		depth_bpp = R128_GMC_DST_32BPP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	count = depth->n;
+	if ( count > dev_priv->depth_pitch ) {
+		count = dev_priv->depth_pitch;
+	}
+
+	x = kmalloc( count * sizeof(*x), 0 );
+	if ( x == NULL ) {
+		return -ENOMEM;
+	}
+	y = kmalloc( count * sizeof(*y), 0 );
+	if ( y == NULL ) {
+		kfree( x );
+		return -ENOMEM;
+	}
+	if ( copy_from_user( x, depth->x, count * sizeof(int) ) ) {
+		kfree( x );
+		kfree( y );
+		return -EFAULT;
+	}
+	if ( copy_from_user( y, depth->y, count * sizeof(int) ) ) {
+		kfree( x );
+		kfree( y );
+		return -EFAULT;
+	}
+
+	for ( i = 0 ; i < count ; i++ ) {
+		BEGIN_RING( 7 );
+
+		OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
+		OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL
+			  | R128_GMC_DST_PITCH_OFFSET_CNTL
+			  | R128_GMC_BRUSH_NONE
+			  | depth_bpp
+			  | R128_GMC_SRC_DATATYPE_COLOR
+			  | R128_ROP3_S
+			  | R128_DP_SRC_SOURCE_MEMORY
+			  | R128_GMC_CLR_CMP_CNTL_DIS
+			  | R128_GMC_WR_MSK_DIS );
+
+		OUT_RING( dev_priv->depth_pitch_offset_c );
+		OUT_RING( dev_priv->span_pitch_offset_c );
+
+		OUT_RING( (x[i] << 16) | y[i] );
+		OUT_RING( (i << 16) | 0 );
+		OUT_RING( (1 << 16) | 1 );
+
+		ADVANCE_RING();
+	}
+
+	kfree( x );
+	kfree( y );
+
+	return 0;
+}
+
+
+/* ================================================================
+ * Polygon stipple
+ */
+
+static void r128_cce_dispatch_stipple( drm_device_t *dev, u32 *stipple )
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	r128_update_ring_snapshot( dev_priv );
+
+	BEGIN_RING( 33 );
+
+	OUT_RING(  CCE_PACKET0( R128_BRUSH_DATA0, 31 ) );
+	for ( i = 0 ; i < 32 ; i++ ) {
+		OUT_RING( stipple[i] );
+	}
+
+	ADVANCE_RING();
+}
+
+
+/* ================================================================
+ * IOCTL functions
+ */
+
+int r128_cce_clear( struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_clear_t clear;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "r128_cce_clear called without lock held\n" );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &clear, (drm_r128_clear_t *) arg,
+			     sizeof(clear) ) )
+		return -EFAULT;
+
+	if ( sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS )
+		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
+
+	r128_cce_dispatch_clear( dev, clear.flags,
+				 clear.x, clear.y, clear.w, clear.h,
+				 clear.clear_color, clear.clear_depth );
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
+
+	return 0;
+}
+
+int r128_cce_swap( struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "r128_cce_swap called without lock held\n" );
+		return -EINVAL;
+	}
+
+	if ( sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS )
+		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
+
+	r128_cce_dispatch_swap( dev );
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
+
+	return 0;
+}
+
+int r128_cce_vertex( struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_t *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_vertex_t vertex;
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+	if ( !dev_priv || dev_priv->is_pci ) {
+		DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &vertex, (drm_r128_vertex_t *)arg,
+			     sizeof(vertex) ) )
+		return -EFAULT;
+
+	DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n",
+		   __FUNCTION__, current->pid,
+		   vertex.idx, vertex.count, vertex.discard );
+
+	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
+		DRM_ERROR( "buffer index %d (of %d max)\n",
+			   vertex.idx, dma->buf_count - 1 );
+		return -EINVAL;
+	}
+	if ( vertex.prim < 0 ||
+	     vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) {
+		DRM_ERROR( "buffer prim %d\n", vertex.prim );
+		return -EINVAL;
+	}
+
+	buf = dma->buflist[vertex.idx];
+	buf_priv = buf->dev_private;
+
+	if ( buf->pid != current->pid ) {
+		DRM_ERROR( "process %d using buffer owned by %d\n",
+			   current->pid, buf->pid );
+		return -EINVAL;
+	}
+	if ( buf->pending ) {
+		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
+		return -EINVAL;
+	}
+
+	buf->used = vertex.count;
+	buf_priv->prim = vertex.prim;
+	buf_priv->discard = vertex.discard;
+
+	r128_cce_dispatch_vertex( dev, buf );
+
+	return 0;
+}
+
+int r128_cce_indices( struct inode *inode, struct file *filp,
+		      unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_t *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_indices_t elts;
+	int count;
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+	if ( !dev_priv || dev_priv->is_pci ) {
+		DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &elts, (drm_r128_indices_t *)arg,
+			     sizeof(elts) ) )
+		return -EFAULT;
+
+	DRM_DEBUG( "%s: pid=%d buf=%d s=%d e=%d d=%d\n",
+		   __FUNCTION__, current->pid,
+		   elts.idx, elts.start, elts.end, elts.discard );
+
+	if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
+		DRM_ERROR( "buffer index %d (of %d max)\n",
+			   elts.idx, dma->buf_count - 1 );
+		return -EINVAL;
+	}
+	if ( elts.prim < 0 ||
+	     elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) {
+		DRM_ERROR( "buffer prim %d\n", elts.prim );
+		return -EINVAL;
+	}
+
+	buf = dma->buflist[elts.idx];
+	buf_priv = buf->dev_private;
+
+	if ( buf->pid != current->pid ) {
+		DRM_ERROR( "process %d using buffer owned by %d\n",
+			   current->pid, buf->pid );
+		return -EINVAL;
+	}
+	if ( buf->pending ) {
+		DRM_ERROR( "sending pending buffer %d\n", elts.idx );
+		return -EINVAL;
+	}
+
+	count = (elts.end - elts.start) / sizeof(u16);
+	elts.start -= R128_INDEX_PRIM_OFFSET;
+
+	if ( elts.start & 0x7 ) {
+		DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
+		return -EINVAL;
+	}
+	if ( elts.start < buf->used ) {
+		DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
+		return -EINVAL;
+	}
+
+	buf->used = elts.end;
+	buf_priv->prim = elts.prim;
+	buf_priv->discard = elts.discard;
+
+	r128_cce_dispatch_indices( dev, buf, elts.start, elts.end, count );
+
+	return 0;
+}
+
+int r128_cce_blit( struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_device_dma_t *dma = dev->dma;
+	drm_r128_blit_t blit;
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &blit, (drm_r128_blit_t *)arg,
+			     sizeof(blit) ) )
+		return -EFAULT;
+
+	DRM_DEBUG( "%s: pid=%d index=%d\n",
+		   __FUNCTION__, current->pid, blit.idx );
+
+	if ( blit.idx < 0 || blit.idx >= dma->buf_count ) {
+		DRM_ERROR( "buffer index %d (of %d max)\n",
+			   blit.idx, dma->buf_count - 1 );
+		return -EINVAL;
+	}
+
+	return r128_cce_dispatch_blit( dev, &blit );
+}
+
+int r128_cce_depth( struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_r128_depth_t depth;
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &depth, (drm_r128_depth_t *)arg,
+			     sizeof(depth) ) )
+		return -EFAULT;
+
+	switch ( depth.func ) {
+	case R128_WRITE_SPAN:
+		return r128_cce_dispatch_write_span( dev, &depth );
+	case R128_WRITE_PIXELS:
+		return r128_cce_dispatch_write_pixels( dev, &depth );
+	case R128_READ_SPAN:
+		return r128_cce_dispatch_read_span( dev, &depth );
+	case R128_READ_PIXELS:
+		return r128_cce_dispatch_read_pixels( dev, &depth );
+	}
+
+	return -EINVAL;
+}
+
+int r128_cce_stipple( struct inode *inode, struct file *filp,
+		      unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_r128_stipple_t stipple;
+	u32 mask[32];
+
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+	     dev->lock.pid != current->pid ) {
+		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &stipple, (drm_r128_stipple_t *)arg,
+			     sizeof(stipple) ) )
+		return -EFAULT;
+
+	if ( copy_from_user( &mask, stipple.mask,
+			     32 * sizeof(u32) ) )
+		return -EFAULT;
+
+	r128_cce_dispatch_stipple( dev, mask );
+
+	return 0;
+}
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/joystick/iforce.c linux/drivers/char/joystick/iforce.c
--- v2.4.0-prerelease/linux/drivers/char/joystick/iforce.c	Sun Nov 19 18:44:06 2000
+++ linux/drivers/char/joystick/iforce.c	Thu Jan  4 13:15:32 2001
@@ -217,11 +217,8 @@
 }
 
 static struct usb_device_id iforce_usb_ids [] = {
-    {
-	idVendor: USB_VENDOR_ID_LOGITECH,
-	idProduct: USB_DEVICE_ID_LOGITECH_WMFORCE
-    },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WMFORCE) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, iforce_usb_ids);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/mem.c linux/drivers/char/mem.c
--- v2.4.0-prerelease/linux/drivers/char/mem.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/char/mem.c	Thu Jan  4 13:00:55 2001
@@ -145,9 +145,12 @@
 #elif defined(__powerpc__)
 	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
 #elif defined(__mc68000__)
+#ifdef SUN3_PAGE_NOCACHE
 	if (MMU_IS_SUN3)
 		prot |= SUN3_PAGE_NOCACHE;
-	else if (MMU_IS_851 || MMU_IS_030)
+	else
+#endif
+	if (MMU_IS_851 || MMU_IS_030)
 		prot |= _PAGE_NOCACHE030;
 	/* Use no-cache mode, serialized */
 	else if (MMU_IS_040 || MMU_IS_060)
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/nvram.c linux/drivers/char/nvram.c
--- v2.4.0-prerelease/linux/drivers/char/nvram.c	Sun Oct  8 10:50:16 2000
+++ linux/drivers/char/nvram.c	Thu Jan  4 12:50:17 2001
@@ -107,8 +107,6 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
-extern spinlock_t rtc_lock;
-
 static int nvram_open_cnt;	/* #times opened */
 static int nvram_open_mode;		/* special open modes */
 #define	NVRAM_WRITE		1		/* opened for writing (exclusive) */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/pcxx.c linux/drivers/char/pcxx.c
--- v2.4.0-prerelease/linux/drivers/char/pcxx.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/char/pcxx.c	Mon Jan  1 10:34:54 2001
@@ -1823,7 +1823,7 @@
  */
 static void pcxxdelay(int msec)
 {
-	mdelay(mseconds);
+	mdelay(msec);
 }
 
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/riscom8.c linux/drivers/char/riscom8.c
--- v2.4.0-prerelease/linux/drivers/char/riscom8.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/char/riscom8.c	Thu Jan  4 12:50:12 2001
@@ -1822,16 +1822,20 @@
  * addresses in this case.
  *
  */ 
-static void __init riscom8_setup(char *str, int * ints)
+static int __init riscom8_setup(char *str)
 {
+	int ints[RC_NBOARD];
 	int i;
 
+	str = get_options(str, ARRAY_SIZE(ints), ints);
+
 	for (i = 0; i < RC_NBOARD; i++) {
 		if (i < ints[0])
 			rc_board[i].base = ints[i+1];
 		else 
 			rc_board[i].base = 0;
 	}
+	return 1;
 }
 
 __setup("riscom8=", riscom8_setup);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/rtc.c linux/drivers/char/rtc.c
--- v2.4.0-prerelease/linux/drivers/char/rtc.c	Sun Nov 19 18:44:07 2000
+++ linux/drivers/char/rtc.c	Thu Jan  4 12:50:17 2001
@@ -89,8 +89,6 @@
 
 static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
 
-extern spinlock_t rtc_lock;
-
 static struct timer_list rtc_irq_timer;
 
 static loff_t rtc_llseek(struct file *file, loff_t offset, int origin);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/char/vt.c linux/drivers/char/vt.c
--- v2.4.0-prerelease/linux/drivers/char/vt.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/char/vt.c	Thu Jan  4 13:00:55 2001
@@ -27,6 +27,10 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
+#if defined(__mc68000__) || defined(CONFIG_APUS)
+#include <asm/machdep.h>
+#endif
+
 #include <linux/kbd_kern.h>
 #include <linux/vt_kern.h>
 #include <linux/kbd_diacr.h>
@@ -491,6 +495,27 @@
 	case KDDISABIO:
 		return sys_ioperm(GPFIRST, GPNUM,
 				  (cmd == KDENABIO)) ? -ENXIO : 0;
+#endif
+
+#if defined(__mc68000__) || defined(CONFIG_APUS)
+	/* Linux/m68k interface for setting the keyboard delay/repeat rate */
+		
+	case KDKBDREP:
+	{
+		struct kbd_repeat kbrep;
+		
+		if (!mach_kbdrate) return( -EINVAL );
+		if (!suser()) return( -EPERM );
+
+		if (copy_from_user(&kbrep, (void *)arg,
+				   sizeof(struct kbd_repeat)))
+			return -EFAULT;
+		if ((i = mach_kbdrate( &kbrep ))) return( i );
+		if (copy_to_user((void *)arg, &kbrep,
+				 sizeof(struct kbd_repeat)))
+			return -EFAULT;
+		return 0;
+	}
 #endif
 
 	case KDSETMODE:
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/cs5530.c linux/drivers/ide/cs5530.c
--- v2.4.0-prerelease/linux/drivers/ide/cs5530.c	Tue Jun 20 07:52:36 2000
+++ linux/drivers/ide/cs5530.c	Tue Jan  2 16:58:45 2001
@@ -257,6 +257,14 @@
 	unsigned short pcicmd = 0;
 	unsigned long flags;
 
+#if defined(DISPLAY_CS5530_TIMINGS) && defined(CONFIG_PROC_FS)
+	if (!cs5530_proc) {
+		cs5530_proc = 1;
+		bmide_dev = dev;
+		cs5530_display_info = &cs5530_get_info;
+	}
+#endif /* DISPLAY_CS5530_TIMINGS && CONFIG_PROC_FS */
+
 	pci_for_each_dev (dev) {
 		if (dev->vendor == PCI_VENDOR_ID_CYRIX) {
 			switch (dev->device) {
@@ -326,14 +334,6 @@
 	pci_write_config_byte(master_0, 0x43, 0xc1);
 
 	restore_flags(flags);
-
-#if defined(DISPLAY_CS5530_TIMINGS) && defined(CONFIG_PROC_FS)
-	if (!cs5530_proc) {
-		cs5530_proc = 1;
-		bmide_dev = dev;
-		cs5530_display_info = &cs5530_get_info;
-	}
-#endif /* DISPLAY_CS5530_TIMINGS && CONFIG_PROC_FS */
 
 	return 0;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/hd.c linux/drivers/ide/hd.c
--- v2.4.0-prerelease/linux/drivers/ide/hd.c	Sun Nov 19 18:44:07 2000
+++ linux/drivers/ide/hd.c	Thu Jan  4 12:50:17 2001
@@ -738,6 +738,7 @@
 	if (!NR_HD) {
 		extern struct drive_info drive_info;
 		unsigned char *BIOS = (unsigned char *) &drive_info;
+		unsigned long flags;
 		int cmos_disks;
 
 		for (drive=0 ; drive<2 ; drive++) {
@@ -773,10 +774,15 @@
 		Needless to say, a non-zero value means we have 
 		an AT controller hard disk for that drive.
 
-		
+		Currently the rtc_lock is a bit academic since this
+		driver is non-modular, but someday... ?         Paul G.
 	*/
 
-		if ((cmos_disks = CMOS_READ(0x12)) & 0xf0) {
+		spin_lock_irqsave(&rtc_lock, flags);
+		cmos_disks = CMOS_READ(0x12);
+		spin_unlock_irqrestore(&rtc_lock, flags);
+
+		if (cmos_disks & 0xf0) {
 			if (cmos_disks & 0x0f)
 				NR_HD = 2;
 			else
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/hpt366.c linux/drivers/ide/hpt366.c
--- v2.4.0-prerelease/linux/drivers/ide/hpt366.c	Sun Nov 19 18:44:07 2000
+++ linux/drivers/ide/hpt366.c	Tue Jan  2 16:58:45 2001
@@ -346,6 +346,9 @@
 
 static int hpt3xx_tune_chipset (ide_drive_t *drive, byte speed)
 {
+	if ((drive->media != ide_disk) && (speed < XFER_SW_DMA_0))
+		return -1;
+
 	if (!drive->init_speed)
 		drive->init_speed = speed;
 
@@ -428,6 +431,9 @@
 	byte ultra66		= eighty_ninty_three(drive);
 	int  rval;
 
+	if ((drive->media != ide_disk) && (speed < XFER_SW_DMA_0))
+		return ((int) ide_dma_off_quietly);
+
 	if ((id->dma_ultra & 0x0020) &&
 	    (!check_in_drive_lists(drive, bad_ata100_5)) &&
 	    (HPT370_ALLOW_ATA100_5) &&
@@ -617,8 +623,14 @@
 		pci_write_config_byte(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
 
 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &test);
+
+#if 0
 	if (test != 0x08)
 		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x08);
+#else
+	if (test != (L1_CACHE_BYTES / 4))
+		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+#endif
 
 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &test);
 	if (test != 0x78)
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/ide-cd.c linux/drivers/ide/ide-cd.c
--- v2.4.0-prerelease/linux/drivers/ide/ide-cd.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/ide/ide-cd.c	Tue Jan  2 16:59:17 2001
@@ -513,9 +513,9 @@
 				      struct request_sense *sense,
 				      struct packet_command *failed_command)
 {
-	struct cdrom_info *info = drive->driver_data;
+	struct cdrom_info *info		= drive->driver_data;
+	struct packet_command *pc	= &info->request_sense_pc;
 	struct request *rq;
-	struct packet_command *pc = &info->request_sense_pc;
 
 	if (sense == NULL)
 		sense = &info->sense_data;
@@ -541,13 +541,14 @@
 	struct request *rq = HWGROUP(drive)->rq;
 
 	if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate) {
-		struct packet_command *pc = (struct packet_command *)rq->buffer;
+		struct packet_command *pc = (struct packet_command *) rq->buffer;
 		cdrom_analyze_sense_data(drive,
 			(struct packet_command *) pc->sense,
 			(struct request_sense *) (pc->buffer - pc->c[4]));
 	}
-	if (rq->cmd == READ && !rq->current_nr_sectors)
-		uptodate = 1;
+	if (rq->cmd == READ || rq->cmd == WRITE)
+		if (!rq->current_nr_sectors)
+			uptodate = 1;
 
 	ide_end_request (uptodate, HWGROUP(drive));
 }
@@ -628,7 +629,7 @@
 		if ((stat & ERR_STAT) != 0)
 			cdrom_queue_request_sense(drive, sem, pc->sense, pc);
 	} else {
-		/* Handle errors from READ requests. */
+		/* Handle errors from READ and WRITE requests. */
 
 		if (sense_key == NOT_READY) {
 			/* Tray open. */
@@ -679,12 +680,22 @@
 	struct packet_command *pc = (struct packet_command *) rq->buffer;
 	unsigned long wait = 0;
 
-	/* blank and format can take an extremly long time to
-	 * complete, if the IMMED bit was not set.
+	/*
+	 * Some commands are *slow* and normally take a long time to
+	 * complete. Usually we can use the ATAPI "disconnect" to bypass
+	 * this, but not all commands/drives support that. Let
+	 * ide_timer_expiry keep polling us for these.
 	 */
-	if (pc->c[0] == GPCMD_BLANK || pc->c[0] == GPCMD_FORMAT_UNIT)
-		wait = 60*60*HZ;
-
+	switch (pc->c[0]) {
+		case GPCMD_BLANK:
+		case GPCMD_FORMAT_UNIT:
+		case GPCMD_RESERVE_RZONE_TRACK:
+			wait = WAIT_CMD;
+			break;
+		default:
+			wait = 0;
+			break;
+	}
 	return wait;
 }
 
@@ -706,8 +717,15 @@
 	if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
 		return startstop;
 
-	if (info->dma)
-		info->dma = !HWIF(drive)->dmaproc(ide_dma_read, drive);
+	if (info->dma) {
+		if (info->cmd == READ) {
+			info->dma = !HWIF(drive)->dmaproc(ide_dma_read, drive);
+		} else if (info->cmd == WRITE) {
+			info->dma = !HWIF(drive)->dmaproc(ide_dma_write, drive);
+		} else {
+			printk("ide-cd: DMA set, but not allowed\n");
+		}
+	}
 
 	/* Set up the controller registers. */
 	OUT_BYTE (info->dma, IDE_FEATURE_REG);
@@ -737,11 +755,20 @@
    by cdrom_start_packet_command.
    HANDLER is the interrupt handler to call when the command completes
    or there's data ready. */
+/*
+ * changed 5 parameters to 3 for dvd-ram
+ * struct packet_command *pc; now packet_command_t *pc;
+ */
+#undef CLASSIC_PACKET_STRUCT
 static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
-                                          unsigned char *cmd_buf, int cmd_len,
-					  ide_handler_t *handler,
-					  unsigned int timeout)
+					  struct packet_command *pc,
+					  ide_handler_t *handler)
 {
+#ifdef CLASSIC_PACKET_STRUCT
+	unsigned char *cmd_buf	= pc->c;
+	int cmd_len		= sizeof(pc->c);
+	unsigned int timeout	= pc->timeout;
+#endif
 	ide_startstop_t startstop;
 
 	if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
@@ -759,16 +786,25 @@
 	}
 
 	/* Arm the interrupt handler. */
+#ifdef CLASSIC_PACKET_STRUCT
+	/* Arm the interrupt handler. */
 	ide_set_handler (drive, handler, timeout, cdrom_timer_expiry);
 
 	/* Send the command to the device. */
 	atapi_output_bytes (drive, cmd_buf, cmd_len);
+#else /* !CLASSIC_PACKET_STRUCT */
+	/* Arm the interrupt handler. */
+//	ide_set_handler (drive, handler, (unsigned int) pc->timeout, cdrom_timer_expiry);
+	ide_set_handler (drive, handler, pc->timeout, cdrom_timer_expiry);
+
+	/* Send the command to the device. */
+//	atapi_output_bytes (drive, (void *)pc->c, (unsigned int) sizeof(pc->c));
+	atapi_output_bytes (drive, pc->c, sizeof(pc->c));
+#endif /* CLASSIC_PACKET_STRUCT */
 
 	return ide_started;
 }
 
-
-
 /****************************************************************************
  * Block read functions.
  */
@@ -1101,10 +1137,10 @@
 	pc.c[7] = (nframes >> 8);
 	pc.c[8] = (nframes & 0xff);
 	put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]);
+	pc.timeout = WAIT_CMD;
 
 	/* Send the command to the drive and return. */
-	return cdrom_transfer_packet_command(drive, pc.c, sizeof(pc.c),
-					     &cdrom_read_intr, WAIT_CMD);
+	return cdrom_transfer_packet_command(drive, &pc, &cdrom_read_intr);
 }
 
 
@@ -1153,7 +1189,9 @@
 	memset (&pc.c, 0, sizeof (pc.c));
 	pc.c[0] = GPCMD_SEEK;
 	put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]);
-	return cdrom_transfer_packet_command(drive, pc.c, sizeof(pc.c), &cdrom_seek_intr, WAIT_CMD);
+
+	pc.timeout = WAIT_CMD;
+	return cdrom_transfer_packet_command(drive, &pc, &cdrom_seek_intr);
 }
 
 static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block)
@@ -1161,6 +1199,7 @@
 	struct cdrom_info *info = drive->driver_data;
 
 	info->dma = 0;
+	info->cmd = 0;
 	info->start_seek = jiffies;
 	return cdrom_start_packet_command (drive, 0, cdrom_start_seek_continuation);
 }
@@ -1213,6 +1252,7 @@
 	else
 		info->dma = 0;
 
+	info->cmd = READ;
 	/* Start sending the read request to the drive. */
 	return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation);
 }
@@ -1332,8 +1372,7 @@
 		pc->timeout = WAIT_CMD;
 
 	/* Send the command to the drive and return. */
-	return cdrom_transfer_packet_command(drive, pc->c, sizeof(pc->c),
-					     &cdrom_pc_intr, pc->timeout);
+	return cdrom_transfer_packet_command(drive, pc, &cdrom_pc_intr);
 }
 
 
@@ -1345,6 +1384,7 @@
 	struct cdrom_info *info = drive->driver_data;
 
 	info->dma = 0;
+	info->cmd = 0;
 	pc->stat = 0;
 	len = pc->buflen;
 
@@ -1414,6 +1454,162 @@
 	return pc->stat ? -EIO : 0;
 }
 
+/*
+ * Write handling
+ */
+static inline int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason)
+{
+	/* Two notes about IDE interrupt reason here - 0 means that
+	 * the drive wants to receive data from us, 2 means that
+	 * the drive is expecting data from us.
+	 */
+	ireason &= 3;
+
+	if (ireason == 2) {
+		/* Whoops... The drive wants to send data. */
+		printk("%s: cdrom_write_intr: wrong transfer direction!\n",
+			drive->name);
+
+		/* Throw some data at the drive so it doesn't hang
+		   and quit this request. */
+		while (len > 0) {
+			int dum = 0;
+			atapi_output_bytes(drive, &dum, sizeof(dum));
+			len -= sizeof(dum);
+		}
+	} else {
+		/* Drive wants a command packet, or invalid ireason... */
+		printk("%s: cdrom_write_intr: bad interrupt reason %d\n",
+			drive->name, ireason);
+	}
+
+	cdrom_end_request(0, drive);
+	return 1;
+}
+
+static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
+{
+	int stat, ireason, len, sectors_to_transfer;
+	struct cdrom_info *info = drive->driver_data;
+	int i, dma_error = 0, dma = info->dma;
+	ide_startstop_t startstop;
+
+	struct request *rq = HWGROUP(drive)->rq;
+
+	/* Check for errors. */
+	if (dma) {
+		info->dma = 0;
+		if ((dma_error = HWIF(drive)->dmaproc(ide_dma_end, drive))) {
+			printk("ide-cd: write dma error\n");
+			HWIF(drive)->dmaproc(ide_dma_off, drive);
+		}
+	}
+
+	if (cdrom_decode_status(&startstop, drive, 0, &stat)) {
+		printk("ide-cd: write_intr decode_status bad\n");
+		return startstop;
+	}
+ 
+	if (dma) {
+		if (dma_error)
+			return ide_error(drive, "dma error", stat);
+
+		rq = HWGROUP(drive)->rq;
+		for (i = rq->nr_sectors; i > 0;) {
+			i -= rq->current_nr_sectors;
+			ide_end_request(1, HWGROUP(drive));
+		}
+		return ide_stopped;
+	}
+
+	/* Read the interrupt reason and the transfer length. */
+	ireason = IN_BYTE(IDE_NSECTOR_REG);
+	len = IN_BYTE(IDE_LCYL_REG) + 256 * IN_BYTE(IDE_HCYL_REG);
+
+	/* If DRQ is clear, the command has completed. */
+	if ((stat & DRQ_STAT) == 0) {
+		/* If we're not done writing, complain.
+		 * Otherwise, complete the command normally.
+		 */
+		if (rq->current_nr_sectors > 0) {
+			printk("%s: write_intr: data underrun (%ld blocks)\n",
+				drive->name, rq->current_nr_sectors);
+			cdrom_end_request(0, drive);
+		} else
+			cdrom_end_request(1, drive);
+		return ide_stopped;
+	}
+
+	/* Check that the drive is expecting to do the same thing we are. */
+	if (ireason & 3)
+		if (cdrom_write_check_ireason(drive, len, ireason))
+			return ide_stopped;
+
+	/* The number of sectors we need to transfer to the drive. */
+	sectors_to_transfer = len / SECTOR_SIZE;
+
+	/* Now loop while we still have data to write to the drive. DMA
+	 * transfers will already have been completed
+	 */
+	while (sectors_to_transfer > 0) {
+		/* If we've filled the present buffer but there's another
+		   chained buffer after it, move on. */
+		if (rq->current_nr_sectors == 0 && rq->nr_sectors > 0)
+			cdrom_end_request(1, drive);
+
+		atapi_output_bytes(drive, rq->buffer, rq->current_nr_sectors);
+		rq->nr_sectors -= rq->current_nr_sectors;
+		rq->current_nr_sectors = 0;
+		rq->sector += rq->current_nr_sectors;
+		sectors_to_transfer -= rq->current_nr_sectors;
+	}
+
+	/* arm handler */
+	ide_set_handler(drive, &cdrom_write_intr, 5 * WAIT_CMD, NULL);
+	return ide_started;
+}
+
+static ide_startstop_t cdrom_start_write_cont(ide_drive_t *drive)
+{
+	struct packet_command pc;	/* packet_command_t pc; */
+	struct request *rq = HWGROUP(drive)->rq;
+	unsigned nframes, frame;
+
+	nframes = rq->nr_sectors >> 2;
+	frame = rq->sector >> 2;
+
+	memset(&pc.c, 0, sizeof(pc.c));
+	/*
+	 * we might as well use WRITE_12, but none of the device I have
+	 * support the streaming feature anyway, so who cares.
+	 */
+	pc.c[0] = GPCMD_WRITE_10;
+#if 0	/* the immediate bit */
+	pc.c[1] = 1 << 3;
+#endif
+	pc.c[7] = (nframes >> 8) & 0xff;
+	pc.c[8] = nframes & 0xff;
+	put_unaligned(cpu_to_be32(frame), (unsigned int *)&pc.c[2]);
+	pc.timeout = 2 * WAIT_CMD;
+
+	return cdrom_transfer_packet_command(drive, &pc, cdrom_write_intr);
+}
+
+static ide_startstop_t cdrom_start_write(ide_drive_t *drive)
+{
+	struct cdrom_info *info = drive->driver_data;
+
+	info->nsectors_buffered = 0;
+
+        /* use dma, if possible. we don't need to check more, since we
+	 * know that the transfer is always (at least!) 2KB aligned */
+	info->dma = drive->using_dma ? 1 : 0;
+	info->cmd = WRITE;
+
+	/* Start sending the read request to the drive. */
+	return cdrom_start_packet_command(drive, 32768, cdrom_start_write_cont);
+}
+
 /****************************************************************************
  * cdrom driver request routine.
  */
@@ -1424,6 +1620,7 @@
 	struct cdrom_info *info = drive->driver_data;
 
 	switch (rq->cmd) {
+		case WRITE:
 		case READ: {
 			if (CDROM_CONFIG_FLAGS(drive)->seeking) {
 				unsigned long elpased = jiffies - info->start_seek;
@@ -1440,8 +1637,12 @@
 			}
 			if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap)
 				action = cdrom_start_seek (drive, block);
-			else
-				action = cdrom_start_read (drive, block);
+			else {
+				if (rq->cmd == READ)
+					action = cdrom_start_read(drive, block);
+				else
+					action = cdrom_start_write(drive);
+			}
 			info->last_block = block;
 			return action;
 		}
@@ -1457,7 +1658,7 @@
 		}
 
 		default: {
-			printk("ide-cd: bad cmd %d\n", rq -> cmd);
+			printk("ide-cd: bad cmd %d\n", rq->cmd);
 			cdrom_end_request(0, drive);
 			return ide_stopped;
 		}
@@ -1849,8 +2050,9 @@
 	pc.c[2] = (speed >> 8) & 0xff;	
 	/* Read Drive speed in kbytes/second LSB */
 	pc.c[3] = speed & 0xff;
-	if ( CDROM_CONFIG_FLAGS(drive)->cd_r ||
-                   CDROM_CONFIG_FLAGS(drive)->cd_rw ) {
+	if (CDROM_CONFIG_FLAGS(drive)->cd_r ||
+	    CDROM_CONFIG_FLAGS(drive)->cd_rw ||
+	    CDROM_CONFIG_FLAGS(drive)->dvd_r) {
 		/* Write Drive speed in kbytes/second MSB */
 		pc.c[4] = (speed >> 8) & 0xff;
 		/* Write Drive speed in kbytes/second LSB */
@@ -1902,10 +2104,6 @@
 	return 0;
 }
 
-
-
-
-
 /* the generic packet interface to cdrom.c */
 static int ide_cdrom_packet(struct cdrom_device_info *cdi,
 			    struct cdrom_generic_command *cgc)
@@ -2441,6 +2639,9 @@
 	int minor = drive->select.b.unit << PARTN_BITS;
 	int nslots;
 
+	/*
+	 * default to read-only always and fix it later at the bottom
+	 */
 	set_device_ro(MKDEV(HWIF(drive)->major, minor), 1);
 	set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE);
 
@@ -2559,6 +2760,9 @@
 	info->start_seek	= 0;
 
 	nslots = ide_cdrom_probe_capabilities (drive);
+
+	if (CDROM_CONFIG_FLAGS(drive)->dvd_ram)
+		set_device_ro(MKDEV(HWIF(drive)->major, minor), 0);
 
 	if (ide_cdrom_register (drive, nslots)) {
 		printk ("%s: ide_cdrom_setup failed to register device with the cdrom driver.\n", drive->name);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/ide-cd.h linux/drivers/ide/ide-cd.h
--- v2.4.0-prerelease/linux/drivers/ide/ide-cd.h	Mon Jan  1 09:38:35 2001
+++ linux/drivers/ide/ide-cd.h	Thu Jan  4 13:55:20 2001
@@ -478,6 +478,7 @@
 	struct request request_sense_request;
 	struct packet_command request_sense_pc;
 	int dma;
+	int cmd;
 	unsigned long last_block;
 	unsigned long start_seek;
 	/* Buffer to hold mechanism status and changer slot table. */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/ide-cs.c linux/drivers/ide/ide-cs.c
--- v2.4.0-prerelease/linux/drivers/ide/ide-cs.c	Thu Jul  6 19:25:21 2000
+++ linux/drivers/ide/ide-cs.c	Tue Jan  2 16:45:36 2001
@@ -341,7 +341,7 @@
     }
     
     if (hd < 0) {
-	printk(KERN_NOTICE "ide_cs: ide_register() at 0x%3x & 0x%3x"
+	printk(KERN_NOTICE "ide_cs: ide_register() at 0x%03x & 0x%03x"
 	       ", irq %u failed\n", io_base, ctl_base,
 	       link->irq.AssignedIRQ);
 	goto failed;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/ide-dma.c linux/drivers/ide/ide-dma.c
--- v2.4.0-prerelease/linux/drivers/ide/ide-dma.c	Thu Jul 27 16:40:57 2000
+++ linux/drivers/ide/ide-dma.c	Tue Jan  2 16:58:45 2001
@@ -90,6 +90,8 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 
+#undef CONFIG_BLK_DEV_IDEDMA_TIMEOUT
+
 extern char *ide_dmafunc_verbose(ide_dma_action_t dmafunc);
 
 #ifdef CONFIG_IDEDMA_NEW_DRIVE_LISTINGS
@@ -265,6 +267,12 @@
 		cur_addr = sg_dma_address(sg);
 		cur_len = sg_dma_len(sg);
 
+		/*
+		 * Fill in the dma table, without crossing any 64kB boundaries.
+		 * Most hardware requires 16-bit alignment of all blocks,
+		 * but the trm290 requires 32-bit alignment.
+		 */
+
 		while (cur_len) {
 			if (++count >= PRD_ENTRIES) {
 				printk("%s: DMA table too small\n", drive->name);
@@ -515,9 +523,17 @@
 			return check_drive_lists(drive, (func == ide_dma_good_drive));
 		case ide_dma_verbose:
 			return report_drive_dmaing(drive);
+		case ide_dma_timeout:
+#ifdef CONFIG_BLK_DEV_IDEDMA_TIMEOUT
+			/*
+			 * Have to issue an abort and requeue the request
+			 * DMA engine got turned off by a goofy ASIC, and
+			 * we have to clean up the mess, and here is as good
+			 * as any.  Do it globally for all chipsets.
+			 */
+#endif /* CONFIG_BLK_DEV_IDEDMA_TIMEOUT */
 		case ide_dma_retune:
 		case ide_dma_lostirq:
-		case ide_dma_timeout:
 			printk("ide_dmaproc: chipset supported %s func only: %d\n", ide_dmafunc_verbose(func),  func);
 			return 1;
 		default:
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/ide-geometry.c linux/drivers/ide/ide-geometry.c
--- v2.4.0-prerelease/linux/drivers/ide/ide-geometry.c	Sun Aug  6 11:30:07 2000
+++ linux/drivers/ide/ide-geometry.c	Thu Jan  4 12:50:17 2001
@@ -3,6 +3,7 @@
  */
 #include <linux/config.h>
 #include <linux/ide.h>
+#include <linux/mc146818rtc.h>
 #include <asm/io.h>
 
 /*
@@ -46,13 +47,15 @@
 	extern struct drive_info_struct drive_info;
 	byte cmos_disks, *BIOS = (byte *) &drive_info;
 	int unit;
+	unsigned long flags;
 
 #ifdef CONFIG_BLK_DEV_PDC4030
 	if (hwif->chipset == ide_pdc4030 && hwif->channel != 0)
 		return;
 #endif /* CONFIG_BLK_DEV_PDC4030 */
-	outb_p(0x12,0x70);		/* specify CMOS address 0x12 */
-	cmos_disks = inb_p(0x71);	/* read the data from 0x12 */
+	spin_lock_irqsave(&rtc_lock, flags);
+	cmos_disks = CMOS_READ(0x12);
+	spin_unlock_irqrestore(&rtc_lock, flags);
 	/* Extract drive geometry from CMOS+BIOS if not already setup */
 	for (unit = 0; unit < MAX_DRIVES; ++unit) {
 		ide_drive_t *drive = &hwif->drives[unit];
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/ide-pci.c linux/drivers/ide/ide-pci.c
--- v2.4.0-prerelease/linux/drivers/ide/ide-pci.c	Sun Nov 19 18:44:07 2000
+++ linux/drivers/ide/ide-pci.c	Tue Jan  2 16:58:45 2001
@@ -561,7 +561,7 @@
 	if (IDE_PCI_DEVID_EQ(d->devid, DEVID_HPT34X)) {
 		/* see comments in hpt34x.c on why..... */
 		char *chipset_names[] = {"HPT343", "HPT345"};
-		strcpy(d->name, chipset_names[(pcicmd & PCI_COMMAND_MEMORY)]);
+		strcpy(d->name, chipset_names[(pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0]);
 		d->bootable = (pcicmd & PCI_COMMAND_MEMORY) ? OFF_BOARD : NEVER_BOARD;
 	}
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/macide.c linux/drivers/ide/macide.c
--- v2.4.0-prerelease/linux/drivers/ide/macide.c	Thu Apr 13 22:54:26 2000
+++ linux/drivers/ide/macide.c	Thu Jan  4 13:00:55 2001
@@ -18,100 +18,129 @@
 #include <linux/hdreg.h>
 #include <linux/delay.h>
 #include <linux/ide.h>
-#include <linux/init.h>
 
 #include <asm/machw.h>
 #include <asm/macintosh.h>
 #include <asm/macints.h>
+#include <asm/mac_baboon.h>
 
-    /*
-     *  Base of the IDE interface (see ATAManager ROM code)
-     */
-
-#define MAC_HD_BASE	0x50f1a000
-
-    /*
-     *  Offsets from the above base (scaling 4)
-     */
-
-#define MAC_HD_DATA	0x00
-#define MAC_HD_ERROR	0x04		/* see err-bits */
-#define MAC_HD_NSECTOR	0x08		/* nr of sectors to read/write */
-#define MAC_HD_SECTOR	0x0c		/* starting sector */
-#define MAC_HD_LCYL	0x10		/* starting cylinder */
-#define MAC_HD_HCYL	0x14		/* high byte of starting cyl */
-#define MAC_HD_SELECT	0x18		/* 101dhhhh , d=drive, hhhh=head */
-#define MAC_HD_STATUS	0x1c		/* see status-bits */
-#define MAC_HD_CONTROL	0x38		/* control/altstatus */
-
-static int __init macide_offsets[IDE_NR_PORTS] = {
-    MAC_HD_DATA, MAC_HD_ERROR, MAC_HD_NSECTOR, MAC_HD_SECTOR, MAC_HD_LCYL,
-    MAC_HD_HCYL, MAC_HD_SELECT, MAC_HD_STATUS, MAC_HD_CONTROL
-};
-
-	/*
-	 * Other registers
-	 */
-
-	/* 
-	 * IDE interrupt status register for both (?) hwifs on Quadra
-	 * Initial setting: 0xc
-	 * Guessing again:
-	 * Bit 0+1: some interrupt flags
-	 * Bit 2+3: some interrupt enable
-	 * Bit 4:   ??
-	 * Bit 5:   IDE interrupt flag (any hwif)
-	 * Bit 6:   maybe IDE interrupt enable (any hwif) ??
-	 * Bit 7:   Any interrupt condition
-	 *
-	 * Only relevant item: bit 5, to be checked by mac_ack_intr
-	 */
+#define IDE_BASE 0x50F1A000	/* Base address of IDE controller */
 
-#define MAC_HD_ISR	0x101
+/*
+ * Generic IDE registers as offsets from the base
+ * These match MkLinux so they should be correct.
+ */
+
+#define IDE_DATA	0x00
+#define IDE_ERROR	0x04	/* see err-bits */
+#define IDE_NSECTOR	0x08	/* nr of sectors to read/write */
+#define IDE_SECTOR	0x0c	/* starting sector */
+#define IDE_LCYL	0x10	/* starting cylinder */
+#define IDE_HCYL	0x14	/* high byte of starting cyl */
+#define IDE_SELECT	0x18	/* 101dhhhh , d=drive, hhhh=head */
+#define IDE_STATUS	0x1c	/* see status-bits */
+#define IDE_CONTROL	0x38	/* control/altstatus */
+
+/*
+ * Mac-specific registers
+ */
+
+/*
+ * this register is odd; it doesn't seem to do much and it's
+ * not word-aligned like virtually every other hardware register
+ * on the Mac...
+ */
+
+#define IDE_IFR		0x101	/* (0x101) IDE interrupt flags on Quadra:
+				 *
+				 * Bit 0+1: some interrupt flags
+				 * Bit 2+3: some interrupt enable
+				 * Bit 4:   ??
+				 * Bit 5:   IDE interrupt flag (any hwif)
+				 * Bit 6:   maybe IDE interrupt enable (any hwif) ??
+				 * Bit 7:   Any interrupt condition
+				 */
+
+volatile unsigned char *ide_ifr = (unsigned char *) (IDE_BASE + IDE_IFR);
+
+static int macide_offsets[IDE_NR_PORTS] = {
+    IDE_DATA, IDE_ERROR,  IDE_NSECTOR, IDE_SECTOR, IDE_LCYL,
+    IDE_HCYL, IDE_SELECT, IDE_STATUS,  IDE_CONTROL
+};
 
-static int mac_ack_intr(ide_hwif_t* hwif)
+int macide_ack_intr(ide_hwif_t* hwif)
 {
-	unsigned char isr;
-	isr = readb(MAC_HD_BASE + MAC_HD_ISR);
-	if (isr & (1<<5)) {
-		writeb(isr & ~(1<<5), MAC_HD_BASE + MAC_HD_ISR);
+	if (*ide_ifr & 0x20) {
+		*ide_ifr &= ~0x20;
 		return 1;
 	}
-
 	return 0;
 }
 
-    /*
-     *  Probe for a Macintosh IDE interface
-     */
+#ifdef CONFIG_BLK_DEV_MAC_MEDIABAY
+static void macide_mediabay_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int state = baboon->mb_status & 0x04;
+
+	printk("macide: media bay %s detected\n", state? "removal":"insertion");
+}
+#endif
+
+/*
+ * Probe for a Macintosh IDE interface
+ */
 
-void __init macide_init(void)
+void macide_init(void)
 {
 	hw_regs_t hw;
 	int index = -1;
 
-	if (!MACH_IS_MAC || macintosh_config->ide_type == 0)
-		return;
-
 	switch (macintosh_config->ide_type) {
 	case MAC_IDE_QUADRA:
-		ide_setup_ports(&hw, (ide_ioreg_t)MAC_HD_BASE, macide_offsets,
-				0, (ide_ioreg_t)(MAC_HD_BASE+MAC_HD_ISR),
-				mac_ack_intr, IRQ_NUBUS_F);
+		ide_setup_ports(&hw, (ide_ioreg_t)IDE_BASE, macide_offsets,
+				0, 0, macide_ack_intr, IRQ_NUBUS_F);
+		index = ide_register_hw(&hw, NULL);
+		break;
+	case MAC_IDE_PB:
+		ide_setup_ports(&hw, (ide_ioreg_t)IDE_BASE, macide_offsets,
+				0, 0, macide_ack_intr, IRQ_NUBUS_C);
+		index = ide_register_hw(&hw, NULL);
+		break;
+	case MAC_IDE_BABOON:
+		ide_setup_ports(&hw, (ide_ioreg_t)BABOON_BASE, macide_offsets,
+				0, 0, NULL, IRQ_BABOON_1);
 		index = ide_register_hw(&hw, NULL);
+		if (index == -1) break;
+		if (macintosh_config->ident == MAC_MODEL_PB190) {
+
+			/* Fix breakage in ide-disk.c: drive capacity	*/
+			/* is not initialized for drives without a 	*/
+			/* hardware ID, and we can't get that without	*/
+			/* probing the drive which freezes a 190.	*/
+
+			ide_drive_t *drive = &ide_hwifs[index].drives[0];
+        		drive->capacity = drive->cyl*drive->head*drive->sect;
+
+#ifdef CONFIG_BLK_DEV_MAC_MEDIABAY
+			request_irq(IRQ_BABOON_2, macide_mediabay_interrupt,
+					IRQ_FLG_FAST, "mediabay",
+					macide_mediabay_interrupt);
+#endif
+		}
 		break;
 
 	default:
-	    ide_setup_ports(&hw, (ide_ioreg_t)MAC_HD_BASE, macide_offsets,
-	    		    0, 0, NULL, IRQ_NUBUS_C);
-	    index = ide_register_hw(&hw, NULL);
-	    break;
+	    return;
 	}
 
         if (index != -1) {
 		if (macintosh_config->ide_type == MAC_IDE_QUADRA)
 			printk("ide%d: Macintosh Quadra IDE interface\n", index);
-		else
+		else if (macintosh_config->ide_type == MAC_IDE_PB)
 			printk("ide%d: Macintosh Powerbook IDE interface\n", index);
+		else if (macintosh_config->ide_type == MAC_IDE_BABOON)
+			printk("ide%d: Macintosh Powerbook Baboon IDE interface\n", index);
+		else
+			printk("ide%d: Unknown Macintosh IDE interface\n", index);
 	}
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/osb4.c linux/drivers/ide/osb4.c
--- v2.4.0-prerelease/linux/drivers/ide/osb4.c	Tue Oct 31 12:42:26 2000
+++ linux/drivers/ide/osb4.c	Tue Jan  2 16:58:45 2001
@@ -60,14 +60,13 @@
 #include <linux/stat.h>
 #include <linux/proc_fs.h>
 
-static byte osb4_revision = 0;
 static struct pci_dev *bmide_dev;
 
-static int osb4_get_info(char *, char **, off_t, int, int);
-extern int (*osb4_display_info)(char *, char **, off_t, int, int); /* ide-proc.c */
+static int osb4_get_info(char *, char **, off_t, int);
+extern int (*osb4_display_info)(char *, char **, off_t, int); /* ide-proc.c */
 extern char *ide_media_verbose(ide_drive_t *);
 
-static int osb4_get_info (char *buffer, char **addr, off_t offset, int count, int dummy)
+static int osb4_get_info (char *buffer, char **addr, off_t offset, int count)
 {
 	char *p = buffer;
 	u32 bibma = pci_resource_start(bmide_dev, 4);
@@ -113,116 +112,202 @@
 }
 #endif  /* defined(DISPLAY_OSB4_TIMINGS) && defined(CONFIG_PROC_FS) */
 
+static byte osb4_revision = 0;
+
 byte osb4_proc = 0;
 
 extern char *ide_xfer_verbose (byte xfer_rate);
 
-static void osb4_tune_drive (ide_drive_t *drive, byte pio)
-{
-        /* command/recover widths */
-	byte timings[]	= { 0x5d, 0x47, 0x34, 0x22, 0x20 };
-	int port		= HWIF(drive)->index ? 0x42 : 0x40;
-
-	pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
-	if (&HWIF(drive)->drives[0] == drive)  /* master drive */
-		port++;
-	pci_write_config_byte(HWIF(drive)->pci_dev, port, timings[pio]);
-}
+static struct pci_dev *isa_dev;
 
-#if defined(CONFIG_BLK_DEV_IDEDMA) && defined(CONFIG_BLK_DEV_OSB4)
 static int osb4_tune_chipset (ide_drive_t *drive, byte speed)
 {
+	byte udma_modes[]	= { 0x00, 0x01, 0x02 };
+	byte dma_modes[]	= { 0x77, 0x21, 0x20 };
+	byte pio_modes[]	= { 0x5d, 0x47, 0x34, 0x22, 0x20 };
+
 	ide_hwif_t *hwif	= HWIF(drive);
 	struct pci_dev *dev	= hwif->pci_dev;
-	byte is_slave           = (&HWIF(drive)->drives[1] == drive) ? 1 : 0;
-	byte bit8, enable;
-	int err;
-	
-	/* clear udma register if we don't want udma */
-	if (speed < XFER_UDMA_0) {
-		enable = 0x1 << (is_slave + (hwif->channel ? 2 : 0));
-		pci_read_config_byte(dev, 0x54, &bit8);
-		pci_write_config_byte(dev, 0x54, bit8 & ~enable);
-	}
-
+	byte unit		= (drive->select.b.unit & 0x01);
 #ifdef CONFIG_BLK_DEV_IDEDMA
-	if (speed >= XFER_MW_DMA_0) {
-		byte channel = hwif->channel ? 0x46 : 0x44;
-		if (!is_slave)
-			channel++;
+	unsigned long dma_base	= hwif->dma_base;
+#endif /* CONFIG_BLK_DEV_IDEDMA */
+	int err;
 
-		switch (speed) {
-		case XFER_MW_DMA_0:
-			bit8 = 0x77;
-			break;
-		case XFER_MW_DMA_1:
-			bit8 = 0x21;
-			break;
-		case XFER_MW_DMA_2:
+	byte drive_pci		= 0x00;
+	byte drive_pci2		= 0x00;
+	byte drive_pci3		= hwif->channel ? 0x57 : 0x56;
+
+	byte ultra_enable	= 0x00;
+	byte ultra_timing	= 0x00;
+	byte dma_timing		= 0x00;
+	byte pio_timing		= 0x00;
+
+	byte pio	= ide_get_best_pio_mode(drive, 255, 5, NULL);
+
+        switch (drive->dn) {
+		case 0: drive_pci = 0x41; drive_pci2 = 0x45; break;
+		case 1: drive_pci = 0x40; drive_pci2 = 0x44; break;
+		case 2: drive_pci = 0x43; drive_pci2 = 0x47; break;
+		case 3: drive_pci = 0x42; drive_pci2 = 0x46; break;
 		default:
-			bit8 = 0x20;
-			break;
-		}
-		pci_write_config_byte(dev, channel, bit8);
+			return -1;
 	}
 
-	if (speed >= XFER_UDMA_0) {
-		byte channel = hwif->channel ? 0x57 : 0x56;
-		int slave = is_slave ? 4 : 0;
+	pci_read_config_byte(dev, drive_pci, &pio_timing);
+	pci_read_config_byte(dev, drive_pci2, &dma_timing);
+	pci_read_config_byte(dev, drive_pci3, &ultra_timing);
+	pci_read_config_byte(dev, 0x54, &ultra_enable);
+
+#ifdef DEBUG
+	printk("%s: UDMA 0x%02x DMAPIO 0x%02x PIO 0x%02x ",
+		drive->name, ultra_timing, dma_timing, pio_timing);
+#endif
 
-		pci_read_config_byte(dev, channel, &bit8);
-		bit8 &= ~(0xf << slave);
-		switch (speed) {
-		case XFER_UDMA_0:
+	pio_timing	&= ~0xFF;
+	dma_timing	&= ~0xFF;
+	ultra_timing	&= ~(0x0F << (4*unit));
+	ultra_enable	&= ~(0x01 << drive->dn);
+
+	switch(speed) {
+		case XFER_PIO_4:
+		case XFER_PIO_3:
+		case XFER_PIO_2:
+		case XFER_PIO_1:
+		case XFER_PIO_0:
+			pio_timing |= pio_modes[speed - XFER_PIO_0];
 			break;
-		case XFER_UDMA_1:
-			bit8 |= 0x1 << slave;
+#ifdef CONFIG_BLK_DEV_IDEDMA
+		case XFER_MW_DMA_2:
+		case XFER_MW_DMA_1:
+		case XFER_MW_DMA_0:
+			pio_timing |= pio_modes[pio];
+			dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
 			break;
+
+//		case XFER_UDMA_5:
+//		case XFER_UDMA_4:
+//		case XFER_UDMA_3:
 		case XFER_UDMA_2:
+		case XFER_UDMA_1:
+		case XFER_UDMA_0:
+			pio_timing |= pio_modes[pio];
+			dma_timing |= dma_modes[2];
+			ultra_timing |= ((udma_modes[speed - XFER_UDMA_0]) << (4*unit));
+			ultra_enable |= (0x01 << drive->dn);
+#endif
 		default:
-			bit8 |= 0x2 << slave;
 			break;
-		}
-		pci_write_config_byte(dev, channel, bit8);
-
-		enable = 0x1 << (is_slave + (hwif->channel ? 2 : 0));
-		pci_read_config_byte(dev, 0x54, &bit8);
-		pci_write_config_byte(dev, 0x54, bit8 | enable);
 	}
+
+#ifdef DEBUG
+	printk("%s: UDMA 0x%02x DMAPIO 0x%02x PIO 0x%02x ",
+		drive->name, ultra_timing, dma_timing, pio_timing);
 #endif
 
 #if OSB4_DEBUG_DRIVE_INFO
 	printk("%s: %s drive%d\n", drive->name, ide_xfer_verbose(speed), drive->dn);
 #endif /* OSB4_DEBUG_DRIVE_INFO */
+
 	if (!drive->init_speed)
 		drive->init_speed = speed;
+
+	pci_write_config_byte(dev, drive_pci, pio_timing);
+#ifdef CONFIG_BLK_DEV_IDEDMA
+	pci_write_config_byte(dev, drive_pci2, dma_timing);
+	pci_write_config_byte(dev, drive_pci3, ultra_timing);
+	pci_write_config_byte(dev, 0x54, ultra_enable);
+	
+	if (speed > XFER_PIO_4) {
+		outb(inb(dma_base+2)|(1<<(5+unit)), dma_base+2);
+	} else {
+		outb(inb(dma_base+2) & ~(1<<(5+unit)), dma_base+2);
+	}
+#endif /* CONFIG_BLK_DEV_IDEDMA */
+
 	err = ide_config_drive_speed(drive, speed);
 	drive->current_speed = speed;
 	return err;
 }
 
-static int osb4_config_drive_for_dma (ide_drive_t *drive)
+static void config_chipset_for_pio (ide_drive_t *drive)
+{
+	unsigned short eide_pio_timing[6] = {960, 480, 240, 180, 120, 90};
+	unsigned short xfer_pio = drive->id->eide_pio_modes;
+	byte			timing, speed, pio;
+
+	pio = ide_get_best_pio_mode(drive, 255, 5, NULL);
+
+	if (xfer_pio> 4)
+		xfer_pio = 0;
+
+	if (drive->id->eide_pio_iordy > 0) {
+		for (xfer_pio = 5;
+			xfer_pio>0 &&
+			drive->id->eide_pio_iordy>eide_pio_timing[xfer_pio];
+			xfer_pio--);
+	} else {
+		xfer_pio = (drive->id->eide_pio_modes & 4) ? 0x05 :
+			   (drive->id->eide_pio_modes & 2) ? 0x04 :
+			   (drive->id->eide_pio_modes & 1) ? 0x03 :
+			   (drive->id->tPIO & 2) ? 0x02 :
+			   (drive->id->tPIO & 1) ? 0x01 : xfer_pio;
+	}
+
+	timing = (xfer_pio >= pio) ? xfer_pio : pio;
+
+	switch(timing) {
+		case 4: speed = XFER_PIO_4;break;
+		case 3: speed = XFER_PIO_3;break;
+		case 2: speed = XFER_PIO_2;break;
+		case 1: speed = XFER_PIO_1;break;
+		default:
+			speed = (!drive->id->tPIO) ? XFER_PIO_0 : XFER_PIO_SLOW;
+			break;
+	}
+	(void) osb4_tune_chipset(drive, speed);
+	drive->current_speed = speed;
+}
+
+static void osb4_tune_drive (ide_drive_t *drive, byte pio)
+{
+	byte speed;
+	switch(pio) {
+		case 4:		speed = XFER_PIO_4;break;
+		case 3:		speed = XFER_PIO_3;break;
+		case 2:		speed = XFER_PIO_2;break;
+		case 1:		speed = XFER_PIO_1;break;
+		default:	speed = XFER_PIO_0;break;
+	}
+	(void) osb4_tune_chipset(drive, speed);
+}
+
+#ifdef CONFIG_BLK_DEV_IDEDMA
+static int config_chipset_for_dma (ide_drive_t *drive)
 {
 	struct hd_driveid *id	= drive->id;
 	byte			speed;
 
+#if 0
 	byte udma_66		= eighty_ninty_three(drive);
 	/* need specs to figure out if osb4 is capable of ata/66/100 */
 	int ultra100		= 0;
 	int ultra66		= 0;
-	int ultra		= 1;
 
 	if ((id->dma_ultra & 0x0020) && (udma_66) && (ultra100)) {
 		speed = XFER_UDMA_5;
-	} else if ((id->dma_ultra & 0x0010) && (ultra)) {
+	} else if (id->dma_ultra & 0x0010) {
 		speed = ((udma_66) && (ultra66)) ? XFER_UDMA_4 : XFER_UDMA_2;
-	} else if ((id->dma_ultra & 0x0008) && (ultra)) {
+	} else if (id->dma_ultra & 0x0008) {
 		speed = ((udma_66) && (ultra66)) ? XFER_UDMA_3 : XFER_UDMA_1;
-	} else if ((id->dma_ultra & 0x0004) && (ultra)) {
+	} else if (id->dma_ultra & 0x0004) {
+#else
+	if (id->dma_ultra & 0x0004) {
+#endif
 		speed = XFER_UDMA_2;
-	} else if ((id->dma_ultra & 0x0002) && (ultra)) {
+	} else if (id->dma_ultra & 0x0002) {
 		speed = XFER_UDMA_1;
-	} else if ((id->dma_ultra & 0x0001) && (ultra)) {
+	} else if (id->dma_ultra & 0x0001) {
 		speed = XFER_UDMA_0;
 	} else if (id->dma_mword & 0x0004) {
 		speed = XFER_MW_DMA_2;
@@ -243,45 +328,87 @@
 						     ide_dma_off_quietly);
 }
 
+static int config_drive_xfer_rate (ide_drive_t *drive)
+{
+	struct hd_driveid *id = drive->id;
+	ide_dma_action_t dma_func = ide_dma_on;
+
+	if (id && (id->capability & 1) && HWIF(drive)->autodma) {
+		/* Consult the list of known "bad" drives */
+		if (ide_dmaproc(ide_dma_bad_drive, drive)) {
+			dma_func = ide_dma_off;
+			goto fast_ata_pio;
+		}
+		dma_func = ide_dma_off_quietly;
+		if (id->field_valid & 4) {
+			if (id->dma_ultra & 0x002F) {
+				/* Force if Capable UltraDMA */
+				dma_func = config_chipset_for_dma(drive);
+				if ((id->field_valid & 2) &&
+				    (dma_func != ide_dma_on))
+					goto try_dma_modes;
+			}
+		} else if (id->field_valid & 2) {
+try_dma_modes:
+			if ((id->dma_mword & 0x0007) ||
+			    (id->dma_1word & 0x007)) {
+				/* Force if Capable regular DMA modes */
+				dma_func = config_chipset_for_dma(drive);
+				if (dma_func != ide_dma_on)
+					goto no_dma_set;
+			}
+		} else if (ide_dmaproc(ide_dma_good_drive, drive)) {
+			if (id->eide_dma_time > 150) {
+				goto no_dma_set;
+			}
+			/* Consult the list of known "good" drives */
+			dma_func = config_chipset_for_dma(drive);
+			if (dma_func != ide_dma_on)
+				goto no_dma_set;
+		} else {
+			goto fast_ata_pio;
+		}
+	} else if ((id->capability & 8) || (id->field_valid & 2)) {
+fast_ata_pio:
+		dma_func = ide_dma_off_quietly;
+no_dma_set:
+		config_chipset_for_pio(drive);
+	}
+	return HWIF(drive)->dmaproc(dma_func, drive);
+}
+
 static int osb4_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
 {
 	switch (func) {
 		case ide_dma_check:
-			 return ide_dmaproc((ide_dma_action_t) osb4_config_drive_for_dma(drive), drive);
+			return config_drive_xfer_rate(drive);
 		default :
 			break;
 	}
 	/* Other cases are done by generic IDE-DMA code. */
 	return ide_dmaproc(func, drive);
 }
-#endif /* defined(CONFIG_BLK_DEV_IDEDMA) && (CONFIG_BLK_DEV_OSB4) */
+#endif /* CONFIG_BLK_DEV_IDEDMA */
 
 unsigned int __init pci_init_osb4 (struct pci_dev *dev, const char *name)
 {
-	u16 word;
-	byte bit8;
+	unsigned int reg64;
 
 	pci_read_config_byte(dev, PCI_REVISION_ID, &osb4_revision);
 
-	/* setup command register. just make sure that bus master and
-	 * i/o ports are on. */
-	pci_read_config_word(dev, PCI_COMMAND, &word);
-	if ((word & (PCI_COMMAND_MASTER | PCI_COMMAND_IO)) !=
-	     (PCI_COMMAND_MASTER | PCI_COMMAND_IO))
-		pci_write_config_word(dev, PCI_COMMAND, word |
-				      PCI_COMMAND_MASTER | PCI_COMMAND_IO);
-	
-	/* make sure that we're in pci native mode for both the primary
-	 * and secondary channel. */
-	pci_read_config_byte(dev, PCI_CLASS_PROG, &bit8);
-	if ((bit8 & 0x5) != 0x5)
-		pci_write_config_byte(dev, PCI_CLASS_PROG, bit8 | 0x5);
-
-	/* setup up our latency. the default is 255 which is a bit large.
-	 * set it to 64 instead. */
-	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &bit8);
-	if (bit8 != 0x40)
-	    pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);
+	isa_dev = pci_find_device(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
+
+	pci_read_config_dword(isa_dev, 0x64, &reg64);
+#ifdef DEBUG
+	printk("%s: reg64 == 0x%08x\n", name, reg64);
+#endif
+	reg64 &= ~0x0000A000;
+#ifdef CONFIG_SMP
+	reg64 |= 0x00008000;
+#endif
+	pci_write_config_dword(isa_dev, 0x64, reg64);
+
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);
 
 #if defined(DISPLAY_OSB4_TIMINGS) && defined(CONFIG_PROC_FS)
 	if (!osb4_proc) {
@@ -304,19 +431,22 @@
 		hwif->irq = hwif->channel ? 15 : 14;
 
 	hwif->tuneproc = &osb4_tune_drive;
-	hwif->drives[0].autotune = 1;
-	hwif->drives[1].autotune = 1;
-
-	if (!hwif->dma_base)
-		return;
+	hwif->speedproc = &osb4_tune_chipset;
 
 #ifndef CONFIG_BLK_DEV_IDEDMA
+	hwif->drives[0].autotune = 1;
+	hwif->drives[1].autotune = 1;
 	hwif->autodma = 0;
+	return;
 #else /* CONFIG_BLK_DEV_IDEDMA */
-#ifdef CONFIG_BLK_DEV_OSB4
-	hwif->autodma = 1;
-	hwif->dmaproc = &osb4_dmaproc;
-	hwif->speedproc = &osb4_tune_chipset;
-#endif /* CONFIG_BLK_DEV_OSB4 */
+
+	if (hwif->dma_base) {
+		hwif->autodma = 1;
+		hwif->dmaproc = &osb4_dmaproc;
+	} else {
+		hwif->autodma = 0;
+		hwif->drives[0].autotune = 1;
+		hwif->drives[1].autotune = 1;
+	}
 #endif /* !CONFIG_BLK_DEV_IDEDMA */
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/piix.c linux/drivers/ide/piix.c
--- v2.4.0-prerelease/linux/drivers/ide/piix.c	Fri Jul  7 15:55:24 2000
+++ linux/drivers/ide/piix.c	Tue Jan  2 16:58:45 2001
@@ -399,11 +399,65 @@
 						     ide_dma_off_quietly);
 }
 
+static void config_chipset_for_pio (ide_drive_t *drive)
+{
+	piix_tune_drive(drive, ide_get_best_pio_mode(drive, 255, 5, NULL));
+}
+
+static int config_drive_xfer_rate (ide_drive_t *drive)
+{
+	struct hd_driveid *id = drive->id;
+	ide_dma_action_t dma_func = ide_dma_on;
+
+	if (id && (id->capability & 1) && HWIF(drive)->autodma) {
+		/* Consult the list of known "bad" drives */
+		if (ide_dmaproc(ide_dma_bad_drive, drive)) {
+			dma_func = ide_dma_off;
+			goto fast_ata_pio;
+		}
+		dma_func = ide_dma_off_quietly;
+		if (id->field_valid & 4) {
+			if (id->dma_ultra & 0x002F) {
+				/* Force if Capable UltraDMA */
+				dma_func = piix_config_drive_for_dma(drive);
+				if ((id->field_valid & 2) &&
+				    (dma_func != ide_dma_on))
+					goto try_dma_modes;
+			}
+		} else if (id->field_valid & 2) {
+try_dma_modes:
+			if ((id->dma_mword & 0x0007) ||
+			    (id->dma_1word & 0x007)) {
+				/* Force if Capable regular DMA modes */
+				dma_func = piix_config_drive_for_dma(drive);
+				if (dma_func != ide_dma_on)
+					goto no_dma_set;
+			}
+		} else if (ide_dmaproc(ide_dma_good_drive, drive)) {
+			if (id->eide_dma_time > 150) {
+				goto no_dma_set;
+			}
+			/* Consult the list of known "good" drives */
+			dma_func = piix_config_drive_for_dma(drive);
+			if (dma_func != ide_dma_on)
+				goto no_dma_set;
+		} else {
+			goto fast_ata_pio;
+		}
+	} else if ((id->capability & 8) || (id->field_valid & 2)) {
+fast_ata_pio:
+		dma_func = ide_dma_off_quietly;
+no_dma_set:
+		config_chipset_for_pio(drive);
+	}
+	return HWIF(drive)->dmaproc(dma_func, drive);
+}
+
 static int piix_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
 {
 	switch (func) {
 		case ide_dma_check:
-			 return ide_dmaproc((ide_dma_action_t) piix_config_drive_for_dma(drive), drive);
+			return config_drive_xfer_rate(drive);
 		default :
 			break;
 	}
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ide/sis5513.c linux/drivers/ide/sis5513.c
--- v2.4.0-prerelease/linux/drivers/ide/sis5513.c	Sun Nov 19 18:44:07 2000
+++ linux/drivers/ide/sis5513.c	Tue Jan  2 16:58:45 2001
@@ -48,6 +48,7 @@
 	{ "SiS540",	PCI_DEVICE_ID_SI_540,	SIS5513_FLAG_ATA_66, },
 	{ "SiS620",	PCI_DEVICE_ID_SI_620,	SIS5513_FLAG_ATA_66|SIS5513_FLAG_LATENCY, },
 	{ "SiS630",	PCI_DEVICE_ID_SI_630,	SIS5513_FLAG_ATA_66|SIS5513_FLAG_LATENCY, },
+	{ "SiS730",	PCI_DEVICE_ID_SI_730,	SIS5513_FLAG_ATA_66|SIS5513_FLAG_LATENCY, },
 	{ "SiS5591",	PCI_DEVICE_ID_SI_5591,	SIS5513_FLAG_ATA_33, },
 	{ "SiS5597",	PCI_DEVICE_ID_SI_5597,	SIS5513_FLAG_ATA_33, },
 	{ "SiS5600",	PCI_DEVICE_ID_SI_5600,	SIS5513_FLAG_ATA_33, },
@@ -337,6 +338,7 @@
 			case PCI_DEVICE_ID_SI_540:
 			case PCI_DEVICE_ID_SI_620:
 			case PCI_DEVICE_ID_SI_630:
+			case PCI_DEVICE_ID_SI_730:
 				unmask   = 0xF0;
 				four_two = 0x01;
 				break;
@@ -370,7 +372,7 @@
 
 	switch(speed) {
 #ifdef CONFIG_BLK_DEV_IDEDMA
-		case XFER_UDMA_5: /* can not do ultra mode 5 yet */
+		case XFER_UDMA_5: mask = 0x80; break;
 		case XFER_UDMA_4: mask = 0x90; break;
 		case XFER_UDMA_3: mask = 0xA0; break;
 		case XFER_UDMA_2: mask = (four_two) ? 0xB0 : 0xA0; break;
@@ -417,20 +419,26 @@
 
 	byte unit		= (drive->select.b.unit & 0x01);
 	byte udma_66		= eighty_ninty_three(drive);
+	byte ultra_100		= 0;
 
 	if (host_dev) {
 		switch(host_dev->device) {
+			case PCI_DEVICE_ID_SI_730:
+				ultra_100 = 1;
 			case PCI_DEVICE_ID_SI_530:
 			case PCI_DEVICE_ID_SI_540:
 			case PCI_DEVICE_ID_SI_620:
 			case PCI_DEVICE_ID_SI_630:
-				four_two = 0x01; break;
+				four_two = 0x01;
+				break;
 			default:
 				four_two = 0x00; break;
 		}
 	}
 
-	if ((id->dma_ultra & 0x0010) && (ultra) && (udma_66) && (four_two))
+	if ((id->dma_ultra & 0x0020) && (ultra) && (udma_66) && (four_two) && (ultra_100))
+		speed = XFER_UDMA_5;
+	else if ((id->dma_ultra & 0x0010) && (ultra) && (udma_66) && (four_two))
 		speed = XFER_UDMA_4;
 	else if ((id->dma_ultra & 0x0008) && (ultra) && (udma_66) && (four_two))
 		speed = XFER_UDMA_3;
@@ -590,6 +598,7 @@
 			case PCI_DEVICE_ID_SI_540:
 			case PCI_DEVICE_ID_SI_620:
 			case PCI_DEVICE_ID_SI_630:
+			case PCI_DEVICE_ID_SI_730:
 				ata66 = (reg48h & mask) ? 0 : 1;
 			default:
 				break;
@@ -616,6 +625,7 @@
 			case PCI_DEVICE_ID_SI_540:
 			case PCI_DEVICE_ID_SI_620:
 			case PCI_DEVICE_ID_SI_630:
+			case PCI_DEVICE_ID_SI_730:
 			case PCI_DEVICE_ID_SI_5600:
 			case PCI_DEVICE_ID_SI_5597:
 			case PCI_DEVICE_ID_SI_5591:
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ieee1394/Makefile linux/drivers/ieee1394/Makefile
--- v2.4.0-prerelease/linux/drivers/ieee1394/Makefile	Mon Jan  1 09:38:35 2001
+++ linux/drivers/ieee1394/Makefile	Tue Jan  2 16:45:37 2001
@@ -8,7 +8,7 @@
 # Note 2! The CFLAGS definitions are now in the main makefile.
 #
 
-L_TARGET := ieee1394.a
+O_TARGET := ieee1394drv.o
 
 export-objs := ieee1394_syms.o
 
@@ -23,7 +23,7 @@
 obj-$(CONFIG_IEEE1394_VIDEO1394) += video1394.o
 obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
 
+include $(TOPDIR)/Rules.make
+
 ieee1394.o: $(ieee1394-objs)
 	$(LD) -r -o $@ $(ieee1394-objs)
-
-include $(TOPDIR)/Rules.make
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
--- v2.4.0-prerelease/linux/drivers/ieee1394/csr.c	Wed Jul  5 13:03:56 2000
+++ linux/drivers/ieee1394/csr.c	Tue Jan  2 16:45:38 2001
@@ -116,6 +116,7 @@
 {
         int csraddr = addr - CSR_REGISTER_BASE;
         int oldcycle;
+        quadlet_t ret;
         
         if ((csraddr | length) & 0x3) {
                 return RCODE_TYPE_ERROR;
@@ -181,16 +182,36 @@
                 return RCODE_ADDRESS_ERROR;
 
         case CSR_BUS_MANAGER_ID:
-                *(buf++) = cpu_to_be32(host->csr.bus_manager_id);
+                if (host->template->hw_csr_reg)
+                        ret = host->template->hw_csr_reg(host, 0, 0, 0);
+                else
+                        ret = host->csr.bus_manager_id;
+
+                *(buf++) = cpu_to_be32(ret);
                 out;
         case CSR_BANDWIDTH_AVAILABLE:
-                *(buf++) = cpu_to_be32(host->csr.bandwidth_available);
+                if (host->template->hw_csr_reg)
+                        ret = host->template->hw_csr_reg(host, 1, 0, 0);
+                else
+                        ret = host->csr.bandwidth_available;
+
+                *(buf++) = cpu_to_be32(ret);
                 out;
         case CSR_CHANNELS_AVAILABLE_HI:
-                *(buf++) = cpu_to_be32(host->csr.channels_available_hi);
+                if (host->template->hw_csr_reg)
+                        ret = host->template->hw_csr_reg(host, 2, 0, 0);
+                else
+                        ret = host->csr.channels_available_hi;
+
+                *(buf++) = cpu_to_be32(ret);
                 out;
         case CSR_CHANNELS_AVAILABLE_LO:
-                *(buf++) = cpu_to_be32(host->csr.channels_available_lo);
+                if (host->template->hw_csr_reg)
+                        ret = host->template->hw_csr_reg(host, 3, 0, 0);
+                else
+                        ret = host->csr.channels_available_lo;
+
+                *(buf++) = cpu_to_be32(ret);
                 out;
 
                 /* address gap to end - fall through to default */
@@ -282,66 +303,60 @@
 #undef out
 
 
-/* helper function for lock_regs */
-inline static void compare_swap(quadlet_t *old, quadlet_t data, quadlet_t arg)
-{
-        if (*old == be32_to_cpu(arg)) {
-                *old = be32_to_cpu(data);
-        }
-}
-
 static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
                      u64 addr, quadlet_t data, quadlet_t arg, int extcode)
 {
         int csraddr = addr - CSR_REGISTER_BASE;
         unsigned long flags;
+        quadlet_t *regptr = NULL;
 
-        if (csraddr & 0x3) {
-                return RCODE_TYPE_ERROR;
+        if (csraddr & 0x3) return RCODE_TYPE_ERROR;
+
+        if (csraddr < CSR_BUS_MANAGER_ID || csraddr > CSR_CHANNELS_AVAILABLE_LO
+            || extcode != EXTCODE_COMPARE_SWAP)
+                goto unsupported_lockreq;
+
+        data = be32_to_cpu(data);
+        arg = be32_to_cpu(arg);
+
+        if (host->template->hw_csr_reg) {
+                quadlet_t old;
+
+                old = host->template->
+                        hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
+                                   data, arg);
+
+                *store = cpu_to_be32(old);
+                return RCODE_COMPLETE;
         }
 
-        if ((csraddr >= CSR_BUS_MANAGER_ID)
-            && (csraddr <= CSR_CHANNELS_AVAILABLE_LO)) {
-                if (extcode == EXTCODE_COMPARE_SWAP) {
-                        spin_lock_irqsave(&host->csr.lock, flags);
-
-                        switch (csraddr) {
-                        case CSR_BUS_MANAGER_ID:
-                                *store = cpu_to_be32(host->csr.bus_manager_id);
-                                compare_swap(&host->csr.bus_manager_id,
-                                             data, arg);
-                                break;
-
-                        case CSR_BANDWIDTH_AVAILABLE:
-                                *store = cpu_to_be32(host->
-                                                     csr.bandwidth_available);
-                                compare_swap(&host->csr.bandwidth_available,
-                                             data, arg);
-                                break;
-
-                        case CSR_CHANNELS_AVAILABLE_HI:
-                                *store = cpu_to_be32(host->
-                                                     csr.channels_available_hi);
-                                compare_swap(&host->csr.channels_available_hi,
-                                             data, arg);
-                                break;
-
-                        case CSR_CHANNELS_AVAILABLE_LO:
-                                *store = cpu_to_be32(host->
-                                                     csr.channels_available_lo);
-                                compare_swap(&host->csr.channels_available_lo,
-                                             data, arg);
-                                break;
-                        }
-
-                        spin_unlock_irqrestore(&host->csr.lock, flags);
-                        return RCODE_COMPLETE;
-                } else {
-                        return RCODE_TYPE_ERROR;
-                }
+        spin_lock_irqsave(&host->csr.lock, flags);
+
+        switch (csraddr) {
+        case CSR_BUS_MANAGER_ID:
+                regptr = &host->csr.bus_manager_id;
+                break;
+
+        case CSR_BANDWIDTH_AVAILABLE:
+                regptr = &host->csr.bandwidth_available;
+                break;
+
+        case CSR_CHANNELS_AVAILABLE_HI:
+                regptr = &host->csr.channels_available_hi;
+                break;
+
+        case CSR_CHANNELS_AVAILABLE_LO:
+                regptr = &host->csr.channels_available_lo;
+                break;
         }
 
-        /* no locking for anything else yet */
+        *store = cpu_to_be32(*regptr);
+        if (*regptr == arg) *regptr = data;
+        spin_unlock_irqrestore(&host->csr.lock, flags);
+
+        return RCODE_COMPLETE;
+
+ unsupported_lockreq:
         switch (csraddr) {
         case CSR_STATE_CLEAR:
         case CSR_STATE_SET:
@@ -351,6 +366,10 @@
         case CSR_SPLIT_TIMEOUT_LO:
         case CSR_CYCLE_TIME:
         case CSR_BUS_TIME:
+        case CSR_BUS_MANAGER_ID:
+        case CSR_BANDWIDTH_AVAILABLE:
+        case CSR_CHANNELS_AVAILABLE_HI:
+        case CSR_CHANNELS_AVAILABLE_LO:
                 return RCODE_TYPE_ERROR;
 
         case CSR_BUSY_TIMEOUT:
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ieee1394/hosts.h linux/drivers/ieee1394/hosts.h
--- v2.4.0-prerelease/linux/drivers/ieee1394/hosts.h	Sun Oct  8 10:50:17 2000
+++ linux/drivers/ieee1394/hosts.h	Tue Jan  2 16:45:38 2001
@@ -150,6 +150,16 @@
          * command, though that should never happen.
          */
         int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
+
+        /* This function is mainly to redirect local CSR reads/locks to the iso
+         * management registers (bus manager id, bandwidth available, channels
+         * available) to the hardware registers in OHCI.  reg is 0,1,2,3 for bus
+         * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
+         * as OHCI uses).  data and compare are the new data and expected data
+         * respectively, return value is the old value.
+         */
+        quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
+                                 quadlet_t data, quadlet_t compare);
 };
 
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ieee1394/ohci1394.c linux/drivers/ieee1394/ohci1394.c
--- v2.4.0-prerelease/linux/drivers/ieee1394/ohci1394.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/ieee1394/ohci1394.c	Tue Jan  2 16:45:38 2001
@@ -44,6 +44,10 @@
 /* 
  * Acknowledgments:
  *
+ * Adam J Richter <adam@yggdrasil.com>
+ *  . Use of pci_class to find device
+ * Andreas Tobler <toa@pop.agri.ch>
+ *  . Updated proc_fs calls
  * Emilie Chung	<emilie.chung@axis.com>
  *  . Tip on Async Request Filter
  * Pascal Drolet <pascal.drolet@informission.ca>
@@ -85,6 +89,7 @@
 #include <linux/types.h>
 #include <linux/wrapper.h>
 #include <linux/vmalloc.h>
+#include <linux/init.h>
 
 #include "ieee1394.h"
 #include "ieee1394_types.h"
@@ -121,10 +126,13 @@
 	    remove_card(ohci); \
 	      return 1;
 
+#if USE_DEVICE
+
 int supported_chips[][2] = {
 	{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_OHCI1394_LV22 },
 	{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_OHCI1394_LV23 },
 	{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_OHCI1394_LV26 },
+	{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_OHCI1394_PCI4450 },
 	{ PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_OHCI1394 },
 	{ PCI_VENDOR_ID_SONY, PCI_DEVICE_ID_SONY_CXD3222 },
 	{ PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_UPD72862 },
@@ -136,6 +144,30 @@
 	{ -1, -1 }
 };
 
+#else
+
+#define PCI_CLASS_FIREWIRE_OHCI     ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+static struct pci_device_id ohci1394_pci_tbl[] __initdata = {
+	{
+		class: 		PCI_CLASS_FIREWIRE_OHCI,
+		class_mask: 	0x00ffffff,
+		vendor:		PCI_ANY_ID,
+		device:		PCI_ANY_ID,
+		subvendor:	PCI_ANY_ID,
+		subdevice:	PCI_ANY_ID,
+	},
+	{ 0, },
+};
+MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
+#endif
+
+#endif /* USE_DEVICE */
+
+MODULE_PARM(attempt_root,"i");
+static int attempt_root = 0;
+
 static struct ti_ohci cards[MAX_OHCI1394_CARDS];
 static int num_of_cards = 0;
 
@@ -640,12 +672,19 @@
 	else 
 		d->prg_cpu[idx]->begin.status = 0;
 
-	d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
-		(packet->header[0] & 0xFFFF);
-	d->prg_cpu[idx]->data[1] = (packet->header[1] & 0xFFFF) | 
-		(packet->header[0] & 0xFFFF0000);
-	d->prg_cpu[idx]->data[2] = packet->header[2];
-	d->prg_cpu[idx]->data[3] = packet->header[3];
+        if (packet->type == raw) {
+		d->prg_cpu[idx]->data[0] = OHCI1394_TCODE_PHY<<4;
+		d->prg_cpu[idx]->data[1] = packet->header[0];
+		d->prg_cpu[idx]->data[2] = packet->header[1];
+        }
+        else {
+		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
+			(packet->header[0] & 0xFFFF);
+		d->prg_cpu[idx]->data[1] = (packet->header[1] & 0xFFFF) | 
+			(packet->header[0] & 0xFFFF0000);
+		d->prg_cpu[idx]->data[2] = packet->header[2];
+		d->prg_cpu[idx]->data[3] = packet->header[3];
+        }
 
 	if (packet->data_size) { /* block transmit */
 		d->prg_cpu[idx]->begin.control = OUTPUT_MORE_IMMEDIATE | 0x10;
@@ -673,8 +712,13 @@
 		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
 	}
 	else { /* quadlet transmit */
-		d->prg_cpu[idx]->begin.control = 
-			OUTPUT_LAST_IMMEDIATE | packet->header_size;
+                if (packet->type == raw)
+                        d->prg_cpu[idx]->begin.control =
+				OUTPUT_LAST_IMMEDIATE|(packet->header_size+4);
+                else
+                        d->prg_cpu[idx]->begin.control =
+                                OUTPUT_LAST_IMMEDIATE|packet->header_size;
+
 		if (d->branchAddrPtr) 
 			*(d->branchAddrPtr) = d->prg_bus[idx] | 0x2;
 		d->branchAddrPtr = &(d->prg_cpu[idx]->begin.branchAddress);
@@ -788,12 +832,12 @@
 		/*
 		 * FIXME: this flag might be necessary in some case
 		 */
-		/* host->attempt_root = 1; */
 		PRINT(KERN_INFO, ohci->id, "resetting bus on request%s",
-		      (host->attempt_root ? " and attempting to become root"
-		       : ""));
+		      ((host->attempt_root || attempt_root) ? 
+		       " and attempting to become root" : ""));
 		reg_write(ohci, OHCI1394_PhyControl, 
-			  (host->attempt_root) ? 0x000041ff : 0x0000417f);
+			  (host->attempt_root || attempt_root) ? 
+			  0x000041ff : 0x0000417f);
 		break;
 
 	case GET_CYCLE_COUNTER:
@@ -842,62 +886,74 @@
 
 	case ISO_LISTEN_CHANNEL:
         {
-                int *isochannels, offset= OHCI1394_IRMultiChanMaskLoSet;
-                unsigned int channel= (unsigned int)arg;
-                unsigned int channelbit= channel;
-                u32 setMask= 0x00000001;
-
-                /* save people from themselves */
-                if (channel > 63)
-                        break;
-
-                if (channel > 31) {
-                        isochannels= &(((int*)&ohci->IR_channel_usage)[0]);
-                        channelbit-= 32;
-                        offset= OHCI1394_IRMultiChanMaskHiSet;
-                }
-                else
-                        isochannels= &(((int*)&ohci->IR_channel_usage)[1]);
+		u64 mask;
 
-                while(channelbit--) setMask= setMask << 1;
+		if (arg<0 || arg>63) {
+			PRINT(KERN_ERR, ohci->id, __FUNCTION__
+			      "IS0_LISTEN_CHANNEL channel %d out of range", 
+			      arg);
+			return -EFAULT;
+		}
 
+		mask = (u64)0x1<<arg;
+		
                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
 
-                if (!test_and_set_bit(channelbit, isochannels))
-                        reg_write(ohci, offset, setMask);
+		if (ohci->ISO_channel_usage & mask) {
+			PRINT(KERN_ERR, ohci->id, __FUNCTION__
+			      "IS0_LISTEN_CHANNEL channel %d already used", 
+			      arg);
+			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
+			return -EFAULT;
+		}
+		
+		ohci->ISO_channel_usage |= mask;
+
+		if (arg>31) 
+			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, 
+				  1<<(arg-32));			
+		else
+			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, 
+				  1<<arg);			
 
                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
-                DBGMSG(ohci->id, "listening enabled on channel %u", channel);
+                DBGMSG(ohci->id, "listening enabled on channel %d", arg);
                 break;
         }
 	case ISO_UNLISTEN_CHANNEL:
         {
-                int *isochannels, offset= OHCI1394_IRMultiChanMaskLoClear;
-                unsigned int channel= (unsigned int)arg;
-                unsigned int channelbit= channel;
-                u32 clearMask= 0x00000001;
-
-                /* save people from themselves */
-                if (channel > 63)
-                        break;
-
-                if (channel > 31) {
-                        isochannels= &(((int*)&ohci->IR_channel_usage)[0]);
-                        channelbit-= 32;
-                        offset= OHCI1394_IRMultiChanMaskHiClear;
-                }
-                else
-                        isochannels= &(((int*)&ohci->IR_channel_usage)[1]);
+		u64 mask;
 
-                while(channelbit--) clearMask= clearMask << 1;
+		if (arg<0 || arg>63) {
+			PRINT(KERN_ERR, ohci->id, __FUNCTION__
+			      "IS0_UNLISTEN_CHANNEL channel %d out of range", 
+			      arg);
+			return -EFAULT;
+		}
 
+		mask = (u64)0x1<<arg;
+		
                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
 
-                if (!test_and_clear_bit(channelbit, isochannels))
-                        reg_write(ohci, offset, clearMask);
+		if (!(ohci->ISO_channel_usage & mask)) {
+			PRINT(KERN_ERR, ohci->id, __FUNCTION__
+			      "IS0_UNLISTEN_CHANNEL channel %d not used", 
+			      arg);
+			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
+			return -EFAULT;
+		}
+		
+		ohci->ISO_channel_usage &= ~mask;
+
+		if (arg>31) 
+			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 
+				  1<<(arg-32));			
+		else
+			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 
+				  1<<arg);			
 
                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
-                DBGMSG(ohci->id, "listening disabled on channel %u", channel);
+                DBGMSG(ohci->id, "listening disabled on channel %d", arg);
                 break;
         }
 	default:
@@ -1750,7 +1806,7 @@
 	 * is to allocate 8192 bytes instead of 2048
 	 */
 	ohci->selfid_buf_cpu = 
-		pci_alloc_consistent(ohci->dev, 2048, &ohci->selfid_buf_bus);
+		pci_alloc_consistent(ohci->dev, 8192, &ohci->selfid_buf_bus);
 	if (ohci->selfid_buf_cpu == NULL) {
 		FAIL("failed to allocate DMA buffer for self-id packets");
 	}
@@ -1827,7 +1883,7 @@
 		FAIL("failed to allocate IR context");
 	}
 
-        ohci->IR_channel_usage= 0x0000000000000000;
+        ohci->ISO_channel_usage= 0;
         spin_lock_init(&ohci->IR_channel_lock);
 
 	if (!request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
@@ -1852,12 +1908,7 @@
 p += sprintf(p,fmt,reg_read(ohci, reg0),\
 	       reg_read(ohci, reg1),reg_read(ohci, reg2));
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
 static int ohci_get_status(char *buf)
-#else
-int ohci_get_info(char *buf, char **start, off_t fpos, 
-		  int length, int dummy)
-#endif
 {
 	struct ti_ohci *ohci=&cards[0];
 	struct hpsb_host *host=ohci->host;
@@ -2074,7 +2125,6 @@
 	return  p - buf;
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
 static int ohci1394_read_proc(char *page, char **start, off_t off,
 			      int count, int *eof, void *data)
 {
@@ -2086,20 +2136,9 @@
         if (len<0) len = 0;
         return len;
 }
-#else
-struct proc_dir_entry ohci_proc_entry = 
-{
-	0,			/* Inode number - dynamic */
-	8,			/* Length of the file name */
-	"ohci1394",		/* The file name */
-	S_IFREG | S_IRUGO,	/* File mode */
-	1,			/* Number of links */
-	0, 0,			/* The uid and gid for the file */
-	0,			/* The size of the file reported by ls. */
-	NULL,			/* functions which can be done on the inode */
-	ohci_get_info,		/* The read function for this file */
-	NULL
-}; 
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+struct proc_dir_entry *ohci_proc_entry;
 #endif /* LINUX_VERSION_CODE */
 #endif /* CONFIG_PROC_FS */
 
@@ -2147,8 +2186,9 @@
 {
 	struct pci_dev *dev = NULL;
 	int success = 0;
+#if USE_DEVICE
 	int i;
-
+#endif
 	if (num_of_cards) {
 		PRINT_G(KERN_DEBUG, __PRETTY_FUNCTION__ " called again");
 		return 0;
@@ -2156,6 +2196,7 @@
 
 	PRINT_G(KERN_INFO, "looking for Ohci1394 cards");
 
+#if USE_DEVICE
 	for (i = 0; supported_chips[i][0] != -1; i++) {
 		while ((dev = pci_find_device(supported_chips[i][0],
 					      supported_chips[i][1], dev)) 
@@ -2165,7 +2206,11 @@
 			}
 		}
 	}
-
+#else
+	while ((dev = pci_find_class(PCI_CLASS_FIREWIRE_OHCI, dev)) != NULL ) {
+		if (add_card(dev) == 0) success = 1;
+ 	}
+#endif /* USE_DEVICE */
 	if (success == 0) {
 		PRINT_G(KERN_WARNING, "no operable Ohci1394 cards found");
 		return -ENXIO;
@@ -2175,10 +2220,8 @@
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
 	create_proc_read_entry ("ohci1394", 0, NULL, ohci1394_read_proc, NULL);
 #else
-	if (proc_register(&proc_root, &ohci_proc_entry)) {
-		PRINT_G(KERN_ERR, "unable to register proc file");
-		return -EIO;
-	}
+	if ((ohci_proc_entry = create_proc_entry("ohci1394", 0, NULL)))
+		ohci_proc_entry->read_proc = ohci1394_read_proc;
 #endif
 #endif
 	return 0;
@@ -2195,6 +2238,24 @@
 	return sizeof(ohci_csr_rom);
 }
 
+static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
+				 quadlet_t data, quadlet_t compare)
+{
+	struct ti_ohci *ohci=host->hostdata;
+	int timeout = 255;
+
+	reg_write(ohci, OHCI1394_CSRData, data);
+	reg_write(ohci, OHCI1394_CSRCompareData, compare);
+	reg_write(ohci, OHCI1394_CSRControl, reg&0x3);
+
+	while (timeout-- && !(reg_read(ohci, OHCI1394_CSRControl)&0x80000000));
+
+	if (!timeout)
+		PRINT(KERN_ERR, ohci->id, __FUNCTION__ "timeout!");
+
+	return reg_read(ohci, OHCI1394_CSRData);
+}
+		
 struct hpsb_host_template *get_ohci_template(void)
 {
 	static struct hpsb_host_template tmpl;
@@ -2211,7 +2272,7 @@
 		tmpl.get_rom = get_ohci_rom;
 		tmpl.transmit_packet = ohci_transmit;
 		tmpl.devctl = ohci_devctl;
-
+		tmpl.hw_csr_reg = ohci_hw_csr_reg;
 		initialized = 1;
 	}
 
@@ -2343,11 +2404,7 @@
 {
 	hpsb_unregister_lowlevel(get_ohci_template());
 #ifdef CONFIG_PROC_FS
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
 	remove_proc_entry ("ohci1394", NULL);
-#else
-	proc_unregister(&proc_root, ohci_proc_entry.low_ino);
-#endif
 #endif
 
 	PRINT_G(KERN_INFO, "removed " OHCI1394_DRIVER_NAME " module");
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ieee1394/ohci1394.h linux/drivers/ieee1394/ohci1394.h
--- v2.4.0-prerelease/linux/drivers/ieee1394/ohci1394.h	Sun Oct  8 10:50:17 2000
+++ linux/drivers/ieee1394/ohci1394.h	Tue Jan  2 16:45:38 2001
@@ -23,10 +23,14 @@
 
 #include "ieee1394_types.h"
 
-#define IEEE1394_USE_BOTTOM_HALVES 0
+#define IEEE1394_USE_BOTTOM_HALVES 1
 
 #define OHCI1394_DRIVER_NAME      "ohci1394"
 
+#define USE_DEVICE 0
+
+#if USE_DEVICE
+
 #ifndef PCI_DEVICE_ID_TI_OHCI1394_LV22
 #define PCI_DEVICE_ID_TI_OHCI1394_LV22 0x8009
 #endif
@@ -39,6 +43,10 @@
 #define PCI_DEVICE_ID_TI_OHCI1394_LV26 0x8020
 #endif
 
+#ifndef PCI_DEVICE_ID_TI_OHCI1394_PCI4450
+#define PCI_DEVICE_ID_TI_OHCI1394_PCI4450 0x8011
+#endif
+
 #ifndef PCI_DEVICE_ID_VIA_OHCI1394
 #define PCI_DEVICE_ID_VIA_OHCI1394 0x3044
 #endif
@@ -83,6 +91,9 @@
 #define PCI_DEVICE_ID_LUCENT_FW323 0x5811
 #endif
 
+#endif /* USE_DEVICE */
+
+
 #define MAX_OHCI1394_CARDS        4
 
 #define OHCI1394_MAX_AT_REQ_RETRIES       0x2
@@ -218,13 +229,14 @@
 
         /* iso receive */
 	struct dma_rcv_ctx *ir_context;
-        u64 IR_channel_usage;
         spinlock_t IR_channel_lock;
 	int nb_iso_rcv_ctx;
 
         /* iso transmit */
 	int nb_iso_xmit_ctx;
 
+        u64 ISO_channel_usage;
+
         /* IEEE-1394 part follows */
         struct hpsb_host *host;
 
@@ -450,6 +462,8 @@
 #define DMA_SPEED_100                    0x0
 #define DMA_SPEED_200                    0x1
 #define DMA_SPEED_400                    0x2
+
+#define OHCI1394_TCODE_PHY               0xE
 
 void ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg);
 struct ti_ohci *ohci1394_get_struct(int card_num);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ieee1394/video1394.c linux/drivers/ieee1394/video1394.c
--- v2.4.0-prerelease/linux/drivers/ieee1394/video1394.c	Sun Oct  8 10:50:17 2000
+++ linux/drivers/ieee1394/video1394.c	Tue Jan  2 16:45:38 2001
@@ -61,6 +61,10 @@
 #define virt_to_page(x) MAP_NR(x)
 #endif
 
+#ifndef vmalloc_32
+#define vmalloc_32(x) vmalloc(x)
+#endif
+
 struct it_dma_prg {
 	struct dma_cmd begin;
 	quadlet_t data[4];
@@ -438,14 +442,19 @@
 	d->ir_prg[n][i].status = d->left_size;
 }
 
-static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n)
+static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
 {
 	struct dma_cmd *ir_prg = d->ir_prg[n];
 	unsigned long buf = (unsigned long)d->buf+n*d->buf_size;
 	int i;
-	
-	/* the first descriptor will sync and read only 4 bytes */
-	ir_prg[0].control = (0x280F << 16) | 4;
+
+	/* the first descriptor will read only 4 bytes */
+	ir_prg[0].control = (0x280C << 16) | 4;
+
+	/* set the sync flag */
+	if (flags & VIDEO1394_SYNC_FRAMES)
+		ir_prg[0].control |= 0x00030000;
+
 	ir_prg[0].address = kvirt_to_bus(buf);
 	ir_prg[0].branchAddress =  (virt_to_bus(&(ir_prg[1].control)) 
 				    & 0xfffffff0) | 0x1;
@@ -470,7 +479,7 @@
 	ir_prg[i].address = kvirt_to_bus(buf+(i-1)*PAGE_SIZE);
 }
 	
-static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag)
+static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
 {
 	struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
 	int i;
@@ -478,13 +487,20 @@
 	ohci1394_stop_context(ohci, d->ctrlClear, NULL);
 
 	for (i=0;i<d->num_desc;i++) {
-		initialize_dma_ir_prg(d, i);
+		initialize_dma_ir_prg(d, i, flags);
 		reset_ir_status(d, i);
 	}
-	
-	/* Set bufferFill, no header */
+
+	/* reset the ctrl register */
+	reg_write(ohci, d->ctrlClear, 0xf0000000);
+
+	/* Set bufferFill */
 	reg_write(ohci, d->ctrlSet, 0x80000000);
-			
+
+	/* Set isoch header */
+	if (flags & VIDEO1394_INCLUDE_ISO_HEADERS) 
+		reg_write(ohci, d->ctrlSet, 0x40000000);
+
 	/* Set the context match register to match on all tags, 
 	   sync for sync tag, and listen to d->channel */
 	reg_write(ohci, d->ctxMatch, 0xf0000000|((tag&0xf)<<8)|d->channel);
@@ -683,6 +699,7 @@
 	case VIDEO1394_TALK_CHANNEL:
 	{
 		struct video1394_mmap v;
+		u64 mask;
 		int i;
 
 		if(copy_from_user(&v, (void *)arg, sizeof(v)))
@@ -692,12 +709,18 @@
 			      "iso channel %d out of bound", v.channel);
 			return -EFAULT;
 		}
-                if (test_and_set_bit(v.channel, &ohci->IR_channel_usage)) {
+		mask = (u64)0x1<<v.channel;
+		printk("mask: %08X%08X usage: %08X%08X\n",
+		       (u32)(mask>>32),(u32)(mask&0xffffffff),
+		       (u32)(ohci->ISO_channel_usage>>32),
+		       (u32)(ohci->ISO_channel_usage&0xffffffff));
+		if (ohci->ISO_channel_usage & mask) {
 			PRINT(KERN_ERR, ohci->id, 
 			      "channel %d is already taken", v.channel);
 			return -EFAULT;
 		}
-
+		ohci->ISO_channel_usage |= mask;
+		
 		if (v.buf_size<=0) {
 			PRINT(KERN_ERR, ohci->id,
 			      "Invalid %d length buffer requested",v.buf_size);
@@ -739,7 +762,7 @@
 				return -EFAULT;
 			}
 			initialize_dma_ir_ctx(video->ir_context[i], 
-					      v.sync_tag);
+					      v.sync_tag, v.flags);
 
 			video->current_ctx = video->ir_context[i];
 
@@ -791,16 +814,24 @@
 	case VIDEO1394_UNTALK_CHANNEL:
 	{
 		int channel;
+		u64 mask;
 		int i;
 
 		if(copy_from_user(&channel, (void *)arg, sizeof(int)))
 			return -EFAULT;
 
-                if (!test_and_clear_bit(channel, &ohci->IR_channel_usage)) {
+		if (channel<0 || channel>(ISO_CHANNELS-1)) {
+			PRINT(KERN_ERR, ohci->id, 
+			      "iso channel %d out of bound", channel);
+			return -EFAULT;
+		}
+		mask = (u64)0x1<<channel;
+		if (!(ohci->ISO_channel_usage & mask)) {
 			PRINT(KERN_ERR, ohci->id, 
 			      "channel %d is not being used", channel);
 			return -EFAULT;
 		}
+		ohci->ISO_channel_usage &= ~mask;
 
 		if (cmd == VIDEO1394_UNLISTEN_CHANNEL) {
 			i = ir_ctx_listening(video, channel);
@@ -1131,34 +1162,35 @@
 {
 	struct video_card *video = &video_cards[MINOR(inode->i_rdev)];
 	struct ti_ohci *ohci= video->ohci;
+	u64 mask;
 	int i;
 
 	lock_kernel();
 	for (i=0;i<ohci->nb_iso_rcv_ctx-1;i++) 
 		if (video->ir_context[i]) {
-			if (!test_and_clear_bit(
-				video->ir_context[i]->channel,
-				&ohci->IR_channel_usage)) {
+			mask = (u64)0x1<<video->ir_context[i]->channel;
+			if (!(ohci->ISO_channel_usage & mask))
 				PRINT(KERN_ERR, ohci->id, 
 				      "channel %d is not being used", 
 				      video->ir_context[i]->channel);
-			}
+			else
+				ohci->ISO_channel_usage &= ~mask;
 			PRINT(KERN_INFO, ohci->id, 
 			      "iso receive context %d stop listening "
 			      "on channel %d", i+1, 
 			      video->ir_context[i]->channel);
 			free_dma_iso_ctx(&video->ir_context[i]);
 		}
-
+	
 	for (i=0;i<ohci->nb_iso_xmit_ctx;i++) 
 		if (video->it_context[i]) {
-			if (!test_and_clear_bit(
-				video->it_context[i]->channel,
-				&ohci->IR_channel_usage)) {
+			mask = (u64)0x1<<video->it_context[i]->channel;
+			if (!(ohci->ISO_channel_usage & mask))
 				PRINT(KERN_ERR, ohci->id, 
 				      "channel %d is not being used", 
 				      video->it_context[i]->channel);
-			}
+			else
+				ohci->ISO_channel_usage &= ~mask;
 			PRINT(KERN_INFO, ohci->id, 
 			      "iso transmit context %d stop talking "
 			      "on channel %d", i+1, 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/ieee1394/video1394.h linux/drivers/ieee1394/video1394.h
--- v2.4.0-prerelease/linux/drivers/ieee1394/video1394.h	Sun Oct  8 10:50:17 2000
+++ linux/drivers/ieee1394/video1394.h	Tue Jan  2 16:45:38 2001
@@ -41,6 +41,9 @@
 	VIDEO1394_TALK_WAIT_BUFFER
 };
 
+#define VIDEO1394_SYNC_FRAMES         0x00000001
+#define VIDEO1394_INCLUDE_ISO_HEADERS 0x00000002
+
 struct video1394_mmap {
 	int channel;
 	int sync_tag;
@@ -48,6 +51,7 @@
 	int buf_size;
 	int packet_size;
 	int fps;
+	int flags;
 };
 
 struct video1394_wait {
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/isdn/Config.in linux/drivers/isdn/Config.in
--- v2.4.0-prerelease/linux/drivers/isdn/Config.in	Mon Jan  1 09:38:35 2001
+++ linux/drivers/isdn/Config.in	Tue Jan  2 16:45:37 2001
@@ -23,7 +23,7 @@
 mainmenu_option next_comment
 comment 'ISDN feature submodules'
    dep_tristate 'isdnloop support' CONFIG_ISDN_DRV_LOOP $CONFIG_ISDN
-   dep_tristate 'Support isdn diversion services' CONFIG_ISDN_DIVERSION $CONFIG_ISDN
+   dep_tristate 'Support isdn diversion services' CONFIG_ISDN_DIVERSION $CONFIG_ISDN m
 endmenu
 
 comment 'low-level hardware drivers'
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/isdn/hisax/md5sums.asc linux/drivers/isdn/hisax/md5sums.asc
--- v2.4.0-prerelease/linux/drivers/isdn/hisax/md5sums.asc	Mon Aug 21 07:49:03 2000
+++ linux/drivers/isdn/hisax/md5sums.asc	Thu Jan  4 13:20:17 2001
@@ -3,31 +3,31 @@
 # This are valid md5sums for certificated HiSax driver.
 # The certification is valid only if the md5sums of all files match.
 # The certification is valid only for ELSA Microlink PCI,
-# Eicon Technology Diva 2.01 PCI and Sedlbauer SpeedFax +
+# Eicon Technology Diva 2.01 PCI and Sedlbauer SpeedFax+
 # cards in the moment.
 # Read ../../../Documentation/isdn/HiSax.cert for more informations.
 # 
-f4573d10ffe38b49f6c94e4c966b7bab  isac.c
-a29f5270c0c89626d8d6fa5dd09e7005  isdnl1.c
-fbe41751c8130a8c3c607bfe1b41cb4e  isdnl2.c
-7915b7e802b98f6f4f05b931c4736ad4  isdnl3.c
-7c31c12b3c2cfde33596bd2c406f775c  tei.c
-f1fbd532016f005e01decf36e5197d8f  callc.c
+ca7bd9bac39203f3074f3f093948cc3c  isac.c
+a2ad619fd404b3149099a2984de9d23c  isdnl1.c
+d2a78e407f3d94876deac160c6f9aae6  isdnl2.c
+a109841c2e75b11fc8ef2c8718e24c3e  isdnl3.c
+afb5f2f4ac296d6de45c856993b161e1  tei.c
+00023e2a482cb86a26ea870577ade5d6  callc.c
 a1834e9b2ec068440cff2e899eff4710  cert.c
-a1f908f8b4f225c5c2f2a13842549b72  l3dss1.c
-5bcab52f9937beb352aa02093182e039  l3_1tr6.c
-030d4600ee59a2b246410d6a73977412  elsa.c
-9e800b8e05c24542d731721eb192f305  diva.c
-f32fae58dd9b2b3a73b2e5028f68dc4c  sedlbauer.c
+1551f78b3cd01097ecd586b5c96d0765  l3dss1.c
+89aecf3a80489c723dc885fcaa4aba1b  l3_1tr6.c
+1685c1ddfecf3e1b88ae5f3d7202ce69  elsa.c
+6d7056d1558bf6cc57dd89b7b260dc27  diva.c
+4398918680d45c4618bb48108ea0c282  sedlbauer.c
 # end of md5sums
 
 -----BEGIN PGP SIGNATURE-----
 Version: 2.6.3i
 Charset: noconv
 
-iQCVAwUBOaARmDpxHvX/mS9tAQFT7wP/TEEhtP96uKKgzr2o3GpJ5rRik0Q1HbKY
-dzeA3U79QCEYqyptU09Uz96Av3dt1lNxpQyaahX419NjHH53HCaZgFCxgRxFWBYS
-M9s4aSXLPTCSNM/kWiZkzWQ2lZ7ISNk2/+fF73w4l3G+4zF5y+VotjZCPx7OJj6i
-R/L1m4vZXys=
-=6DzE
+iQCVAwUBOlMTgDpxHvX/mS9tAQFSbgP/W9y6tnnWHTRLGqyr3EY1OHZiQXERkAAu
+hp+Y8PIoX1GgAh4yZ7xhYwUsk6y0z5USdGuhC9ZHh+oZd57lPsJMnhkEZR5BVsYT
+r7jHwelP527+QCLkVUCHIVIWUW0ANzeZBhDV2vefkFb+gWLiZsBhaHssbcKGsMNG
+Ak4xS1ByqsM=
+=lsIJ
 -----END PGP SIGNATURE-----
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/isdn/hisax/teles3c.c linux/drivers/isdn/hisax/teles3c.c
--- v2.4.0-prerelease/linux/drivers/isdn/hisax/teles3c.c	Tue Jul  6 19:05:49 1999
+++ linux/drivers/isdn/hisax/teles3c.c	Wed Dec 31 16:00:00 1969
@@ -1,190 +0,0 @@
-/* $Id: teles3c.c,v 1.3 1998/11/15 23:55:27 keil Exp $
-
- * teles3c.c     low level stuff for teles 16.3c
- *
- * Author     Karsten Keil (keil@isdn4linux.de)
- *
- *
- * $Log: teles3c.c,v $
- * Revision 1.3  1998/11/15 23:55:27  keil
- * changes from 2.0
- *
- * Revision 1.2  1998/02/02 13:27:07  keil
- * New
- *
- *
- *
- */
-
-#define __NO_VERSION__
-#include "hisax.h"
-#include "hfc_2bds0.h"
-#include "isdnl1.h"
-
-extern const char *CardType[];
-
-const char *teles163c_revision = "$Revision: 1.3 $";
-
-static void
-t163c_interrupt(int intno, void *dev_id, struct pt_regs *regs)
-{
-	struct IsdnCardState *cs = dev_id;
-	u_char val, stat;
-
-	if (!cs) {
-		printk(KERN_WARNING "teles3c: Spurious interrupt!\n");
-		return;
-	}
-	if ((HFCD_ANYINT | HFCD_BUSY_NBUSY) & 
-		(stat = cs->BC_Read_Reg(cs, HFCD_DATA, HFCD_STAT))) {
-		val = cs->BC_Read_Reg(cs, HFCD_DATA, HFCD_INT_S1);
-		if (cs->debug & L1_DEB_ISAC)
-			debugl1(cs, "teles3c: stat(%02x) s1(%02x)", stat, val);
-		hfc2bds0_interrupt(cs, val);
-	} else {
-		if (cs->debug & L1_DEB_ISAC)
-			debugl1(cs, "teles3c: irq_no_irq stat(%02x)", stat);
-	}
-}
-
-static void
-t163c_Timer(struct IsdnCardState *cs)
-{
-	cs->hw.hfcD.timer.expires = jiffies + 75;
-	/* WD RESET */
-/*	WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt | 0x80);
-	add_timer(&cs->hw.hfcD.timer);
-*/
-}
-
-void
-release_io_t163c(struct IsdnCardState *cs)
-{
-	release2bds0(cs);
-	del_timer(&cs->hw.hfcD.timer);
-	if (cs->hw.hfcD.addr)
-		release_region(cs->hw.hfcD.addr, 2);
-}
-
-static void
-reset_t163c(struct IsdnCardState *cs)
-{
-	long flags;
-
-	printk(KERN_INFO "teles3c: resetting card\n");
-	cs->hw.hfcD.cirm = HFCD_RESET | HFCD_MEM8K;
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm);	/* Reset On */
-	save_flags(flags);
-	sti();
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(3);
-	cs->hw.hfcD.cirm = HFCD_MEM8K;
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm);	/* Reset Off */
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(1);
-	cs->hw.hfcD.cirm |= HFCD_INTB;
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm);	/* INT B */
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CLKDEL, 0x0e);
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_TEST, HFCD_AUTO_AWAKE); /* S/T Auto awake */
-	cs->hw.hfcD.ctmt = HFCD_TIM25 | HFCD_AUTO_TIMER;
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt);
-	cs->hw.hfcD.int_m2 = HFCD_IRQ_ENABLE;
-	cs->hw.hfcD.int_m1 = HFCD_INTS_B1TRANS | HFCD_INTS_B2TRANS |
-		HFCD_INTS_DTRANS | HFCD_INTS_B1REC | HFCD_INTS_B2REC |
-		HFCD_INTS_DREC | HFCD_INTS_L1STATE;
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_INT_M1, cs->hw.hfcD.int_m1);
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_INT_M2, cs->hw.hfcD.int_m2);
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_STATES, HFCD_LOAD_STATE | 2); /* HFC ST 2 */
-	udelay(10);
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_STATES, 2); /* HFC ST 2 */
-	cs->hw.hfcD.mst_m = 0;
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_MST_MODE, HFCD_MASTER); /* HFC Master */
-	cs->hw.hfcD.sctrl = 0;
-	cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_SCTRL, cs->hw.hfcD.sctrl);
-	restore_flags(flags);
-}
-
-static int
-t163c_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
-	long flags;
-
-	if (cs->debug & L1_DEB_ISAC)
-		debugl1(cs, "teles3c: card_msg %x", mt);
-	switch (mt) {
-		case CARD_RESET:
-			reset_t163c(cs);
-			return(0);
-		case CARD_RELEASE:
-			release_io_t163c(cs);
-			return(0);
-		case CARD_SETIRQ:
-			cs->hw.hfcD.timer.expires = jiffies + 75;
-			add_timer(&cs->hw.hfcD.timer);
-			return(request_irq(cs->irq, &t163c_interrupt,
-					I4L_IRQ_FLAG, "HiSax", cs));
-		case CARD_INIT:
-			init2bds0(cs);
-			save_flags(flags);
-			sti();
-			current->state = TASK_INTERRUPTIBLE;
-			schedule_timeout((80*HZ)/1000);
-			cs->hw.hfcD.ctmt |= HFCD_TIM800;
-			cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt); 
-			cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_MST_MODE, cs->hw.hfcD.mst_m);
-			restore_flags(flags);
-			return(0);
-		case CARD_TEST:
-			return(0);
-	}
-	return(0);
-}
-
-int __init 
-setup_t163c(struct IsdnCard *card)
-{
-	struct IsdnCardState *cs = card->cs;
-	char tmp[64];
-
-	strcpy(tmp, teles163c_revision);
-	printk(KERN_INFO "HiSax: Teles 16.3c driver Rev. %s\n", HiSax_getrev(tmp));
-	if (cs->typ != ISDN_CTYPE_TELES3C)
-		return (0);
-	cs->debug = 0xff;
-	cs->hw.hfcD.addr = card->para[1] & 0xfffe;
-	cs->irq = card->para[0];
-	cs->hw.hfcD.cip = 0;
-	cs->hw.hfcD.int_s1 = 0;
-	cs->hw.hfcD.send = NULL;
-	cs->bcs[0].hw.hfc.send = NULL;
-	cs->bcs[1].hw.hfc.send = NULL;
-	cs->hw.hfcD.bfifosize = 1024 + 512;
-	cs->hw.hfcD.dfifosize = 512;
-	cs->ph_state = 0;
-	cs->hw.hfcD.fifo = 255;
-	if (check_region((cs->hw.hfcD.addr), 2)) {
-		printk(KERN_WARNING
-		       "HiSax: %s config port %x-%x already in use\n",
-		       CardType[card->typ],
-		       cs->hw.hfcD.addr,
-		       cs->hw.hfcD.addr + 2);
-		return (0);
-	} else {
-		request_region(cs->hw.hfcD.addr, 2, "teles3c isdn");
-	}
-	/* Teles 16.3c IO ADR is 0x200 | YY0U (YY Bit 15/14 address) */
-	outb(0x00, cs->hw.hfcD.addr);
-	outb(0x56, cs->hw.hfcD.addr | 1);
-	printk(KERN_INFO
-	       "teles3c: defined at 0x%x IRQ %d HZ %d\n",
-	       cs->hw.hfcD.addr,
-	       cs->irq, HZ);
-
-	set_cs_func(cs);
-	cs->hw.hfcD.timer.function = (void *) t163c_Timer;
-	cs->hw.hfcD.timer.data = (long) cs;
-	init_timer(&cs->hw.hfcD.timer);
-	reset_t163c(cs);
-	cs->cardmsg = &t163c_card_msg;
-	return (1);
-}
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/isdn/isdn_common.c linux/drivers/isdn/isdn_common.c
--- v2.4.0-prerelease/linux/drivers/isdn/isdn_common.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/isdn/isdn_common.c	Tue Jan  2 16:45:38 2001
@@ -68,7 +68,7 @@
 extern char *isdn_v110_revision;
 
 #ifdef CONFIG_ISDN_DIVERSION
-isdn_divert_if *divert_if; /* interface to diversion module */
+static isdn_divert_if *divert_if; /* = NULL */
 #endif CONFIG_ISDN_DIVERSION
 
 
@@ -2118,7 +2118,6 @@
 }
 
 #ifdef CONFIG_ISDN_DIVERSION
-extern isdn_divert_if *divert_if;
 
 static char *map_drvname(int di)
 {
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/md/raid5.c linux/drivers/md/raid5.c
--- v2.4.0-prerelease/linux/drivers/md/raid5.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/md/raid5.c	Thu Jan  4 12:50:17 2001
@@ -1096,8 +1096,11 @@
 				bh->b_rdev = bh->b_dev;
 				bh->b_rsector = bh->b_blocknr * (bh->b_size>>9);
 				generic_make_request(action[i]-1, bh);
-			} else
+			} else {
 				PRINTK("skip op %d on disc %d for sector %ld\n", action[i]-1, i, sh->sector);
+				clear_bit(BH_Lock, &bh->b_state);
+				set_bit(STRIPE_HANDLE, &sh->state);
+			}
 		}
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/radio/radio-aimslab.c linux/drivers/media/radio/radio-aimslab.c
--- v2.4.0-prerelease/linux/drivers/media/radio/radio-aimslab.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/media/radio/radio-aimslab.c	Mon Jan  1 10:14:31 2001
@@ -338,7 +338,7 @@
 		return -EINVAL;
 	}
 
-	if (request_region(io, 2, "rtrack")) 
+	if (!request_region(io, 2, "rtrack")) 
 	{
 		printk(KERN_ERR "rtrack: port 0x%x already in use\n", io);
 		return -EBUSY;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/radio/radio-aztech.c linux/drivers/media/radio/radio-aztech.c
--- v2.4.0-prerelease/linux/drivers/media/radio/radio-aztech.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/media/radio/radio-aztech.c	Mon Jan  1 10:14:31 2001
@@ -289,7 +289,7 @@
 		return -EINVAL;
 	}
 
-	if (request_region(io, 2, "aztech")) 
+	if (!request_region(io, 2, "aztech")) 
 	{
 		printk(KERN_ERR "aztech: port 0x%x already in use\n", io);
 		return -EBUSY;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/radio/radio-rtrack2.c linux/drivers/media/radio/radio-rtrack2.c
--- v2.4.0-prerelease/linux/drivers/media/radio/radio-rtrack2.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/media/radio/radio-rtrack2.c	Mon Jan  1 10:14:31 2001
@@ -230,7 +230,7 @@
 		printk(KERN_ERR "You must set an I/O address with io=0x20c or io=0x30c\n");
 		return -EINVAL;
 	}
-	if (request_region(io, 4, "rtrack2")) 
+	if (!request_region(io, 4, "rtrack2")) 
 	{
 		printk(KERN_ERR "rtrack2: port 0x%x already in use\n", io);
 		return -EBUSY;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/radio/radio-sf16fmi.c linux/drivers/media/radio/radio-sf16fmi.c
--- v2.4.0-prerelease/linux/drivers/media/radio/radio-sf16fmi.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/media/radio/radio-sf16fmi.c	Mon Jan  1 10:14:31 2001
@@ -291,7 +291,7 @@
 		printk(KERN_ERR "You must set an I/O address with io=0x???\n");
 		return -EINVAL;
 	}
-	if (request_region(io, 2, "fmi")) 
+	if (!request_region(io, 2, "fmi")) 
 	{
 		printk(KERN_ERR "fmi: port 0x%x already in use\n", io);
 		return -EBUSY;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/radio/radio-terratec.c linux/drivers/media/radio/radio-terratec.c
--- v2.4.0-prerelease/linux/drivers/media/radio/radio-terratec.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/media/radio/radio-terratec.c	Mon Jan  1 10:14:31 2001
@@ -309,7 +309,7 @@
 		printk(KERN_ERR "You must set an I/O address with io=0x???\n");
 		return -EINVAL;
 	}
-	if (request_region(io, 2, "terratec")) 
+	if (!request_region(io, 2, "terratec")) 
 	{
 		printk(KERN_ERR "TerraTec: port 0x%x already in use\n", io);
 		return -EBUSY;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/radio/radio-trust.c linux/drivers/media/radio/radio-trust.c
--- v2.4.0-prerelease/linux/drivers/media/radio/radio-trust.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/media/radio/radio-trust.c	Mon Jan  1 10:14:31 2001
@@ -300,7 +300,7 @@
 		printk(KERN_ERR "You must set an I/O address with io=0x???\n");
 		return -EINVAL;
 	}
-	if(request_region(io, 2, "Trust FM Radio")) {
+	if(!request_region(io, 2, "Trust FM Radio")) {
 		printk(KERN_ERR "trust: port 0x%x already in use\n", io);
 		return -EBUSY;
 	}
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/radio/radio-typhoon.c linux/drivers/media/radio/radio-typhoon.c
--- v2.4.0-prerelease/linux/drivers/media/radio/radio-typhoon.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/media/radio/radio-typhoon.c	Mon Jan  1 10:14:31 2001
@@ -350,7 +350,7 @@
 
 	printk(KERN_INFO BANNER);
 	io = typhoon_unit.iobase;
-	if (request_region(io, 8, "typhoon")) {
+	if (!request_region(io, 8, "typhoon")) {
 		printk(KERN_ERR "radio-typhoon: port 0x%x already in use\n",
 		       typhoon_unit.iobase);
 		return -EBUSY;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/radio/radio-zoltrix.c linux/drivers/media/radio/radio-zoltrix.c
--- v2.4.0-prerelease/linux/drivers/media/radio/radio-zoltrix.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/media/radio/radio-zoltrix.c	Mon Jan  1 10:14:31 2001
@@ -361,7 +361,7 @@
 	}
 
 	zoltrix_radio.priv = &zoltrix_unit;
-	if (request_region(io, 2, "zoltrix")) {
+	if (!request_region(io, 2, "zoltrix")) {
 		printk(KERN_ERR "zoltrix: port 0x%x already in use\n", io);
 		return -EBUSY;
 	}
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/video/cpia_usb.c linux/drivers/media/video/cpia_usb.c
--- v2.4.0-prerelease/linux/drivers/media/video/cpia_usb.c	Sun Nov 19 18:44:09 2000
+++ linux/drivers/media/video/cpia_usb.c	Thu Jan  4 13:15:32 2001
@@ -557,7 +557,7 @@
 static void cpia_disconnect(struct usb_device *dev, void *ptr);
 
 static struct usb_device_id cpia_id_table [] = {
-	{ idVendor: 0x0553, idProduct: 0x0002 },
+	{ USB_DEVICE(0x0553, 0x0002) },
 	{ }					/* Terminating entry */
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/media/video/videodev.c linux/drivers/media/video/videodev.c
--- v2.4.0-prerelease/linux/drivers/media/video/videodev.c	Tue Jul 18 22:35:33 2000
+++ linux/drivers/media/video/videodev.c	Tue Jan  2 16:45:37 2001
@@ -143,12 +143,12 @@
 static int video_open(struct inode *inode, struct file *file)
 {
 	unsigned int minor = MINOR(inode->i_rdev);
-	int err;
+	int err, retval = 0;
 	struct video_device *vfl;
 	
 	if(minor>=VIDEO_NUM_DEVICES)
 		return -ENODEV;
-		
+	lock_kernel();		
 	vfl=video_device[minor];
 	if(vfl==NULL) {
 		char modname[20];
@@ -156,12 +156,17 @@
 		sprintf (modname, "char-major-%d-%d", VIDEO_MAJOR, minor);
 		request_module(modname);
 		vfl=video_device[minor];
-		if (vfl==NULL)
-			return -ENODEV;
+		if (vfl==NULL) {
+			retval = -ENODEV;
+			goto error_out;
+		}
+	}
+	if(vfl->busy) {
+		retval = -EBUSY;
+		goto error_out;
 	}
-	if(vfl->busy)
-		return -EBUSY;
 	vfl->busy=1;		/* In case vfl->open sleeps */
+	unlock_kernel();
 	
 	if(vfl->open)
 	{
@@ -173,6 +178,9 @@
 		}
 	}
 	return 0;
+error_out:
+	unlock_kernel();
+	return retval;
 }
 
 /*
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/7990.c linux/drivers/net/7990.c
--- v2.4.0-prerelease/linux/drivers/net/7990.c	Wed Aug 18 11:36:41 1999
+++ linux/drivers/net/7990.c	Thu Jan  4 13:00:55 2001
@@ -100,7 +100,6 @@
 /* #define to 0 or 1 appropriately */
 #define DEBUG_IRING 0
 /* Set up the Lance Rx and Tx rings and the init block */
-/* Sets dev->tbusy */
 static void lance_init_ring (struct net_device *dev)
 {
         struct lance_private *lp = (struct lance_private *) dev->priv;
@@ -111,8 +110,6 @@
 
         aib = lp->lance_init_block;
 
-        /* Lock out other processes while setting up hardware */
-        dev->tbusy = 1;
         lp->rx_new = lp->tx_new = 0;
         lp->rx_old = lp->tx_old = 0;
 
@@ -143,6 +140,7 @@
         if (DEBUG_IRING)
                 printk ("TX rings:\n");
     
+	lp->tx_full = 0;
         /* Setup the Tx ring entries */
         for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
                 leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
@@ -232,9 +230,6 @@
         load_csrs (lp);
         lance_init_ring (dev);
         dev->trans_start = jiffies;
-        dev->interrupt = 0;
-        dev->start = 1;
-        dev->tbusy = 0;
         status = init_restart_lance (lp);
 #ifdef DEBUG_DRIVER
         printk ("Lance restart=%d\n", status);
@@ -409,19 +404,18 @@
         struct lance_private *lp = (struct lance_private *)dev->priv;
         int csr0;
         DECLARE_LL;
-        
+
+	spin_lock (&lp->devlock);
+
         WRITERAP(LE_CSR0);              /* LANCE Controller Status */
         csr0 = READRDP();
 
         PRINT_RINGS();
         
-        if (!(csr0 & LE_C0_INTR))       /* Check if any interrupt has */
+        if (!(csr0 & LE_C0_INTR)) {     /* Check if any interrupt has */
+		spin_lock (&lp->devlock);
                 return;                 /* been generated by the Lance. */
-
-        if (dev->interrupt)
-                printk ("%s: again", dev->name);
-
-        dev->interrupt = 1;
+	}
 
         /* Acknowledge all the interrupt sources ASAP */
         WRITERDP(csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
@@ -449,27 +443,32 @@
                 WRITERDP(LE_C0_STRT);
         }
 
-        if ((TX_BUFFS_AVAIL >= 0) && dev->tbusy) {
-                dev->tbusy = 0;
-                mark_bh (NET_BH);
+        if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+		lp->tx_full = 0;
+		netif_wake_queue (dev);
         }
         
         WRITERAP(LE_CSR0);
         WRITERDP(LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
-        
-        dev->interrupt = 0;
+
+	spin_unlock (&lp->devlock);
 }
 
 int lance_open (struct net_device *dev)
 {
         struct lance_private *lp = (struct lance_private *)dev->priv;
+	int res;
         DECLARE_LL;
         
         /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
         if (request_irq(lp->irq, lance_interrupt, 0, lp->name, dev))
                 return -EAGAIN;
 
-        return lance_reset(dev);
+        res = lance_reset(dev);
+	lp->devlock = SPIN_LOCK_UNLOCKED;
+	netif_start_queue (dev);
+
+	return res;
 }
 
 int lance_close (struct net_device *dev)
@@ -477,8 +476,7 @@
         struct lance_private *lp = (struct lance_private *) dev->priv;
         DECLARE_LL;
         
-        dev->start = 0;
-        dev->tbusy = 1;
+	netif_stop_queue (dev);
 
         /* Stop the LANCE */
         WRITERAP(LE_CSR0);
@@ -489,41 +487,30 @@
         return 0;
 }
 
+void lance_tx_timeout(struct net_device *dev)
+{
+	printk("lance_tx_timeout\n");
+	lance_reset(dev);
+	dev->trans_start = jiffies;
+	netif_start_queue (dev);
+}
+
+
 int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
 {
         struct lance_private *lp = (struct lance_private *)dev->priv;
         volatile struct lance_init_block *ib = lp->init_block;
         int entry, skblen, len;
-        int status = 0;
         static int outs;
+	unsigned long flags;
         DECLARE_LL;
 
-	lance_reset(dev);
-
-        /* Transmitter timeout, serious problems */
-        if (dev->tbusy) {
-                int tickssofar = jiffies - dev->trans_start;
-            
-                if (tickssofar < 100) {
-                        status = -1;
-                } else {
-                        printk ("%s: transmit timed out, status %04x, resetting\n",
-                                dev->name, READRDP());
-                        lance_reset (dev);
-                }
-                return status;
-        }
-
-        /* Block a timer-based transmit from overlapping. */
-        if (test_and_set_bit (0, (void *) &dev->tbusy) != 0) {
-                printk ("Transmitter access conflict.\n");
+        if (!TX_BUFFS_AVAIL)
                 return -1;
-        }
 
-        skblen = skb->len;
+	netif_stop_queue (dev);
 
-        if (!TX_BUFFS_AVAIL)
-                return -1;
+        skblen = skb->len;
 
 #ifdef DEBUG_DRIVER
         /* dump the packet */
@@ -554,10 +541,14 @@
         dev->trans_start = jiffies;
         dev_kfree_skb (skb);
     
+	spin_lock_irqsave (&lp->devlock, flags);
         if (TX_BUFFS_AVAIL)
-                dev->tbusy = 0;
+		netif_start_queue (dev);
+	else
+		lp->tx_full = 1;
+	spin_unlock_irqrestore (&lp->devlock, flags);
 
-        return status;
+        return 0;
 }
 
 struct net_device_stats *lance_get_stats (struct net_device *dev)
@@ -623,11 +614,12 @@
 {
         struct lance_private *lp = (struct lance_private *) dev->priv;
         volatile struct lance_init_block *ib = lp->init_block;
+	int stopped;
         DECLARE_LL;
 
-        while (dev->tbusy)
-                schedule();
-        set_bit (0, (void *) &dev->tbusy);
+	stopped = netif_queue_stopped(dev);
+	if (!stopped)
+		netif_stop_queue (dev);
 
         while (lp->tx_old != lp->tx_new)
                 schedule();
@@ -644,6 +636,8 @@
         }
         load_csrs (lp);
         init_restart_lance (lp);
-        dev->tbusy = 0;
+
+	if (!stopped)
+		netif_start_queue (dev);
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/7990.h linux/drivers/net/7990.h
--- v2.4.0-prerelease/linux/drivers/net/7990.h	Wed Aug 18 11:36:45 1999
+++ linux/drivers/net/7990.h	Thu Jan  4 13:00:55 2001
@@ -127,7 +127,9 @@
          */
         void (*writerap)(void *, unsigned short);
         void (*writerdp)(void *, unsigned short);
-        unsigned short (*readrdp)(struct lance_private *);
+        unsigned short (*readrdp)(void *);
+	spinlock_t devlock;
+	char tx_full;
 };
 
 #define CRC_POLYNOMIAL_BE 0x04c11db7UL  /* Ethernet CRC, big endian */
@@ -252,5 +254,6 @@
 extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
 extern struct net_device_stats *lance_get_stats (struct net_device *dev);
 extern void lance_set_multicast (struct net_device *dev);
+extern void lance_tx_timeout(struct net_device *dev);
 
 #endif /* ndef _7990_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/8390.c linux/drivers/net/8390.c
--- v2.4.0-prerelease/linux/drivers/net/8390.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/net/8390.c	Thu Jan  4 13:00:55 2001
@@ -1054,7 +1054,9 @@
 	long e8390_base = dev->base_addr;
 	struct ei_device *ei_local = (struct ei_device *) dev->priv;
 	int i;
-	int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+	int endcfg = ei_local->word16
+	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
+	    : 0x48;
     
 	if(sizeof(struct e8390_pkt_hdr)!=4)
     		panic("8390.c: header struct mispacked\n");    
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/8390.h linux/drivers/net/8390.h
--- v2.4.0-prerelease/linux/drivers/net/8390.h	Tue Oct 31 12:42:26 2000
+++ linux/drivers/net/8390.h	Thu Jan  4 13:56:29 2001
@@ -67,6 +67,7 @@
 	unsigned char mcfilter[8];
 	unsigned open:1;
 	unsigned word16:1;  		/* We have the 16-bit (vs 8-bit) version of the card. */
+	unsigned bigendian:1;		/* 16-bit big endian mode */
 	unsigned txing:1;		/* Transmit Active */
 	unsigned irqlock:1;		/* 8390's intrs disabled when '1'. */
 	unsigned dmaing:1;		/* Remote DMA Active */
@@ -117,7 +118,8 @@
  */
  
 #if defined(CONFIG_MAC) || defined(CONFIG_AMIGA_PCMCIA) || \
-    defined(CONFIG_ARIADNE2) || defined(CONFIG_ARIADNE2_MODULE)
+    defined(CONFIG_ARIADNE2) || defined(CONFIG_ARIADNE2_MODULE) || \
+    defined(CONFIG_HYDRA) || defined(CONFIG_HYDRA_MODULE)
 #define EI_SHIFT(x)	(ei_local->reg_offset[x])
 #else
 #define EI_SHIFT(x)	(x)
@@ -165,6 +167,7 @@
 
 /* Bits in EN0_DCFG - Data config register */
 #define ENDCFG_WTS	0x01	/* word transfer mode selection */
+#define ENDCFG_BOS	0x02	/* byte order selection */
 
 /* Page 1 register offsets. */
 #define EN1_PHYS   EI_SHIFT(0x01)	/* This board's physical enet addr RD WR */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/Config.in linux/drivers/net/Config.in
--- v2.4.0-prerelease/linux/drivers/net/Config.in	Mon Jan  1 09:38:35 2001
+++ linux/drivers/net/Config.in	Thu Jan  4 13:00:55 2001
@@ -42,7 +42,7 @@
    fi
    if [ "$CONFIG_ZORRO" = "y" ]; then
       tristate '  Ariadne support' CONFIG_ARIADNE
-      tristate '  Ariadne II support' CONFIG_ARIADNE2
+      tristate '  Ariadne II and X-Surf support' CONFIG_NE2K_ZORRO
       tristate '  A2065 support' CONFIG_A2065
       tristate '  Hydra support' CONFIG_HYDRA
    fi
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/Makefile linux/drivers/net/Makefile
--- v2.4.0-prerelease/linux/drivers/net/Makefile	Mon Jan  1 09:38:35 2001
+++ linux/drivers/net/Makefile	Thu Jan  4 13:00:55 2001
@@ -193,7 +193,7 @@
 obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o
 obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o
 obj-$(CONFIG_A2065) += a2065.o
-obj-$(CONFIG_HYDRA) += hydra.o
+obj-$(CONFIG_HYDRA) += hydra.o 8390.o
 obj-$(CONFIG_ARIADNE) += ariadne.o
 obj-$(CONFIG_CS89x0) += cs89x0.o
 obj-$(CONFIG_MACSONIC) += macsonic.o
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/Space.c linux/drivers/net/Space.c
--- v2.4.0-prerelease/linux/drivers/net/Space.c	Sun Nov 19 18:44:10 2000
+++ linux/drivers/net/Space.c	Thu Jan  4 13:00:55 2001
@@ -86,8 +86,6 @@
 extern int sgiseeq_probe(struct net_device *);
 extern int atarilance_probe(struct net_device *);
 extern int sun3lance_probe(struct net_device *);
-extern int ariadne2_probe(struct net_device *);
-extern int hydra_probe(struct net_device *);
 extern int apne_probe(struct net_device *);
 extern int bionet_probe(struct net_device *);
 extern int pamsnet_probe(struct net_device *);
@@ -337,12 +335,6 @@
 #endif
 #ifdef CONFIG_SUN3LANCE         /* sun3 onboard Lance chip */
 	{sun3lance_probe, 0},
-#endif
-#ifdef CONFIG_ARIADNE2		/* Village Tronic Ariadne II Ethernet Board */
-	{ariadne2_probe, 0},
-#endif
-#ifdef CONFIG_HYDRA		/* Hydra Systems Amiganet Ethernet board */
-	{hydra_probe, 0},
 #endif
 #ifdef CONFIG_APNE		/* A1200 PCMCIA NE2000 */
 	{apne_probe, 0},
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/ariadne2.c linux/drivers/net/ariadne2.c
--- v2.4.0-prerelease/linux/drivers/net/ariadne2.c	Sun Nov 19 18:44:10 2000
+++ linux/drivers/net/ariadne2.c	Thu Jan  4 13:00:55 2001
@@ -1,7 +1,7 @@
 /*
- *  Amiga Linux/m68k Ariadne II Ethernet Driver
+ *  Amiga Linux/m68k and Linux/PPC Ariadne II and X-Surf Ethernet Driver
  *
- *  (C) Copyright 1998 by some Elitist 680x0 Users(TM)
+ *  (C) Copyright 1998-2000 by some Elitist 680x0 Users(TM)
  *
  *  ---------------------------------------------------------------------------
  *
@@ -15,8 +15,8 @@
  *
  *  ---------------------------------------------------------------------------
  *
- *  The Ariadne II is a Zorro-II board made by Village Tronic. It contains a
- *  Realtek RTL8019AS Ethernet Controller.
+ *  The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS
+ *  Ethernet Controllers.
  */
 
 #include <linux/module.h>
@@ -38,10 +38,6 @@
 #include "8390.h"
 
 
-#define ARIADNE2_BASE		0x0300
-#define ARIADNE2_BOOTROM	0xc000
-
-
 #define NE_BASE		(dev->base_addr)
 #define NE_CMD		(0x00*2)
 #define NE_DATAPORT	(0x10*2)	/* NatSemi-defined port window offset. */
@@ -65,12 +61,24 @@
 
 #define WORDSWAP(a)	((((a)>>8)&0xff) | ((a)<<8))
 
-int ariadne2_probe(struct net_device *dev);
-static int ariadne2_init(struct net_device *dev, unsigned long board);
+#ifdef MODULE
+static struct net_device *root_ariadne2_dev = NULL;
+#endif
 
+static const struct card_info {
+    zorro_id id;
+    const char *name;
+    unsigned int offset;
+} cards[] __initdata = {
+    { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 },
+    { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 },
+};
+
+static int __init ariadne2_probe(void);
+static int __init ariadne2_init(struct net_device *dev, unsigned long board,
+				const char *name, unsigned long ioaddr);
 static int ariadne2_open(struct net_device *dev);
 static int ariadne2_close(struct net_device *dev);
-
 static void ariadne2_reset_8390(struct net_device *dev);
 static void ariadne2_get_8390_hdr(struct net_device *dev,
 				  struct e8390_pkt_hdr *hdr, int ring_page);
@@ -79,40 +87,56 @@
 static void ariadne2_block_output(struct net_device *dev, const int count,
 				  const unsigned char *buf,
 				  const int start_page);
+static void __exit ariadne2_cleanup(void);
 
-int __init ariadne2_probe(struct net_device *dev)
+static int __init ariadne2_probe(void)
 {
+    struct net_device *dev;
     struct zorro_dev *z = NULL;
     unsigned long board, ioaddr;
-    int err;
-
-    SET_MODULE_OWNER(dev);
+    int err = -ENODEV;
+    int i;
 
-    while ((z = zorro_find_device(ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, z))) {
+    while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
+	for (i = ARRAY_SIZE(cards)-1; i >= 0; i--)
+	    if (z->id == cards[i].id)
+		break;
+	if (i < 0)
+	    continue;
 	board = z->resource.start;
-	ioaddr = board+ARIADNE2_BASE*2;
-	if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, dev->name))
+	ioaddr = board+cards[i].offset;
+	dev = init_etherdev(0, 0);
+	SET_MODULE_OWNER(dev);
+	if (!dev)
+	    return -ENOMEM;
+	if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, dev->name)) {
+	    kfree(dev);
 	    continue;
-	if ((err = ariadne2_init(dev, ZTWO_VADDR(board)))) {
+	}
+	if ((err = ariadne2_init(dev, board, cards[i].name,
+				 ZTWO_VADDR(ioaddr)))) {
 	    release_mem_region(ioaddr, NE_IO_EXTENT*2);
+	    kfree(dev);
 	    return err;
 	}
-	return 0;
+	err = 0;
     }
-    return -ENODEV;
+
+    if (err == -ENODEV)
+	printk("No Ariadne II or X-Surf ethernet card found.\n");
+    return err;
 }
 
-static int __init ariadne2_init(struct net_device *dev, unsigned long board)
+static int __init ariadne2_init(struct net_device *dev, unsigned long board,
+				const char *name, unsigned long ioaddr)
 {
     int i;
     unsigned char SA_prom[32];
-    const char *name = NULL;
     int start_page, stop_page;
     static u32 ariadne2_offsets[16] = {
 	0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
 	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
     };
-    unsigned long ioaddr = board+ARIADNE2_BASE*2;
 
     /* Reset card. Who knows what dain-bramaged state it was left in. */
     {
@@ -166,8 +190,6 @@
     start_page = NESM_START_PG;
     stop_page = NESM_STOP_PG;
 
-    name = "NE2000";
-
     dev->base_addr = ioaddr;
     dev->irq = IRQ_AMIGA_PORTS;
 
@@ -188,8 +210,8 @@
 	dev->dev_addr[i] = SA_prom[i];
     }
 
-    printk("%s: AriadNE2 at 0x%08lx, Ethernet Address "
-	   "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, board,
+    printk("%s: %s at 0x%08lx, Ethernet Address "
+	   "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, name, board,
 	   dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
 	   dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
 
@@ -207,6 +229,10 @@
     ei_status.reg_offset = ariadne2_offsets;
     dev->open = &ariadne2_open;
     dev->stop = &ariadne2_close;
+#ifdef MODULE
+    ei_status.priv = (unsigned long)root_ariadne2_dev;
+    root_ariadne2_dev = dev;
+#endif
     NS8390_init(dev, 0);
     return 0;
 }
@@ -379,26 +405,21 @@
     return;
 }
 
-#ifdef MODULE
-static struct net_device ariadne2_dev;
-
-int init_module(void)
+static void __exit ariadne2_cleanup(void)
 {
-    int err;
+#ifdef MODULE
+    struct net_device *dev, *next;
 
-    ariadne2_dev.init = ariadne2_probe;
-    if ((err = register_netdev(&ariadne2_dev))) {
-	printk(KERN_WARNING "No AriadNE2 ethernet card found.\n");
-	return err;
+    while ((dev = root_ariadne2_dev)) {
+	next = (struct net_device *)(ei_status.priv);
+	unregister_netdev(dev);
+	free_irq(IRQ_AMIGA_PORTS, dev);
+	release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT*2);
+	kfree(dev);
+	root_ariadne2_dev = next;
     }
-    return 0;
-}
-
-void cleanup_module(void)
-{
-    free_irq(IRQ_AMIGA_PORTS, &ariadne2_dev);
-    release_mem_region(ZTWO_PADDR(ariadne2_dev.base_addr), NE_IO_EXTENT*2);
-    unregister_netdev(&ariadne2_dev);
+#endif
 }
 
-#endif	/* MODULE */
+module_init(ariadne2_probe);
+module_exit(ariadne2_cleanup);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/hamradio/6pack.c linux/drivers/net/hamradio/6pack.c
--- v2.4.0-prerelease/linux/drivers/net/hamradio/6pack.c	Thu May  4 11:31:21 2000
+++ linux/drivers/net/hamradio/6pack.c	Thu Jan  4 12:50:12 2001
@@ -13,10 +13,9 @@
  *		Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
  *
  */
- 
+
 #include <linux/config.h>
 #include <linux/module.h>
-
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/bitops.h>
@@ -37,24 +36,109 @@
 #include <linux/init.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
-/* 
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <unistd.h> 
-*/
 
-#include "6pack.h"
+#define SIXPACK_VERSION    "Revision: 0.3.0"
 
-typedef unsigned char byte;
+/* sixpack priority commands */
+#define SIXP_SEOF	0x40	/* start and end of a 6pack frame */
+#define SIXP_TX_URUN	0x48	/* transmit overrun */
+#define SIXP_RX_ORUN	0x50	/* receive overrun */
+#define SIXP_RX_BUF_OVL	0x58	/* receive buffer overflow */
+
+#define SIXP_CHKSUM	0xFF	/* valid checksum of a 6pack frame */
+
+/* masks to get certain bits out of the status bytes sent by the TNC */
+
+#define SIXP_CMD_MASK		0xC0
+#define SIXP_CHN_MASK		0x07
+#define SIXP_PRIO_CMD_MASK	0x80
+#define SIXP_STD_CMD_MASK	0x40
+#define SIXP_PRIO_DATA_MASK	0x38
+#define SIXP_TX_MASK		0x20
+#define SIXP_RX_MASK		0x10
+#define SIXP_RX_DCD_MASK	0x18
+#define SIXP_LEDS_ON		0x78
+#define SIXP_LEDS_OFF		0x60
+#define SIXP_CON		0x08
+#define SIXP_STA		0x10
+
+#define SIXP_FOUND_TNC		0xe9
+#define SIXP_CON_ON		0x68
+#define SIXP_DCD_MASK		0x08
+#define SIXP_DAMA_OFF		0
+
+/* default level 2 parameters */
+#define SIXP_TXDELAY			25	/* in 10 ms */
+#define SIXP_PERSIST			50	/* in 256ths */
+#define SIXP_SLOTTIME			10	/* in 10 ms */
+#define SIXP_INIT_RESYNC_TIMEOUT	150	/* in 10 ms */
+#define SIXP_RESYNC_TIMEOUT		500	/* in 10 ms */
+
+/* 6pack configuration. */
+#define SIXP_NRUNIT			256	/* MAX number of 6pack channels */
+#define SIXP_MTU			256	/* Default MTU */
+
+enum sixpack_flags {
+	SIXPF_INUSE,	/* Channel in use	*/
+	SIXPF_ERROR,	/* Parity, etc. error	*/
+};
+
+struct sixpack {
+	int			magic;
+
+	/* Various fields. */
+	struct tty_struct	*tty;		/* ptr to TTY structure		*/
+	struct net_device	*dev;		/* easy for intr handling	*/
+
+	/* These are pointers to the malloc()ed frame buffers. */
+	unsigned char		*rbuff;		/* receiver buffer		*/
+	int			rcount;         /* received chars counter       */
+	unsigned char		*xbuff;		/* transmitter buffer		*/
+	unsigned char		*xhead;         /* pointer to next byte to XMIT */
+	int			xleft;          /* bytes left in XMIT queue     */
+
+	unsigned char		raw_buf[4];
+	unsigned char		cooked_buf[400];
+
+	unsigned int		rx_count;
+	unsigned int		rx_count_cooked;
+
+	/* 6pack interface statistics. */
+	struct net_device_stats stats;
+
+	int			mtu;		/* Our mtu (to spot changes!)   */
+	int			buffsize;       /* Max buffers sizes            */
+
+	unsigned long		flags;		/* Flag values/ mode etc	*/
+	unsigned char		mode;		/* 6pack mode			*/
+
+	/* 6pack stuff */
+	unsigned char		tx_delay;
+	unsigned char		persistance;
+	unsigned char		slottime;
+	unsigned char		duplex;
+	unsigned char		led_state;
+	unsigned char		status;
+	unsigned char		status1;
+	unsigned char		status2;
+	unsigned char		tx_enable;
+	unsigned char		tnc_ok;
+
+	struct timer_list	tx_t;
+	struct timer_list	resync_t;
+};
+
+/* should later be moved to include/net/ax25.h */
+#define AX25_6PACK_HEADER_LEN 0
+#define SIXPACK_MAGIC 0x5304
 
+static const char banner[] __initdata = KERN_INFO "AX.25: 6pack driver, " SIXPACK_VERSION " (dynamic channels, max=%d)\n";
 
 typedef struct sixpack_ctrl {
 	struct sixpack	ctrl;		/* 6pack things			*/
 	struct net_device	dev;		/* the device			*/
 } sixpack_ctrl_t;
-static sixpack_ctrl_t	**sixpack_ctrls = NULL;
+static sixpack_ctrl_t **sixpack_ctrls;
 int sixpack_maxdev = SIXP_NRUNIT;	/* Can be overridden with insmod! */
 
 static struct tty_ldisc	sp_ldisc;
@@ -62,26 +146,23 @@
 static void sp_start_tx_timer(struct sixpack *);
 static void sp_xmit_on_air(unsigned long);
 static void resync_tnc(unsigned long);
-void sixpack_decode(struct sixpack *, unsigned char[], int);
-int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
-
-void decode_prio_command(byte, struct sixpack *);
-void decode_std_command(byte, struct sixpack *);
-void decode_data(byte, struct sixpack *);
+static void sixpack_decode(struct sixpack *, unsigned char[], int);
+static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
+static int sixpack_init(struct net_device *dev);
+
+static void decode_prio_command(unsigned char, struct sixpack *);
+static void decode_std_command(unsigned char, struct sixpack *);
+static void decode_data(unsigned char, struct sixpack *);
 
 static int tnc_init(struct sixpack *);
 
 /* Find a free 6pack channel, and link in this `tty' line. */
-static inline struct sixpack *
-sp_alloc(void)
+static inline struct sixpack *sp_alloc(void)
 {
 	sixpack_ctrl_t *spp = NULL;
 	int i;
 
-	if (sixpack_ctrls == NULL) return NULL;	/* Master array missing ! */
-
-	for (i = 0; i < sixpack_maxdev; i++) 
-	{
+	for (i = 0; i < sixpack_maxdev; i++) {
 		spp = sixpack_ctrls[i];
 
 		if (spp == NULL)
@@ -92,14 +173,13 @@
 	}
 
 	/* Too many devices... */
-	if (i >= sixpack_maxdev) 
+	if (i >= sixpack_maxdev)
 		return NULL;
 
 	/* If no channels are available, allocate one */
 	if (!spp &&
 	    (sixpack_ctrls[i] = (sixpack_ctrl_t *)kmalloc(sizeof(sixpack_ctrl_t),
-						    GFP_KERNEL)) != NULL) 
-	{
+						    GFP_KERNEL)) != NULL) {
 		spp = sixpack_ctrls[i];
 		memset(spp, 0, sizeof(sixpack_ctrl_t));
 
@@ -108,26 +188,24 @@
 		spp->ctrl.tty         = NULL;
 		sprintf(spp->dev.name, "sp%d", i);
 		spp->dev.base_addr    = i;
-		spp->dev.priv         = (void*)&(spp->ctrl);
+		spp->dev.priv         = (void *) &spp->ctrl;
 		spp->dev.next         = NULL;
 		spp->dev.init         = sixpack_init;
 	}
 
-	if (spp != NULL) 
-	{
+	if (spp != NULL) {
 		/* register device so that it can be ifconfig'ed       */
 		/* sixpack_init() will be called as a side-effect         */
 		/* SIDE-EFFECT WARNING: sixpack_init() CLEARS spp->ctrl ! */
 
-		if (register_netdev(&(spp->dev)) == 0) 
-		{
+		if (register_netdev(&spp->dev) == 0) {
 			set_bit(SIXPF_INUSE, &spp->ctrl.flags);
-			spp->ctrl.dev = &(spp->dev);
-			spp->dev.priv = (void*)&(spp->ctrl);
-
-			return (&(spp->ctrl));
+			spp->ctrl.dev = &spp->dev;
+			spp->dev.priv = (void *) &spp->ctrl;
+			SET_MODULE_OWNER(&spp->dev);
+			return &spp->ctrl;
 		} else {
-		  	clear_bit(SIXPF_INUSE,&(spp->ctrl.flags));
+			clear_bit(SIXPF_INUSE, &spp->ctrl.flags);
 			printk(KERN_WARNING "sp_alloc() - register_netdev() failure.\n");
 		}
 	}
@@ -137,22 +215,18 @@
 
 
 /* Free a 6pack channel. */
-static inline void
-sp_free(struct sixpack *sp)
+static inline void sp_free(struct sixpack *sp)
 {
 	/* Free all 6pack frame buffers. */
 	if (sp->rbuff)
 		kfree(sp->rbuff);
 	sp->rbuff = NULL;
-	if (sp->xbuff) {
+	if (sp->xbuff)
 		kfree(sp->xbuff);
-	}
 	sp->xbuff = NULL;
 
-	if (!test_and_clear_bit(SIXPF_INUSE, &sp->flags)) 
-	{
+	if (!test_and_clear_bit(SIXPF_INUSE, &sp->flags))
 		printk(KERN_WARNING "%s: sp_free for already free unit.\n", sp->dev->name);
-	}
 }
 
 
@@ -161,8 +235,7 @@
 /* This is the routine that sends the received data to the kernel AX.25.
    'cmd' is the KISS command. For AX.25 data, it is zero. */
 
-static void
-sp_bump(struct sixpack *sp, char cmd)
+static void sp_bump(struct sixpack *sp, char cmd)
 {
 	struct sk_buff *skb;
 	int count;
@@ -170,13 +243,11 @@
 
 	count = sp->rcount+1;
 
-	sp->rx_bytes+=count;
+	sp->stats.rx_bytes += count;
 
-	skb = dev_alloc_skb(count);
-	if (skb == NULL)
-	{
+	if ((skb = dev_alloc_skb(count)) == NULL) {
 		printk(KERN_DEBUG "%s: memory squeeze, dropping packet.\n", sp->dev->name);
-		sp->rx_dropped++;
+		sp->stats.rx_dropped++;
 		return;
 	}
 
@@ -185,61 +256,54 @@
 	*ptr++ = cmd;	/* KISS command */
 
 	memcpy(ptr, (sp->cooked_buf)+1, count);
-	skb->mac.raw=skb->data;
-	skb->protocol=htons(ETH_P_AX25);
+	skb->mac.raw = skb->data;
+	skb->protocol = htons(ETH_P_AX25);
 	netif_rx(skb);
-	sp->rx_packets++;
+	sp->stats.rx_packets++;
 }
 
 
 /* ----------------------------------------------------------------------- */
 
 /* Encapsulate one AX.25 frame and stuff into a TTY queue. */
-static void
-sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
+static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
 {
 	unsigned char *p;
 	int actual, count;
 
-	if (len > sp->mtu) 	/* sp->mtu = AX25_MTU = max. PACLEN = 256 */ 
-	{
-		len = sp->mtu;
+	if (len > sp->mtu) {	/* sp->mtu = AX25_MTU = max. PACLEN = 256 */
 		printk(KERN_DEBUG "%s: truncating oversized transmit packet!\n", sp->dev->name);
-		sp->tx_dropped++;
+		sp->stats.tx_dropped++;
 		netif_start_queue(sp->dev);
 		return;
 	}
 
 	p = icp;
 
-	if (p[0] > 5)
-	{
+	if (p[0] > 5) {
 		printk(KERN_DEBUG "%s: invalid KISS command -- dropped\n", sp->dev->name);
 		netif_start_queue(sp->dev);
 		return;
 	}
 
-	if ((p[0] != 0) && (len > 2))
-	{
+	if ((p[0] != 0) && (len > 2)) {
 		printk(KERN_DEBUG "%s: KISS control packet too long -- dropped\n", sp->dev->name);
 		netif_start_queue(sp->dev);
 		return;
 	}
 
-	if ((p[0] == 0) && (len < 15))
-	{
+	if ((p[0] == 0) && (len < 15)) {
 		printk(KERN_DEBUG "%s: bad AX.25 packet to transmit -- dropped\n", sp->dev->name);
 		netif_start_queue(sp->dev);
-		sp->tx_dropped++;
+		sp->stats.tx_dropped++;
 		return;
 	}
 
 	count = encode_sixpack(p, (unsigned char *) sp->xbuff, len, sp->tx_delay);
 	sp->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
 
-	switch(p[0])
-	{
-		case 1:	sp->tx_delay = p[1]; 		return;
+	switch (p[0]) {
+		case 1:	sp->tx_delay = p[1];		return;
 		case 2:	sp->persistance = p[1];		return;
 		case 3: sp->slottime = p[1];		return;
 		case 4: /* ignored */			return;
@@ -251,17 +315,16 @@
 		   about the state of the DCD or of any timers, as the determination
 		   of the correct time to send is the job of the AX.25 layer. We send
 		   immediately after data has arrived. */
-		if (sp->duplex == 1){
+		if (sp->duplex == 1) {
 			sp->led_state = 0x70;
-			sp->tty->driver.write(sp->tty, 0, &(sp->led_state), 1);
+			sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
 			sp->tx_enable = 1;
 			actual = sp->tty->driver.write(sp->tty, 0, sp->xbuff, count);
 			sp->xleft = count - actual;
 			sp->xhead = sp->xbuff + actual;
 			sp->led_state = 0x60;
-			sp->tty->driver.write(sp->tty, 0, &(sp->led_state), 1);
-		}
-		else {
+			sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
+		} else {
 			sp->xleft = count;
 			sp->xhead = sp->xbuff;
 			sp->status2 = count;
@@ -282,14 +345,13 @@
 
 	/* First make sure we're connected. */
 	if (!sp || sp->magic != SIXPACK_MAGIC ||
-	    !netif_running(sp->dev)) {
+	    !netif_running(sp->dev))
 		return;
-	}
 
 	if (sp->xleft <= 0)  {
 		/* Now serial buffer is almost free & we can start
 		 * transmission of another packet */
-		sp->tx_packets++;
+		sp->stats.tx_packets++;
 		tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
 		sp->tx_enable = 0;
 		netif_wake_queue(sp->dev);
@@ -307,51 +369,45 @@
 
 /* Encapsulate an IP datagram and kick it into a TTY queue. */
 
-static int
-sp_xmit(struct sk_buff *skb, struct net_device *dev)
+static int sp_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct sixpack *sp = (struct sixpack*)(dev->priv);
+	struct sixpack *sp = (struct sixpack *) dev->priv;
 
 	/* We were not busy, so we are now... :-) */
-	if (skb != NULL) {
-		netif_stop_queue(dev);
-		sp->tx_bytes+=skb->len; /*---2.1.x---*/
-		sp_encaps(sp, skb->data, skb->len);
-		dev_kfree_skb(skb);
-	}
+	netif_stop_queue(dev);
+	sp->stats.tx_bytes += skb->len;
+	sp_encaps(sp, skb->data, skb->len);
+	dev_kfree_skb(skb);
 	return 0;
 }
-/* #endif */
 
 
 /* perform the persistence/slottime algorithm for CSMA access. If the persistence
    check was successful, write the data to the serial driver. Note that in case
    of DAMA operation, the data is not sent here. */
 
-static 
-void sp_xmit_on_air(unsigned long channel)
+static void sp_xmit_on_air(unsigned long channel)
 {
 	struct sixpack *sp = (struct sixpack *) channel;
 	int actual;
 	static unsigned char random;
-	
+
 	random = random * 17 + 41;
 
 	if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistance)) {
 		sp->led_state = 0x70;
-		sp->tty->driver.write(sp->tty, 0, &(sp->led_state),1);
+		sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
 		sp->tx_enable = 1;
 		actual = sp->tty->driver.write(sp->tty, 0, sp->xbuff, sp->status2);
 		sp->xleft -= actual;
 		sp->xhead += actual;
 		sp->led_state = 0x60;
-		sp->tty->driver.write(sp->tty, 0, &(sp->led_state),1);
+		sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
 		sp->status2 = 0;
 	} else
 		sp_start_tx_timer(sp);
-} /* sp_xmit */
+}
 
-/* #if defined(CONFIG_6PACK) || defined(CONFIG_6PACK_MODULE) */
 
 /* Return the frame type ID */
 static int sp_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
@@ -374,16 +430,14 @@
 #endif
 }
 
-/* #endif */ /* CONFIG_{AX25,AX25_MODULE} */
 
 /* Open the low-level part of the 6pack channel. */
-static int
-sp_open(struct net_device *dev)
+static int sp_open(struct net_device *dev)
 {
-	struct sixpack *sp = (struct sixpack*)(dev->priv);
+	struct sixpack *sp = (struct sixpack *) dev->priv;
 	unsigned long len;
 
-	if (sp->tty == NULL) 
+	if (sp->tty == NULL)
 		return -ENODEV;
 
 	/*
@@ -391,21 +445,17 @@
 	 *
 	 * rbuff	Receive buffer.
 	 * xbuff	Transmit buffer.
-	 * cbuff        Temporary compression buffer.
 	 */
-	 
+
 	/* !!! length of the buffers. MTU is IP MTU, not PACLEN!
 	 */
 
 	len = dev->mtu * 2;
 
-	sp->rbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL);
-	if (sp->rbuff == NULL)   
+	if ((sp->rbuff = kmalloc(len + 4, GFP_KERNEL)) == NULL)
 		return -ENOMEM;
 
-	sp->xbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL);
-	if (sp->xbuff == NULL)   
-	{
+	if ((sp->xbuff = kmalloc(len + 4, GFP_KERNEL)) == NULL) {
 		kfree(sp->rbuff);
 		return -ENOMEM;
 	}
@@ -429,7 +479,7 @@
 	sp->status2     = 0;
 	sp->tnc_ok      = 0;
 	sp->tx_enable   = 0;
-	
+
 	netif_start_queue(dev);
 
 	init_timer(&sp->tx_t);
@@ -439,22 +489,20 @@
 
 
 /* Close the low-level part of the 6pack channel. */
-static int
-sp_close(struct net_device *dev)
+static int sp_close(struct net_device *dev)
 {
-	struct sixpack *sp = (struct sixpack*)(dev->priv);
+	struct sixpack *sp = (struct sixpack *) dev->priv;
 
-	if (sp->tty == NULL) {
+	if (sp->tty == NULL)
 		return -EBUSY;
-	}
+
 	sp->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
-	
+
 	netif_stop_queue(dev);
 	return 0;
 }
 
-static int
-sixpack_receive_room(struct tty_struct *tty)
+static int sixpack_receive_room(struct tty_struct *tty)
 {
 	return 65536;  /* We can handle an infinite amount of data. :-) */
 }
@@ -467,8 +515,7 @@
  * a block of 6pack data has been received, which can now be decapsulated
  * and sent on to some IP layer for further processing.
  */
-static void
-sixpack_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+static void sixpack_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
 {
 	unsigned char buf[512];
 	unsigned long flags;
@@ -484,18 +531,15 @@
 	cli();
 	memcpy(buf, cp, count<sizeof(buf)? count:sizeof(buf));
 	restore_flags(flags);
-	
+
 	/* Read the characters out of the buffer */
 
 	count1 = count;
-	while(count)
-	{
+	while (count) {
 		count--;
-		if (fp && *fp++)
-		{
-			if (!test_and_set_bit(SIXPF_ERROR, &sp->flags))  {
-				sp->rx_errors++;
-			}			
+		if (fp && *fp++) {
+			if (!test_and_set_bit(SIXPF_ERROR, &sp->flags))
+				sp->stats.rx_errors++;
 			continue;
 		}
 	}
@@ -509,15 +553,14 @@
  * sure the tty line exists, we only have to link it to
  * a free 6pcack channel...
  */
-static int
-sixpack_open(struct tty_struct *tty)
+static int sixpack_open(struct tty_struct *tty)
 {
 	struct sixpack *sp = (struct sixpack *) tty->disc_data;
 	int err;
 
 	/* First make sure we're not already connected. */
 
-	if (sp && sp->magic == SIXPACK_MAGIC) 
+	if (sp && sp->magic == SIXPACK_MAGIC)
 		return -EEXIST;
 
 	/* OK.  Find a free 6pack channel to use. */
@@ -525,21 +568,18 @@
 		return -ENFILE;
 	sp->tty = tty;
 	tty->disc_data = sp;
-	if (tty->driver.flush_buffer) 
+	if (tty->driver.flush_buffer)
 		tty->driver.flush_buffer(tty);
 
 	if (tty->ldisc.flush_buffer)
 		tty->ldisc.flush_buffer(tty);
 
-
 	/* Restore default settings */
 	sp->dev->type = ARPHRD_AX25;
 
 	/* Perform the low-level 6pack initialization. */
-	if ((err = sp_open(sp->dev)))  
+	if ((err = sp_open(sp->dev)))
 		return err;
-	
-	MOD_INC_USE_COUNT;
 
 	/* Done.  We have linked the TTY line to a channel. */
 
@@ -555,8 +595,7 @@
  * TTY line discipline to what it was before it got hooked to 6pack
  * (which usually is TTY again).
  */
-static void
-sixpack_close(struct tty_struct *tty)
+static void sixpack_close(struct tty_struct *tty)
 {
 	struct sixpack *sp = (struct sixpack *) tty->disc_data;
 
@@ -565,106 +604,62 @@
 		return;
 
 	rtnl_lock();
-	if (sp->dev->flags & IFF_UP)
-		(void) dev_close(sp->dev);
+	dev_close(sp->dev);
+
+	del_timer(&sp->tx_t);
+	del_timer(&sp->resync_t);
 
-	del_timer(&(sp->tx_t));
-	del_timer(&(sp->resync_t));
-	
 	tty->disc_data = 0;
 	sp->tty = NULL;
-	/* VSV = very important to remove timers */
 
 	sp_free(sp);
-	unregister_netdev(sp->dev);
+	unregister_netdevice(sp->dev);
 	rtnl_unlock();
-	MOD_DEC_USE_COUNT;
 }
 
 
-static struct net_device_stats *
-sp_get_stats(struct net_device *dev)
+static struct net_device_stats *sp_get_stats(struct net_device *dev)
 {
-	static struct net_device_stats stats;
-	struct sixpack *sp = (struct sixpack*)(dev->priv);
-
-	memset(&stats, 0, sizeof(struct net_device_stats));
-
-	stats.rx_packets     = sp->rx_packets;
-	stats.tx_packets     = sp->tx_packets;
-	stats.rx_bytes       = sp->rx_bytes;
-	stats.tx_bytes       = sp->tx_bytes;
-	stats.rx_dropped     = sp->rx_dropped;
-	stats.tx_dropped     = sp->tx_dropped;
-	stats.tx_errors      = sp->tx_errors;
-	stats.rx_errors      = sp->rx_errors;
-	stats.rx_over_errors = sp->rx_over_errors;
-	return (&stats);
+	struct sixpack *sp = (struct sixpack *) dev->priv;
+	return &sp->stats;
 }
 
 
-int
-sp_set_mac_address(struct net_device *dev, void *addr)
+static int sp_set_mac_address(struct net_device *dev, void *addr)
 {
-	int err;
-
-	err = verify_area(VERIFY_READ, addr, AX25_ADDR_LEN);
-	if (err)  {
-		return err;
-	}
-
-	copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN);	/* addr is an AX.25 shifted ASCII mac address */
-
-	return 0;
+	return copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN) ? -EFAULT : 0;
 }
 
-static int
-sp_set_dev_mac_address(struct net_device *dev, void *addr)
+static int sp_set_dev_mac_address(struct net_device *dev, void *addr)
 {
-	struct sockaddr *sa=addr;
+	struct sockaddr *sa = addr;
 	memcpy(dev->dev_addr, sa->sa_data, AX25_ADDR_LEN);
 	return 0;
 }
 
 
 /* Perform I/O control on an active 6pack channel. */
-static int
-sixpack_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
+static int sixpack_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
 {
 	struct sixpack *sp = (struct sixpack *) tty->disc_data;
-	int err;
 	unsigned int tmp;
 
 	/* First make sure we're connected. */
-	if (!sp || sp->magic != SIXPACK_MAGIC) {
+	if (!sp || sp->magic != SIXPACK_MAGIC)
 		return -EINVAL;
-	}
 
 	switch(cmd) {
-	 case SIOCGIFNAME:
-		err = verify_area(VERIFY_WRITE, arg, strlen(sp->dev->name) + 1);
-		if (err)  {
-			return err;
-		}
-		copy_to_user(arg, sp->dev->name, strlen(sp->dev->name) + 1);
-		return 0;
+	case SIOCGIFNAME:
+		return copy_to_user(arg, sp->dev->name, strlen(sp->dev->name) + 1) ? -EFAULT : 0;
 
 	case SIOCGIFENCAP:
-		err = verify_area(VERIFY_WRITE, arg, sizeof(int));
-		if (err)  {
-			return err;
-		}
-		put_user(0, (int *)arg);
-		return 0;
+		return put_user(0, (int *)arg);
 
 	case SIOCSIFENCAP:
-		err = verify_area(VERIFY_READ, arg, sizeof(int));
-		if (err)  {
-			return err;
-		}
-		get_user(tmp,(int *)arg);
+		if (get_user(tmp, (int *) arg))
+			return -EFAULT;
 
- 		sp->mode = tmp;
+		sp->mode = tmp;
 		sp->dev->addr_len        = AX25_ADDR_LEN;	  /* sizeof an AX.25 addr */
 		sp->dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3;
 		sp->dev->type            = ARPHRD_AX25;
@@ -686,26 +681,25 @@
 
 static int sp_open_dev(struct net_device *dev)
 {
-	struct sixpack *sp = (struct sixpack*)(dev->priv);
-	if(sp->tty==NULL)
+	struct sixpack *sp = (struct sixpack *) dev->priv;
+	if (sp->tty == NULL)
 		return -ENODEV;
 	return 0;
 }
 
 /* Initialize 6pack control device -- register 6pack line discipline */
 
-static int __init sixpack_init_ctrl_dev(void)
+static int __init sixpack_init_driver(void)
 {
 	int status;
 
-	if (sixpack_maxdev < 4) sixpack_maxdev = 4; /* Sanity */
+	if (sixpack_maxdev < 4)
+		sixpack_maxdev = 4; /* Sanity */
 
-	printk(KERN_INFO "AX.25: 6pack driver, %s (dynamic channels, max=%d)\n",
-	       SIXPACK_VERSION, sixpack_maxdev);
+	printk(banner, sixpack_maxdev);
 
 	sixpack_ctrls = (sixpack_ctrl_t **) kmalloc(sizeof(void*)*sixpack_maxdev, GFP_KERNEL);
-	if (sixpack_ctrls == NULL) 
-	{
+	if (sixpack_ctrls == NULL) {
 		printk(KERN_WARNING "6pack: Can't allocate sixpack_ctrls[] array!  Uaargh! (-> No 6pack available)\n");
 		return -ENOMEM;
 	}
@@ -713,9 +707,7 @@
 	/* Clear the pointer array, we allocate devices when we need them */
 	memset(sixpack_ctrls, 0, sizeof(void*)*sixpack_maxdev); /* Pointers */
 
-
 	/* Fill in our line protocol discipline, and register it */
-	memset(&sp_ldisc, 0, sizeof(sp_ldisc));
 	sp_ldisc.magic  = TTY_LDISC_MAGIC;
 	sp_ldisc.name   = "6pack";
 	sp_ldisc.flags  = 0;
@@ -731,6 +723,7 @@
 	sp_ldisc.write_wakeup = sixpack_write_wakeup;
 	if ((status = tty_register_ldisc(N_6PACK, &sp_ldisc)) != 0)  {
 		printk(KERN_WARNING "6pack: can't register line discipline (err = %d)\n", status);
+		kfree(sixpack_ctrls);
 	}
 
 	return status;
@@ -740,38 +733,28 @@
 {
 	int i;
 
-	if (sixpack_ctrls != NULL) 
-	{
-		for (i = 0; i < sixpack_maxdev; i++)  
-		{
-			if (sixpack_ctrls[i])
-			{
-				/*
-				 * VSV = if dev->start==0, then device
-				 * unregistered while close proc.
-				 */ 
-				if (netif_running(&sixpack_ctrls[i]->dev))
-					unregister_netdev(&sixpack_ctrls[i]->dev);
+	for (i = 0; i < sixpack_maxdev; i++) {
+		if (sixpack_ctrls[i]) {
+			/*
+			* VSV = if dev->start==0, then device
+			* unregistered while close proc.
+			*/
+			if (netif_running(&sixpack_ctrls[i]->dev))
+				 unregister_netdev(&sixpack_ctrls[i]->dev);
 
-				kfree(sixpack_ctrls[i]);
-				sixpack_ctrls[i] = NULL;
-			}
+			kfree(sixpack_ctrls[i]);
 		}
-		kfree(sixpack_ctrls);
-		sixpack_ctrls = NULL;
 	}
-	if ((i = tty_register_ldisc(N_6PACK, NULL)))  
-	{
+	kfree(sixpack_ctrls);
+	if ((i = tty_register_ldisc(N_6PACK, NULL)))
 		printk(KERN_WARNING "6pack: can't unregister line discipline (err = %d)\n", i);
-	}
 }
 
 
 /* Initialize the 6pack driver.  Called by DDI. */
-int
-sixpack_init(struct net_device *dev)
+static int sixpack_init(struct net_device *dev)
 {
-	struct sixpack *sp = (struct sixpack*)(dev->priv);
+	struct sixpack *sp = (struct sixpack *) dev->priv;
 
 	static char ax25_bcast[AX25_ADDR_LEN] =
 		{'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
@@ -779,14 +762,14 @@
 		{'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
 
 	if (sp == NULL)		/* Allocation failed ?? */
-	  return -ENODEV;
+		return -ENODEV;
 
 	/* Set up the "6pack Control Block". (And clear statistics) */
-	
+
 	memset(sp, 0, sizeof (struct sixpack));
 	sp->magic  = SIXPACK_MAGIC;
 	sp->dev	   = dev;
-	
+
 	/* Finish setting up the DEVICE info. */
 	dev->mtu		= SIXP_MTU;
 	dev->hard_start_xmit	= sp_xmit;
@@ -817,66 +800,62 @@
 
 
 /* ----> 6pack timer interrupt handler and friends. <---- */
-static void 
-sp_start_tx_timer(struct sixpack *sp)
+static void sp_start_tx_timer(struct sixpack *sp)
 {
 	int when = sp->slottime;
-	
-	del_timer(&(sp->tx_t));
+
+	del_timer(&sp->tx_t);
 	sp->tx_t.data = (unsigned long) sp;
 	sp->tx_t.function = sp_xmit_on_air;
 	sp->tx_t.expires = jiffies + ((when+1)*HZ)/100;
-	add_timer(&(sp->tx_t));
+	add_timer(&sp->tx_t);
 }
 
 
 /* encode an AX.25 packet into 6pack */
 
-int encode_sixpack(byte *tx_buf, byte *tx_buf_raw, int length, byte tx_delay)
+static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw, int length, unsigned char tx_delay)
 {
 	int count = 0;
-	byte checksum = 0, buf[400];
+	unsigned char checksum = 0, buf[400];
 	int raw_count = 0;
 
 	tx_buf_raw[raw_count++] = SIXP_PRIO_CMD_MASK | SIXP_TX_MASK;
 	tx_buf_raw[raw_count++] = SIXP_SEOF;
 
 	buf[0] = tx_delay;
-	for(count = 1; count < length; count++)
+	for (count = 1; count < length; count++)
 		buf[count] = tx_buf[count];
 
-	for(count = 0; count < length; count++)
+	for (count = 0; count < length; count++)
 		checksum += buf[count];
-	buf[length] = (byte)0xff - checksum;
-	
-	for(count = 0; count <= length; count++) {
-		if((count % 3) == 0) {
+	buf[length] = (unsigned char) 0xff - checksum;
+
+	for (count = 0; count <= length; count++) {
+		if ((count % 3) == 0) {
 			tx_buf_raw[raw_count++] = (buf[count] & 0x3f);
 			tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x30);
-		}
-		else if((count % 3) == 1) {
+		} else if ((count % 3) == 1) {
 			tx_buf_raw[raw_count++] |= (buf[count] & 0x0f);
-			tx_buf_raw[raw_count] =
-				((buf[count] >> 2) & 0x3c);
+			tx_buf_raw[raw_count] =	((buf[count] >> 2) & 0x3c);
 		} else {
 			tx_buf_raw[raw_count++] |= (buf[count] & 0x03);
-			tx_buf_raw[raw_count++] =
-				(buf[count] >> 2);
-		} /* else */
-	} /* for */
+			tx_buf_raw[raw_count++] = (buf[count] >> 2);
+		}
+	}
 	if ((length % 3) != 2)
 		raw_count++;
 	tx_buf_raw[raw_count++] = SIXP_SEOF;
-	return(raw_count);
+	return raw_count;
 }
 
 
 /* decode a 6pack packet */
 
-void
+static void
 sixpack_decode(struct sixpack *sp, unsigned char pre_rbuff[], int count)
 {
-	byte inbyte;
+	unsigned char inbyte;
 	int count1;
 
 	for (count1 = 0; count1 < count; count1++) {
@@ -884,32 +863,28 @@
 		if (inbyte == SIXP_FOUND_TNC) {
 			printk(KERN_INFO "6pack: TNC found.\n");
 			sp->tnc_ok = 1;
-			del_timer(&(sp->resync_t));
+			del_timer(&sp->resync_t);
 		}
-		if((inbyte & SIXP_PRIO_CMD_MASK) != 0)
+		if ((inbyte & SIXP_PRIO_CMD_MASK) != 0)
 			decode_prio_command(inbyte, sp);
-		else if((inbyte & SIXP_STD_CMD_MASK) != 0)
+		else if ((inbyte & SIXP_STD_CMD_MASK) != 0)
 			decode_std_command(inbyte, sp);
-		else {
-			if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)
-				decode_data(inbyte, sp);
-		} /* else */
-	} /* for */
+		else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)
+			decode_data(inbyte, sp);
+	}
 }
 
-static int
-tnc_init(struct sixpack *sp)
-{
-	static byte inbyte;
-	
-	inbyte = 0xe8;
+static int tnc_init(struct sixpack *sp)
+{
+	unsigned char inbyte = 0xe8;
+
 	sp->tty->driver.write(sp->tty, 0, &inbyte, 1);
 
-	del_timer(&(sp->resync_t));
+	del_timer(&sp->resync_t);
 	sp->resync_t.data = (unsigned long) sp;
 	sp->resync_t.function = resync_tnc;
 	sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
-	add_timer(&(sp->resync_t));
+	add_timer(&sp->resync_t);
 
 	return 0;
 }
@@ -917,9 +892,9 @@
 
 /* identify and execute a 6pack priority command byte */
 
-void decode_prio_command(byte cmd, struct sixpack *sp)
+static void decode_prio_command(unsigned char cmd, struct sixpack *sp)
 {
-	byte channel;
+	unsigned char channel;
 	int actual;
 
 	channel = cmd & SIXP_CHN_MASK;
@@ -941,11 +916,11 @@
 				cmd &= !SIXP_RX_DCD_MASK;
 		}
 		sp->status = cmd & SIXP_PRIO_DATA_MASK;
-	} /* if */
+	}
 	else { /* output watchdog char if idle */
 		if ((sp->status2 != 0) && (sp->duplex == 1)) {
 			sp->led_state = 0x70;
-			sp->tty->driver.write(sp->tty, 0, &(sp->led_state), 1);
+			sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
 			sp->tx_enable = 1;
 			actual = sp->tty->driver.write(sp->tty, 0, sp->xbuff, sp->status2);
 			sp->xleft -= actual;
@@ -953,21 +928,21 @@
 			sp->led_state = 0x60;
 			sp->status2 = 0;
 
-		} /* if */
-	} /* else */
+		}
+	}
 
 	/* needed to trigger the TNC watchdog */
-	sp->tty->driver.write(sp->tty, 0, &(sp->led_state), 1);
+	sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
 
         /* if the state byte has been received, the TNC is present,
            so the resync timer can be reset. */
 
 	if (sp->tnc_ok == 1) {
-		del_timer(&(sp->resync_t));
+		del_timer(&sp->resync_t);
 		sp->resync_t.data = (unsigned long) sp;
 		sp->resync_t.function = resync_tnc;
 		sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
-		add_timer(&(sp->resync_t));
+		add_timer(&sp->resync_t);
 	}
 
 	sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
@@ -976,8 +951,7 @@
 /* try to resync the TNC. Called by the resync timer defined in
   decode_prio_command */
 
-static void
-resync_tnc(unsigned long channel)
+static void resync_tnc(unsigned long channel)
 {
 	static char resync_cmd = 0xe8;
 	struct sixpack *sp = (struct sixpack *) channel;
@@ -985,7 +959,7 @@
 	printk(KERN_INFO "6pack: resyncing TNC\n");
 
 	/* clear any data that might have been received */
-	
+
 	sp->rx_count = 0;
 	sp->rx_count_cooked = 0;
 
@@ -995,63 +969,63 @@
 	sp->status1 = 1;
 	sp->status2 = 0;
 	sp->tnc_ok = 0;
-	
+
 	/* resync the TNC */
 
 	sp->led_state = 0x60;
-	sp->tty->driver.write(sp->tty, 0, &(sp->led_state), 1);
+	sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
 	sp->tty->driver.write(sp->tty, 0, &resync_cmd, 1);
 
 
 	/* Start resync timer again -- the TNC might be still absent */
 
-	del_timer(&(sp->resync_t));
+	del_timer(&sp->resync_t);
 	sp->resync_t.data = (unsigned long) sp;
 	sp->resync_t.function = resync_tnc;
 	sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
-	add_timer(&(sp->resync_t));
+	add_timer(&sp->resync_t);
 }
 
 
 
 /* identify and execute a standard 6pack command byte */
 
-void decode_std_command(byte cmd, struct sixpack *sp)
+static void decode_std_command(unsigned char cmd, struct sixpack *sp)
 {
-	byte checksum = 0, rest = 0, channel;
+	unsigned char checksum = 0, rest = 0, channel;
 	short i;
 
 	channel = cmd & SIXP_CHN_MASK;
-	switch(cmd & SIXP_CMD_MASK) {     /* normal command */
+	switch (cmd & SIXP_CMD_MASK) {     /* normal command */
 		case SIXP_SEOF:
 			if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) {
 				if ((sp->status & SIXP_RX_DCD_MASK) ==
 					SIXP_RX_DCD_MASK) {
 					sp->led_state = 0x68;
-					sp->tty->driver.write(sp->tty, 0, &(sp->led_state), 1);
-				} /* if */
+					sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
+				}
 			} else {
 				sp->led_state = 0x60;
 				/* fill trailing bytes with zeroes */
-				sp->tty->driver.write(sp->tty, 0, &(sp->led_state), 1);
+				sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1);
 				rest = sp->rx_count;
 				if (rest != 0)
-					 for(i=rest; i<=3; i++)
+					 for (i = rest; i <= 3; i++)
 						decode_data(0, sp);
 				if (rest == 2)
 					sp->rx_count_cooked -= 2;
 				else if (rest == 3)
 					sp->rx_count_cooked -= 1;
-				for (i=0; i<sp->rx_count_cooked; i++)
-					checksum+=sp->cooked_buf[i];
+				for (i = 0; i < sp->rx_count_cooked; i++)
+					checksum += sp->cooked_buf[i];
 				if (checksum != SIXP_CHKSUM) {
 					printk(KERN_DEBUG "6pack: bad checksum %2.2x\n", checksum);
 				} else {
 					sp->rcount = sp->rx_count_cooked-2;
 					sp_bump(sp, 0);
-				} /* else */
+				}
 				sp->rx_count_cooked = 0;
-			} /* else */
+			}
 			break;
 		case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n");
 			break;
@@ -1059,14 +1033,13 @@
 			break;
 		case SIXP_RX_BUF_OVL:
 			printk(KERN_DEBUG "6pack: RX buffer overflow\n");
-	} /* switch */
-} /* function */
+	}
+}
 
 /* decode 4 sixpack-encoded bytes into 3 data bytes */
 
-void decode_data(byte inbyte, struct sixpack *sp)
+static void decode_data(unsigned char inbyte, struct sixpack *sp)
 {
-
 	unsigned char *buf;
 
 	if (sp->rx_count != 3)
@@ -1086,5 +1059,8 @@
 
 MODULE_AUTHOR("Andreas Künsgen <ajk@ccac.rwth-aachen.de>");
 MODULE_DESCRIPTION("6pack driver for AX.25");
-module_init(sixpack_init_ctrl_dev);
+MODULE_PARM(sixpack_maxdev, "i");
+MODULE_PARM_DESC(sixpack_maxdev, "number of 6PACK devices");
+
+module_init(sixpack_init_driver);
 module_exit(sixpack_cleanup_driver);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/hamradio/6pack.h linux/drivers/net/hamradio/6pack.h
--- v2.4.0-prerelease/linux/drivers/net/hamradio/6pack.h	Mon Dec 11 17:59:44 2000
+++ linux/drivers/net/hamradio/6pack.h	Wed Dec 31 16:00:00 1969
@@ -1,136 +0,0 @@
-/*
- * 6pack.h	Define the 6pack device driver interface and constants.
- *
- * NOTE:	THIS FILE WILL BE MOVED TO THE LINUX INCLUDE DIRECTORY
- *		AS SOON AS POSSIBLE!
- *
- * Version:	@(#)6pack.h	0.3.0	04/07/98
- *
- * Fixes:
- *
- * Author:	Andreas Künsgen <ajk@iehk.rwth-aachen.de>
- *
- *		This file is derived from slip.h, written by
- *		Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
- */
-
-#ifndef _LINUX_6PACK_H
-#define _LINUX_6PACK_H
-
-#define SIXPACK_VERSION    "Revision: 0.3.0"
-
-#ifdef __KERNEL__
-
-/* sixpack priority commands */
-#define SIXP_SEOF	0x40	/* start and end of a 6pack frame */
-#define SIXP_TX_URUN	0x48	/* transmit overrun */
-#define SIXP_RX_ORUN	0x50	/* receive overrun */
-#define SIXP_RX_BUF_OVL	0x58	/* receive buffer overflow */
-
-#define SIXP_CHKSUM	0xFF	/* valid checksum of a 6pack frame */
-
-/* masks to get certain bits out of the status bytes sent by the TNC */
-
-#define SIXP_CMD_MASK		0xC0
-#define SIXP_CHN_MASK		0x07
-#define SIXP_PRIO_CMD_MASK	0x80
-#define SIXP_STD_CMD_MASK	0x40
-#define SIXP_PRIO_DATA_MASK	0x38
-#define SIXP_TX_MASK		0x20
-#define SIXP_RX_MASK		0x10
-#define SIXP_RX_DCD_MASK	0x18
-#define SIXP_LEDS_ON		0x78
-#define SIXP_LEDS_OFF		0x60
-#define SIXP_CON		0x08
-#define SIXP_STA		0x10
-
-#define SIXP_FOUND_TNC		0xe9
-#define SIXP_CON_ON		0x68
-#define SIXP_DCD_MASK		0x08
-#define SIXP_DAMA_OFF		0
-
-/* default level 2 parameters */
-#define SIXP_TXDELAY			25	/* in 10 ms */
-#define SIXP_PERSIST			50	/* in 256ths */
-#define SIXP_SLOTTIME			10	/* in 10 ms */
-#define SIXP_INIT_RESYNC_TIMEOUT	150	/* in 10 ms */
-#define SIXP_RESYNC_TIMEOUT		500	/* in 10 ms */
-
-/* 6pack configuration. */
-#define SIXP_NRUNIT			256	/* MAX number of 6pack channels */
-#define SIXP_MTU			256	/* Default MTU */
-
-enum sixpack_flags {
-	SIXPF_INUSE,	/* Channel in use	*/
-	SIXPF_ERROR,	/* Parity, etc. error	*/
-};
-
-struct sixpack {
-  int			magic;
-
-  /* Various fields. */
-  struct tty_struct	*tty;		/* ptr to TTY structure		*/
-  struct net_device		*dev;		/* easy for intr handling	*/
-
-  /* These are pointers to the malloc()ed frame buffers. */
-  unsigned char		*rbuff;		/* receiver buffer		*/
-  int                   rcount;         /* received chars counter       */
-  unsigned char		*xbuff;		/* transmitter buffer		*/
-  unsigned char         *xhead;         /* pointer to next byte to XMIT */
-  int                   xleft;          /* bytes left in XMIT queue     */
-
-  unsigned char		raw_buf[4];
-  unsigned char		cooked_buf[400];
-
-  unsigned int		rx_count;
-  unsigned int		rx_count_cooked;
-
-  /* 6pack interface statistics. */
-  unsigned long		rx_packets;	/* inbound frames counter	*/
-  unsigned long         tx_packets;     /* outbound frames counter      */
-  unsigned long         rx_bytes;       /* inbound bytes counter        */
-  unsigned long         tx_bytes;       /* outboud bytes counter        */
-  unsigned long         rx_errors;      /* Parity, etc. errors          */
-  unsigned long         tx_errors;      /* Planned stuff                */
-  unsigned long         rx_dropped;     /* No memory for skb            */
-  unsigned long         tx_dropped;     /* When MTU change              */
-  unsigned long         rx_over_errors; /* Frame bigger then 6pack buf. */
-
-  /* Detailed 6pack statistics. */
-
-  int			mtu;		/* Our mtu (to spot changes!)   */
-  int                   buffsize;       /* Max buffers sizes            */
-
-  unsigned long		flags;		/* Flag values/ mode etc	*/
-					/* long req'd for set_bit --RR */
-  unsigned char		mode;		/* 6pack mode			*/
-
-/* 6pack stuff */
-  
-  unsigned char tx_delay;
-  unsigned char persistance;
-  unsigned char slottime;
-  unsigned char duplex;
-  unsigned char led_state;
-  unsigned char status;
-  unsigned char status1;
-  unsigned char status2;
-  unsigned char tx_enable;
-  unsigned char tnc_ok;
-  
-/*  unsigned char retval; */
-
-  struct timer_list tx_t;
-  struct timer_list resync_t;
-}; /* struct sixpack */
-
-
-/* should later be moved to include/net/ax25.h */
-#define AX25_6PACK_HEADER_LEN 0
-#define SIXPACK_MAGIC 0x5304
-
-extern int sixpack_init(struct net_device *dev);
-
-#endif
-
-#endif	/* _LINUX_6PACK.H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/hamradio/Makefile linux/drivers/net/hamradio/Makefile
--- v2.4.0-prerelease/linux/drivers/net/hamradio/Makefile	Mon Jan  1 09:38:35 2001
+++ linux/drivers/net/hamradio/Makefile	Thu Jan  4 12:50:12 2001
@@ -21,8 +21,6 @@
 obj-$(CONFIG_MKISS)		+= mkiss.o
 obj-$(CONFIG_6PACK)		+= 6pack.o
 obj-$(CONFIG_YAM)		+= yam.o
-obj-$(CONFIG_PI)		+= pi2.o
-obj-$(CONFIG_PT)		+= pt.o
 obj-$(CONFIG_BPQETHER)		+= bpqether.o
 obj-$(CONFIG_BAYCOM_SER_FDX)	+= baycom_ser_fdx.o	hdlcdrv.o
 obj-$(CONFIG_BAYCOM_SER_HDX)	+= baycom_ser_hdx.o	hdlcdrv.o
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/hamradio/mkiss.c linux/drivers/net/hamradio/mkiss.c
--- v2.4.0-prerelease/linux/drivers/net/hamradio/mkiss.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/net/hamradio/mkiss.c	Thu Jan  4 12:50:12 2001
@@ -690,13 +690,12 @@
 	if (ax == NULL || ax->magic != AX25_MAGIC)
 		return;
 
-	dev_close(ax->dev);
+	unregister_netdev(ax->dev);
 
 	tty->disc_data = 0;
 	ax->tty        = NULL;
 
 	ax_free(ax);
-	unregister_netdev(ax->dev);
 	MOD_DEC_USE_COUNT;
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/hamradio/pi2.c linux/drivers/net/hamradio/pi2.c
--- v2.4.0-prerelease/linux/drivers/net/hamradio/pi2.c	Sun Oct  8 10:50:19 2000
+++ linux/drivers/net/hamradio/pi2.c	Wed Dec 31 16:00:00 1969
@@ -1,1677 +0,0 @@
-/*
-   pi2.c: Driver for the Ottawa Amateur Radio Club PI and PI2 interface.
-   Copyright (c) 1994 David Perry
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License version 2, as
-   published by the Free Software Foundation.
-
-   This program is distributed in the hope that it will be useful, but
-   WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-   General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software Foundation,
-   Inc., 675 Mass Ave, Cambridge MA 02139, USA.
-
-   The file skeleton.c by Donald Becker was used as a starting point
-   for this driver.
-
-   Revision History
-
-   April 6, 1994  (dp) Created
-		       version 0.0 ALPHA
-   April 10, 1994 (dp) Included cleanup, suggestions from J. P. Morrison.
-                       version 0.1 ALPHA
-   April 13, 1994 (dp) Included address probing from JPM, autoirq
-		       version 0.2 ALPHA
-   April 14, 1994 (ac) Sketched in the NET3 changes.
-   April 17, 1994 (dp) Finished the NET3 changes. Used init_etherdev()
-		       instead of kmalloc() to ensure that DMA buffers will
-		       reside under the 16 meg line.
-		       version 0.4 ALPHA
-   April 18, 1994 (dp) Now using the kernel provided sk_buff handling functions.
-		       Fixed a nasty problem with DMA.
-		       version 0.5 ALPHA
-   June 6, 1994 (ac)   Fixed to match the buffer locking changes. Added a hack to
-   		       fix a funny I see (search for HACK) and fixed the calls in
-   		       init() so it doesn't migrate module based ethernet cards up
-   		       to eth2 Took out the old module ideas as they are no longer
-   		       relevant to the PI driver.
-   July 16, 1994 (dp)  Fixed the B channel rx overrun problem ac referred to
-   		       above. Also added a bit of a hack to improve the maximum
-   	               baud rate on the B channel (Search for STUFF2). Included
-   		       ioctl stuff from John Paul Morrison. version 0.6 ALPHA
-   Feb 9, 1995 (dp)    Updated for 1.1.90 kernel
-                       version 0.7 ALPHA
-   Apr 6, 1995 (ac)    Tweaks for NET3 pre snapshot 002 AX.25
-   April 23, 1995 (dp) Fixed ioctl so it works properly with piconfig program
-                       when changing the baud rate or clock mode.
-                       version 0.8 ALPHA
-   July 17, 1995 (ac)  Finally polishing of AX25.030+ support
-   Oct  29, 1995 (ac)  A couple of minor fixes before this, and this release changes
-   		       to the proper set_mac_address semantics which will break
-   		       a few programs I suspect.
-   Aug  18, 1996 (jsn) Converted to be used as a module.
-   Dec  13, 1996 (jsn) Fixed to match Linux networking changes.
-*/
-
-/* The following #define invokes a hack that will improve performance (baud)
-   for the B port. The up side is it makes 9600 baud work ok on the B port.
-   It may do 38400, depending on the host. The down side is it burns up
-   CPU cycles with ints locked for up to 1 character time, at the beginning
-   of each transmitted packet. If this causes you to lose sleep, #undefine it.
-*/
-
-/*#define STUFF2 1*/
-
-/* The default configuration */
-#define PI_DMA 3
-
-#define DEF_A_SPEED 0		/* 0 means external clock */
-#define DEF_A_TXDELAY 15	/* 15 mS transmit delay */
-#define DEF_A_PERSIST 128	/* 50% persistence */
-#define DEF_A_SLOTIME 15	/* 15 mS slot time */
-#define DEF_A_SQUELDELAY 1	/* 1 mS squelch delay - allows fcs and flag */
-#define DEF_A_CLOCKMODE 0	/* clock mode - 0 is normal */
-
-#define DEF_B_SPEED 1200	/* 1200 baud */
-#define DEF_B_TXDELAY 40	/* 400 mS */
-#define DEF_B_PERSIST 128	/* 50% */
-#define DEF_B_SLOTIME 30	/* 300 mS */
-#define DEF_B_SQUELDELAY 3	/* 30 mS */
-#define DEF_B_CLOCKMODE 0	/* Normal clock mode */
-
-/* The following #define is only really required for the PI card, not
-   the PI2 - but it's safer to leave it in. */
-#define REALLY_SLOW_IO 1
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/malloc.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/uaccess.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/timer.h>
-#include <linux/if_arp.h>
-#include <linux/pi2.h>
-#include <linux/init.h>
-#include "z8530.h"
-#include <net/ax25.h>
-
-struct mbuf {
-    struct mbuf *next;
-    int cnt;
-    char data[0];
-};
-
-/*
- *	The actual devices we will use
- */
-
-/*
- *	PI device declarations.
- */
-
-static int pi0_preprobe(struct net_device *dev){return 0;}	/* Dummy probe function */
-static struct net_device pi0a = { "pi0a", 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, pi0_preprobe };
-static struct net_device pi0b = { "pi0b", 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, pi0_preprobe };
-
-
-/* The number of low I/O ports used by the card. */
-#define PI_TOTAL_SIZE	8
-
-
-/* Index to functions, as function prototypes. */
-
-static int pi_probe(struct net_device *dev, int card_type);
-static int pi_open(struct net_device *dev);
-static int pi_send_packet(struct sk_buff *skb, struct net_device *dev);
-static void pi_interrupt(int reg_ptr, void *dev_id, struct pt_regs *regs);
-static int pi_close(struct net_device *dev);
-static int pi_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-static struct net_device_stats *pi_get_stats(struct net_device *dev);
-static void rts(struct pi_local *lp, int x);
-static void b_rxint(struct net_device *dev, struct pi_local *lp);
-static void b_txint(struct pi_local *lp);
-static void b_exint(struct pi_local *lp);
-static void a_rxint(struct net_device *dev, struct pi_local *lp);
-static void a_txint(struct pi_local *lp);
-static void a_exint(struct pi_local *lp);
-static char *get_dma_buffer(unsigned long *mem_ptr);
-static int valid_dma_page(unsigned long addr, unsigned long dev_buffsize);
-
-static char ax25_bcast[7] =
-{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1};
-static char ax25_test[7] =
-{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1};
-
-static int ext2_secrm_seed = 152;	/* Random generator base */
-
-extern inline unsigned char random(void)
-{
-	return (unsigned char) (ext2_secrm_seed = ext2_secrm_seed
-			    * 69069l + 1);
-}
-
-extern inline void wrtscc(int cbase, int ctl, int sccreg, int val)
-{
-	/* assume caller disables interrupts! */
-	outb_p(0, cbase + DMAEN);	/* Disable DMA while we touch the scc */
-	outb_p(sccreg, ctl);		/* Select register */
-	outb_p(val, ctl);		/* Output value */
-	outb_p(1, cbase + DMAEN);	/* Enable DMA */
-}
-
-extern inline int rdscc(int cbase, int ctl, int sccreg)
-{
-	int retval;
-
-	/* assume caller disables interrupts! */
-	outb_p(0, cbase + DMAEN);	/* Disable DMA while we touch the scc */
-	outb_p(sccreg, ctl);	/* Select register */
-	retval = inb_p(ctl);
-	outb_p(1, cbase + DMAEN);	/* Enable DMA */
-	return retval;
-}
-
-static void switchbuffers(struct pi_local *lp)
-{
-	if (lp->rcvbuf == lp->rxdmabuf1)
-		lp->rcvbuf = lp->rxdmabuf2;
-	else
-		lp->rcvbuf = lp->rxdmabuf1;
-}
-
-static void hardware_send_packet(struct pi_local *lp, struct sk_buff *skb)
-{
-	char kickflag;
-	unsigned long flags;
-
-	lp->stats.tx_packets++;
-
-	save_flags(flags);
-	cli();
-	kickflag = (skb_peek(&lp->sndq) == NULL) && (lp->sndbuf == NULL);
-	restore_flags(flags);
-
-	skb_queue_tail(&lp->sndq, skb);
-	if (kickflag) 
-	{
-		/* simulate interrupt to xmit */
-		switch (lp->base & 2) 
-		{
-			case 2:
-				a_txint(lp);	/* process interrupt */
-				break;
-			case 0:
-				save_flags(flags);
-				cli();
-				if (lp->tstate == IDLE)
-					b_txint(lp);
-				restore_flags(flags);
-				break;
-		}
-	}
-}
-
-static void setup_rx_dma(struct pi_local *lp)
-{
-    unsigned long flags;
-    int cmd;
-    unsigned long dma_abs;
-    unsigned dmachan;
-
-    save_flags(flags);
-    cli();
-
-    dma_abs = (unsigned long) (lp->rcvbuf->data);
-    dmachan = lp->dmachan;
-    cmd = lp->base + CTL;
-
-    if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
-	panic("PI: RX buffer violates DMA boundary!");
-
-    /* Get ready for RX DMA */
-    wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);
-
-    disable_dma(dmachan);
-    clear_dma_ff(dmachan);
-
-    /* Set DMA mode register to single transfers, incrementing address,
-     *  auto init, writes
-     */
-    set_dma_mode(dmachan, DMA_MODE_READ | 0x10);
-    set_dma_addr(dmachan, dma_abs);
-    set_dma_count(dmachan, lp->bufsiz);
-    enable_dma(dmachan);
-
-    /* If a packet is already coming in, this line is supposed to
-	   avoid receiving a partial packet.
-     */
-    wrtscc(lp->cardbase, cmd, R0, RES_Rx_CRC);
-
-    /* Enable RX dma */
-    wrtscc(lp->cardbase, cmd, R1,
-      WT_RDY_ENAB | WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);
-
-    restore_flags(flags);
-}
-
-static void setup_tx_dma(struct pi_local *lp, int length)
-{
-    unsigned long dma_abs;
-    unsigned long flags;
-    unsigned long dmachan;
-
-    save_flags(flags);
-    cli();
-
-    dmachan = lp->dmachan;
-    dma_abs = (unsigned long) (lp->txdmabuf);
-
-    if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
-	panic("PI: TX buffer violates DMA boundary!");
-
-    disable_dma(dmachan);
-    /* Set DMA mode register to single transfers, incrementing address,
-     *  no auto init, reads
-     */
-    set_dma_mode(dmachan, DMA_MODE_WRITE);
-    clear_dma_ff(dmachan);
-    set_dma_addr(dmachan, dma_abs);
-    /* output byte count */
-    set_dma_count(dmachan, length);
-
-    restore_flags(flags);
-}
-
-static void tdelay(struct pi_local *lp, int time)
-{
-    int port;
-    unsigned int t1;
-    unsigned char sc;
-
-    if (lp->base & 2) {		/* If A channel */
-	sc = SC1;
-	t1 = time;
-	port = lp->cardbase + TMR1;
-    } else {
-	sc = SC2;
-	t1 = 10 * time;		/* 10s of milliseconds for the B channel */
-	port = lp->cardbase + TMR2;
-	wrtscc(lp->cardbase, lp->base + CTL, R1, INT_ALL_Rx | EXT_INT_ENAB);
-    }
-
-    /* Setup timer sc */
-    outb_p(sc | LSB_MSB | MODE0, lp->cardbase + TMRCMD);
-
-    /* times 2 to make millisecs */
-    outb_p((t1 << 1) & 0xFF, port);
-    outb_p((t1 >> 7) & 0xFF, port);
-
-    /* Enable correct int for timeout */
-    wrtscc(lp->cardbase, lp->base + CTL, R15, CTSIE);
-    wrtscc(lp->cardbase, lp->base + CTL, R0, RES_EXT_INT);
-}
-
-static void a_txint(struct pi_local *lp)
-{
-    int cmd;
-    unsigned long flags;
-
-    save_flags(flags);
-    cli();
-
-    cmd = CTL + lp->base;
-
-    switch (lp->tstate) {
-    case IDLE:
-	/* Transmitter idle. Find a frame for transmission */
-	if ((lp->sndbuf = skb_dequeue(&lp->sndq)) == NULL) {
-	    rts(lp, OFF);
-	    restore_flags(flags);
-	    return;
-	}
-	/* If a buffer to send, we drop thru here */
-    case DEFER:
-	/* we may have deferred prev xmit attempt */
-	/* Check DCD - debounce it
-         * See Intel Microcommunications Handbook, p2-308
-         */
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	if ((rdscc(lp->cardbase, cmd, R0) & DCD) != 0) {
-	    lp->tstate = DEFER;
-	    tdelay(lp, 100);
-	    /* defer until DCD transition or timeout */
-	    wrtscc(lp->cardbase, cmd, R15, CTSIE | DCDIE);
-	    restore_flags(flags);
-	    return;
-	}
-	if (random() > lp->persist) {
-	    lp->tstate = DEFER;
-	    tdelay(lp, lp->slotime);
-	    restore_flags(flags);
-	    return;
-	}
-	/* Assert RTS early minimize collision window */
-	wrtscc(lp->cardbase, cmd, R5, TxCRC_ENAB | RTS | Tx8);
-	rts(lp, ON);		/* Transmitter on */
-	lp->tstate = ST_TXDELAY;
-	tdelay(lp, lp->txdelay);
-	restore_flags(flags);
-	return;
-    default:
-	break;
-    }				/* end switch(lp->state) */
-
-    restore_flags(flags);
-}				/*a_txint */
-
-static void a_exint(struct pi_local *lp)
-{
-    unsigned long flags;
-    int cmd;
-    char st;
-    int length;
-
-    save_flags(flags);
-    cli();			/* disable interrupts */
-
-    st = rdscc(lp->cardbase, lp->base + CTL, R0);	/* Fetch status */
-
-    /* reset external status latch */
-    wrtscc(lp->cardbase, CTL + lp->base, R0, RES_EXT_INT);
-    cmd = lp->base + CTL;
-
-    if ((lp->rstate >= ACTIVE) && (st & BRK_ABRT)) {
-	setup_rx_dma(lp);
-	lp->rstate = ACTIVE;
-    }
-    switch (lp->tstate) {
-    case ACTIVE:
-	kfree_skb(lp->sndbuf);
-	lp->sndbuf = NULL;
-	lp->tstate = FLAGOUT;
-	tdelay(lp, lp->squeldelay);
-	break;
-    case FLAGOUT:
-	if ((lp->sndbuf = skb_dequeue(&lp->sndq)) == NULL) {
-	    /* Nothing to send - return to receive mode */
-	    lp->tstate = IDLE;
-	    rts(lp, OFF);
-	    restore_flags(flags);
-	    return;
-	}
-	/* NOTE - fall through if more to send */
-    case ST_TXDELAY:
-	/* Disable DMA chan */
-	disable_dma(lp->dmachan);
-
-	/* Set up for TX dma */
-	wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | EXT_INT_ENAB);
-
-
-	/* Get all chars */
-	/* Strip KISS control byte */
-	length = lp->sndbuf->len - 1;
-	memcpy(lp->txdmabuf, &lp->sndbuf->data[1], length);
-
-
-	/* Setup DMA controller for tx */
-	setup_tx_dma(lp, length);
-
-	/* select transmit interrupts to enable */
-	/* Allow DMA on chan */
-	enable_dma(lp->dmachan);
-
-	/* reset CRC, Txint pend*/
-	wrtscc(lp->cardbase, cmd, R0, RES_Tx_CRC | RES_Tx_P);
-
-	/* allow Underrun int only */
-	wrtscc(lp->cardbase, cmd, R15, TxUIE);
-
-	/* Enable TX DMA */
-	wrtscc(lp->cardbase, cmd, R1, WT_RDY_ENAB | WT_FN_RDYFN | EXT_INT_ENAB);
-
-	/* Send CRC on underrun */
-	wrtscc(lp->cardbase, cmd, R0, RES_EOM_L);
-
-
-	/* packet going out now */
-	lp->tstate = ACTIVE;
-	break;
-    case DEFER:
-	/* we have deferred prev xmit attempt
-         * See Intel Microcommunications Handbook, p2-308
-         */
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	if ((rdscc(lp->cardbase, cmd, R0) & DCD) != 0) {
-	    lp->tstate = DEFER;
-	    tdelay(lp, 100);
-	    /* Defer until dcd transition or 100mS timeout */
-	    wrtscc(lp->cardbase, CTL + lp->base, R15, CTSIE | DCDIE);
-	    restore_flags(flags);
-	    return;
-	}
-	if (random() > lp->persist) {
-	    lp->tstate = DEFER;
-	    tdelay(lp, lp->slotime);
-	    restore_flags(flags);
-	    return;
-	}
-	/* Assert RTS early minimize collision window */
-	wrtscc(lp->cardbase, cmd, R5, TxCRC_ENAB | RTS | Tx8);
-	rts(lp, ON);		/* Transmitter on */
-	lp->tstate = ST_TXDELAY;
-	tdelay(lp, lp->txdelay);
-	restore_flags(flags);
-	return;
-    }				/* switch(lp->tstate) */
-
-    restore_flags(flags);
-}				/* a_exint() */
-
-/* Receive interrupt handler for the A channel
- */
-static void a_rxint(struct net_device *dev, struct pi_local *lp)
-{
-    unsigned long flags;
-    int cmd;
-    int bytecount;
-    char rse;
-    struct sk_buff *skb;
-    int sksize, pkt_len;
-    struct mbuf *cur_buf;
-    unsigned char *cfix;
-
-    save_flags(flags);
-    cli();			/* disable interrupts */
-    cmd = lp->base + CTL;
-
-    rse = rdscc(lp->cardbase, cmd, R1);	/* Get special condition bits from R1 */
-    if (rse & Rx_OVR)
-	lp->rstate = RXERROR;
-
-    if (rse & END_FR) {
-	/* If end of frame */
-	/* figure length of frame from 8237 */
-	clear_dma_ff(lp->dmachan);
-	bytecount = lp->bufsiz - get_dma_residue(lp->dmachan);
-
-	if ((rse & CRC_ERR) || (lp->rstate > ACTIVE) || (bytecount < 10)) {
-	    if ((bytecount >= 10) && (rse & CRC_ERR)) {
-		lp->stats.rx_crc_errors++;
-	    }
-	    if (lp->rstate == RXERROR) {
-		lp->stats.rx_errors++;
-		lp->stats.rx_over_errors++;
-	    }
-	    /* Reset buffer pointers */
-	    lp->rstate = ACTIVE;
-	    setup_rx_dma(lp);
-	} else {
-	    /* Here we have a valid frame */
-	    /* Toss 2 crc bytes , add one for KISS */
-	    pkt_len = lp->rcvbuf->cnt = bytecount - 2 + 1;
-
-	    /* Get buffer for next frame */
-	    cur_buf = lp->rcvbuf;
-	    switchbuffers(lp);
-	    setup_rx_dma(lp);
-
-
-	    /* Malloc up new buffer. */
-	    sksize = pkt_len;
-
-	    skb = dev_alloc_skb(sksize);
-	    if (skb == NULL) {
-		printk(KERN_ERR "PI: %s: Memory squeeze, dropping packet.\n", dev->name);
-		lp->stats.rx_dropped++;
-		restore_flags(flags);
-		return;
-	    }
-	    skb->dev = dev;
-
-	    /* KISS kludge -  prefix with a 0 byte */
-	    cfix=skb_put(skb,pkt_len);
-	    *cfix++=0;
-	    /* 'skb->data' points to the start of sk_buff data area. */
-	    memcpy(cfix, (char *) cur_buf->data,
-		   pkt_len - 1);
-	    skb->protocol=htons(ETH_P_AX25);
-	    skb->mac.raw=skb->data;
-	    netif_rx(skb);
-	    lp->stats.rx_packets++;
-	}			/* end good frame */
-    }				/* end EOF check */
-    wrtscc(lp->cardbase, lp->base + CTL, R0, ERR_RES);	/* error reset */
-    restore_flags(flags);
-}
-
-static void b_rxint(struct net_device *dev, struct pi_local *lp)
-{
-    unsigned long flags;
-    int cmd;
-    char rse;
-    struct sk_buff *skb;
-    int sksize;
-    int pkt_len;
-    unsigned char *cfix;
-
-    save_flags(flags);
-    cli();			/* disable interrupts */
-    cmd = CTL + lp->base;
-
-    rse = rdscc(lp->cardbase, cmd, R1);	/* get status byte from R1 */
-
-    if ((rdscc(lp->cardbase, cmd, R0)) & Rx_CH_AV) {
-	/* there is a char to be stored
-         * read special condition bits before reading the data char
-         */
-	if (rse & Rx_OVR) {
-	    /* Rx overrun - toss buffer */
-	    /* reset buffer pointers */
-	    lp->rcp = lp->rcvbuf->data;
-	    lp->rcvbuf->cnt = 0;
-
-	    lp->rstate = RXERROR;	/* set error flag */
-	    lp->stats.rx_errors++;
-	    lp->stats.rx_over_errors++;
-	} else if (lp->rcvbuf->cnt >= lp->bufsiz) {
-	    /* Too large -- toss buffer */
-	    /* reset buffer pointers */
-	    lp->rcp = lp->rcvbuf->data;
-	    lp->rcvbuf->cnt = 0;
-	    lp->rstate = TOOBIG;/* when set, chars are not stored */
-	}
-	/* ok, we can store the received character now */
-	if (lp->rstate == ACTIVE) {	/* If no errors... */
-	    *lp->rcp++ = rdscc(lp->cardbase, cmd, R8);	/* char to rcv buff */
-	    lp->rcvbuf->cnt++;	/* bump count */
-	} else {
-	    /* got to empty FIFO */
-	    (void) rdscc(lp->cardbase, cmd, R8);
-	    wrtscc(lp->cardbase, cmd, R0, ERR_RES);	/* reset err latch */
-	    lp->rstate = ACTIVE;
-	}
-    }
-    if (rse & END_FR) {
-	/* END OF FRAME -- Make sure Rx was active */
-	if (lp->rcvbuf->cnt > 0) {
-	    if ((rse & CRC_ERR) || (lp->rstate > ACTIVE) || (lp->rcvbuf->cnt < 10)) {
-		if ((lp->rcvbuf->cnt >= 10) && (rse & CRC_ERR)) {
-		    lp->stats.rx_crc_errors++;
-		}
-		lp->rcp = lp->rcvbuf->data;
-		lp->rcvbuf->cnt = 0;
-	    } else {
-		/* Here we have a valid frame */
-		pkt_len = lp->rcvbuf->cnt -= 2;	/* Toss 2 crc bytes */
-		pkt_len += 1;	/* Make room for KISS control byte */
-
-		/* Malloc up new buffer. */
-		sksize = pkt_len;
-		skb = dev_alloc_skb(sksize);
-		if (skb == NULL) {
-		    printk(KERN_ERR "PI: %s: Memory squeeze, dropping packet.\n", dev->name);
-		    lp->stats.rx_dropped++;
-		    restore_flags(flags);
-		    return;
-		}
-		skb->dev = dev;
-
-		/* KISS kludge -  prefix with a 0 byte */
-		cfix=skb_put(skb,pkt_len);
-		*cfix++=0;
-		/* 'skb->data' points to the start of sk_buff data area. */
-		memcpy(cfix, lp->rcvbuf->data, pkt_len - 1);
-		skb->protocol=ntohs(ETH_P_AX25);
-		skb->mac.raw=skb->data;
-		netif_rx(skb);
-		lp->stats.rx_packets++;
-		/* packet queued - initialize buffer for next frame */
-		lp->rcp = lp->rcvbuf->data;
-		lp->rcvbuf->cnt = 0;
-
-	    }			/* end good frame queued */
-	}			/* end check for active receive upon EOF */
-	lp->rstate = ACTIVE;	/* and clear error status */
-    }				/* end EOF check */
-    restore_flags(flags);
-}
-
-
-static void b_txint(struct pi_local *lp)
-{
-    unsigned long flags;
-    int cmd;
-    unsigned char c;
-
-    save_flags(flags);
-    cli();
-    cmd = CTL + lp->base;
-
-    switch (lp->tstate) {
-    case CRCOUT:
-	lp->tstate = FLAGOUT;
-	tdelay(lp, lp->squeldelay);
-	restore_flags(flags);
-	return;
-    case IDLE:
-	/* Transmitter idle. Find a frame for transmission */
-	if ((lp->sndbuf = skb_dequeue(&lp->sndq)) == NULL) {
-	    /* Nothing to send - return to receive mode
-             * Tx OFF now - flag should have gone
-             */
-	    rts(lp, OFF);
-
-	    restore_flags(flags);
-	    return;
-	}
-	lp->txptr = lp->sndbuf->data;
-	lp->txptr++;		/* Ignore KISS control byte */
-	lp->txcnt = (int) lp->sndbuf->len - 1;
-	/* If a buffer to send, we drop thru here */
-    case DEFER:		/* we may have deferred prev xmit attempt */
-	/* Check DCD - debounce it */
-	/* See Intel Microcommunications Handbook, p2-308 */
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	if ((rdscc(lp->cardbase, cmd, R0) & DCD) != 0) {
-	    lp->tstate = DEFER;
-	    tdelay(lp, 100);
-	    /* defer until DCD transition or timeout */
-	    wrtscc(lp->cardbase, cmd, R15, CTSIE | DCDIE);
-	    restore_flags(flags);
-	    return;
-	}
-	if (random() > lp->persist) {
-	    lp->tstate = DEFER;
-	    tdelay(lp, lp->slotime);
-	    restore_flags(flags);
-	    return;
-	}
-	rts(lp, ON);		/* Transmitter on */
-	lp->tstate = ST_TXDELAY;
-	tdelay(lp, lp->txdelay);
-	restore_flags(flags);
-	return;
-
-    case ACTIVE:
-	/* Here we are actively sending a frame */
-	if (lp->txcnt--) {
-	    c = *lp->txptr++;
-	    /* next char is gone */
-	    wrtscc(lp->cardbase, cmd, R8, c);
-	    /* stuffing a char satisfies Interrupt condition */
-	} else {
-	    /* No more to send */
-	    kfree_skb(lp->sndbuf);
-	    lp->sndbuf = NULL;
-	    if ((rdscc(lp->cardbase, cmd, R0) & 0x40)) {
-		/* Did we underrun? */
-		/* unexpected underrun */
-		lp->stats.tx_errors++;
-		lp->stats.tx_fifo_errors++;
-		wrtscc(lp->cardbase, cmd, R0, SEND_ABORT);
-		lp->tstate = FLAGOUT;
-		tdelay(lp, lp->squeldelay);
-		restore_flags(flags);
-		return;
-	    }
-	    lp->tstate = UNDERRUN;	/* Now we expect to underrun */
-	    /* Send flags on underrun */
-	    if (lp->speed) {	/* If internally clocked */
-		wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZI);
-	    } else {
-		wrtscc(lp->cardbase, cmd, R10, CRCPS);
-	    }
-	    wrtscc(lp->cardbase, cmd, R0, RES_Tx_P);	/* reset Tx Int Pend */
-	}
-	restore_flags(flags);
-	return;			/* back to wait for interrupt */
-    }				/* end switch */
-    restore_flags(flags);
-}
-
-/* Pi SIO External/Status interrupts (for the B channel)
- * This can be caused by a receiver abort, or a Tx UNDERRUN/EOM.
- * Receiver automatically goes to Hunt on an abort.
- *
- * If the Tx Underrun interrupt hits, change state and
- * issue a reset command for it, and return.
- */
-static void b_exint(struct pi_local *lp)
-{
-    unsigned long flags;
-    char st;
-    int cmd;
-    char c;
-
-    cmd = CTL + lp->base;
-    save_flags(flags);
-    cli();			/* disable interrupts */
-    st = rdscc(lp->cardbase, cmd, R0);	/* Fetch status */
-    /* reset external status latch */
-    wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-
-
-    switch (lp->tstate) {
-    case ACTIVE:		/* Unexpected underrun */
-	kfree_skb(lp->sndbuf);
-	lp->sndbuf = NULL;
-	wrtscc(lp->cardbase, cmd, R0, SEND_ABORT);
-	lp->tstate = FLAGOUT;
-	lp->stats.tx_errors++;
-	lp->stats.tx_fifo_errors++;
-	tdelay(lp, lp->squeldelay);
-	restore_flags(flags);
-	return;
-    case UNDERRUN:
-	lp->tstate = CRCOUT;
-	restore_flags(flags);
-	return;
-    case FLAGOUT:
-	/* Find a frame for transmission */
-	if ((lp->sndbuf = skb_dequeue(&lp->sndq)) == NULL) {
-	    /* Nothing to send - return to receive mode
-             * Tx OFF now - flag should have gone
-             */
-	    rts(lp, OFF);
-	    lp->tstate = IDLE;
-	    restore_flags(flags);
-	    return;
-	}
-	lp->txptr = lp->sndbuf->data;
-	lp->txptr++;		/* Ignore KISS control byte */
-	lp->txcnt = (int) lp->sndbuf->len - 1;
-	/* Get first char to send */
-	lp->txcnt--;
-	c = *lp->txptr++;
-	wrtscc(lp->cardbase, cmd, R0, RES_Tx_CRC);	/* reset for next frame */
-
-	/* Send abort on underrun */
-	if (lp->speed) {	/* If internally clocked */
-	    wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZI | ABUNDER);
-	} else {
-	    wrtscc(lp->cardbase, cmd, R10, CRCPS | ABUNDER);
-	}
-
-	wrtscc(lp->cardbase, cmd, R8, c);	/* First char out now */
-	wrtscc(lp->cardbase, cmd, R0, RES_EOM_L);	/* Reset end of message latch */
-
-#ifdef STUFF2
-        /* stuff an extra one if we can */
-	if (lp->txcnt) {
-	    lp->txcnt--;
-	    c = *lp->txptr++;
-	    /* Wait for tx buffer empty */
-	    while((rdscc(lp->cardbase, cmd, R0) & 0x04) == 0)
-		;
-	    wrtscc(lp->cardbase, cmd, R8, c);
-	}
-#endif
-
-	/* select transmit interrupts to enable */
-
-	wrtscc(lp->cardbase, cmd, R15, TxUIE);	/* allow Underrun int only */
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	wrtscc(lp->cardbase, cmd, R1, TxINT_ENAB | EXT_INT_ENAB);	/* Tx/Ext ints */
-
-	lp->tstate = ACTIVE;	/* char going out now */
-	restore_flags(flags);
-	return;
-
-    case DEFER:
-	/* Check DCD - debounce it
-         * See Intel Microcommunications Handbook, p2-308
-         */
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	if ((rdscc(lp->cardbase, cmd, R0) & DCD) != 0) {
-	    lp->tstate = DEFER;
-	    tdelay(lp, 100);
-	    /* defer until DCD transition or timeout */
-	    wrtscc(lp->cardbase, cmd, R15, CTSIE | DCDIE);
-	    restore_flags(flags);
-	    return;
-	}
-	if (random() > lp->persist) {
-	    lp->tstate = DEFER;
-	    tdelay(lp, lp->slotime);
-	    restore_flags(flags);
-	    return;
-	}
-	rts(lp, ON);		/* Transmitter on */
-	lp->tstate = ST_TXDELAY;
-	tdelay(lp, lp->txdelay);
-	restore_flags(flags);
-	return;
-
-    case ST_TXDELAY:
-
-	/* Get first char to send */
-	lp->txcnt--;
-	c = *lp->txptr++;
-	wrtscc(lp->cardbase, cmd, R0, RES_Tx_CRC);	/* reset for next frame */
-
-	/* Send abort on underrun */
-	if (lp->speed) {	/* If internally clocked */
-	    wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZI | ABUNDER);
-	} else {
-	    wrtscc(lp->cardbase, cmd, R10, CRCPS | ABUNDER);
-	}
-
-	wrtscc(lp->cardbase, cmd, R8, c);	/* First char out now */
-	wrtscc(lp->cardbase, cmd, R0, RES_EOM_L);	/* Reset end of message latch */
-
-#ifdef STUFF2
-        /* stuff an extra one if we can */
-	if (lp->txcnt) {
-	    lp->txcnt--;
-	    c = *lp->txptr++;
-	    /* Wait for tx buffer empty */
-	    while((rdscc(lp->cardbase, cmd, R0) & 0x04) == 0)
-		;
-	    wrtscc(lp->cardbase, cmd, R8, c);
-	}
-#endif
-
-	/* select transmit interrupts to enable */
-
-	wrtscc(lp->cardbase, cmd, R15, TxUIE);	/* allow Underrun int only */
-	wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	/* Tx/Extern ints on */
-	wrtscc(lp->cardbase, cmd, R1, TxINT_ENAB | EXT_INT_ENAB);
-
-	lp->tstate = ACTIVE;	/* char going out now */
-	restore_flags(flags);
-	return;
-    }
-
-    /* Receive Mode only
-     * This triggers when hunt mode is entered, & since an ABORT
-     * automatically enters hunt mode, we use that to clean up
-     * any waiting garbage
-     */
-    if ((lp->rstate == ACTIVE) && (st & BRK_ABRT)) {
-	(void) rdscc(lp->cardbase, cmd, R8);
-	(void) rdscc(lp->cardbase, cmd, R8);
-	(void) rdscc(lp->cardbase, cmd, R8);
-	lp->rcp = lp->rcvbuf->data;
-	lp->rcvbuf->cnt = 0;	/* rewind on DCD transition */
-    }
-    restore_flags(flags);
-}
-
-/* Probe for a PI card. */
-/* This routine also initializes the timer chip */
-
-static int __init hw_probe(int ioaddr)
-{
-    int time = 1000;		/* Number of milliseconds for test */
-    unsigned long start_time, end_time;
-
-    int base, tmr0, tmr1, tmrcmd;
-    int a = 1;
-    int b = 1;
-
-    base = ioaddr & 0x3f0;
-    tmr0 = TMR0 + base;
-    tmr1 = TMR1 + base;
-    tmrcmd = TMRCMD + base;
-
-    /* Set up counter chip timer 0 for 500 uS period square wave */
-    /* assuming a 3.68 mhz clock for now */
-    outb_p(SC0 | LSB_MSB | MODE3, tmrcmd);
-    outb_p(922 & 0xFF, tmr0);
-    outb_p(922 >> 8, tmr0);
-
-    /* Setup timer control word for timer 1*/
-    outb_p(SC1 | LSB_MSB | MODE0, tmrcmd);
-    outb_p((time << 1) & 0xFF, tmr1);
-    outb_p((time >> 7) & 0XFF, tmr1);
-
-    /* wait until counter reg is loaded */
-    do {
-	/* Latch count for reading */
-	outb_p(SC1, tmrcmd);
-	a = inb_p(tmr1);
-	b = inb_p(tmr1);
-    } while (b == 0);
-    start_time = jiffies;
-    while (b != 0) {
-	/* Latch count for reading */
-	outb_p(SC1, tmrcmd);
-	a = inb_p(tmr1);
-	b = inb_p(tmr1);
-	end_time = jiffies;
-	/* Don't wait forever - there may be no card here */
-	if ((end_time - start_time) > 200)
-	    return 0;		/* No card found */
-    }
-    end_time = jiffies;
-    /* 87 jiffies, for a 3.68 mhz clock, half that for a double speed clock */
-    if ((end_time - start_time) > 65) {
-	return (1);		/* PI card found */
-    } else {
-	/* Faster crystal - tmr0 needs adjusting */
-	/* Set up counter chip */
-	/* 500 uS square wave */
-	outb_p(SC0 | LSB_MSB | MODE3, tmrcmd);
-	outb_p(1844 & 0xFF, tmr0);
-	outb_p(1844 >> 8, tmr0);
-	return (2);		/* PI2 card found */
-    }
-}
-
-static void rts(struct pi_local *lp, int x)
-{
-    int tc;
-    long br;
-    int cmd;
-    int dummy;
-
-    /* assumes interrupts are off */
-    cmd = CTL + lp->base;
-
-    /* Reprogram BRG and turn on transmitter to send flags */
-    if (x == ON) {		/* Turn Tx ON and Receive OFF */
-	/* Exints off first to avoid abort int */
-	wrtscc(lp->cardbase, cmd, R15, 0);
-	wrtscc(lp->cardbase, cmd, R3, Rx8);	/* Rx off */
-	lp->rstate = IDLE;
-	if (cmd & 2) {		/* if channel a */
-	    /* Set up for TX dma */
-	    wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | EXT_INT_ENAB);
-	} else {
-	    wrtscc(lp->cardbase, cmd, R1, 0);	/* No interrupts */
-	}
-
-	if (!lp->clockmode) {
-	    if (lp->speed) {	/* if internally clocked */
-		br = lp->speed;	/* get desired speed */
-		tc = (lp->xtal / br) - 2;	/* calc 1X BRG divisor */
-		wrtscc(lp->cardbase, cmd, R12, tc & 0xFF);	/* lower byte */
-		wrtscc(lp->cardbase, cmd, R13, (tc >> 8) & 0xFF);	/* upper byte */
-	    }
-	}
-	wrtscc(lp->cardbase, cmd, R5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR);
-	/* Transmitter now on */
-    } else {			/* Tx OFF and Rx ON */
-	lp->tstate = IDLE;
-	wrtscc(lp->cardbase, cmd, R5, Tx8 | DTR);	/*  TX off */
-
-	if (!lp->clockmode) {
-	    if (lp->speed) {	/* if internally clocked */
-		/* Reprogram BRG for 32x clock for receive DPLL */
-		/* BRG off, keep Pclk source */
-		wrtscc(lp->cardbase, cmd, R14, BRSRC);
-		br = lp->speed;	/* get desired speed */
-		/* calc 32X BRG divisor */
-		tc = ((lp->xtal / 32) / br) - 2;
-		wrtscc(lp->cardbase, cmd, R12, tc & 0xFF);	/* lower byte */
-		wrtscc(lp->cardbase, cmd, R13, (tc >> 8) & 0xFF);	/* upper byte */
-		/* SEARCH mode, BRG source */
-		wrtscc(lp->cardbase, cmd, R14, BRSRC | SEARCH);
-		/* Enable the BRG */
-		wrtscc(lp->cardbase, cmd, R14, BRSRC | BRENABL);
-	    }
-	}
-	/* Flush rx fifo */
-	wrtscc(lp->cardbase, cmd, R3, Rx8);	/* Make sure rx is off */
-	wrtscc(lp->cardbase, cmd, R0, ERR_RES);	/* reset err latch */
-	dummy = rdscc(lp->cardbase, cmd, R1);	/* get status byte from R1 */
-	(void) rdscc(lp->cardbase, cmd, R8);
-	(void) rdscc(lp->cardbase, cmd, R8);
-
-	(void) rdscc(lp->cardbase, cmd, R8);
-
-	/* Now, turn on the receiver and hunt for a flag */
-	wrtscc(lp->cardbase, cmd, R3, RxENABLE | Rx8);
-	lp->rstate = ACTIVE;	/* Normal state */
-
-	if (cmd & 2) {		/* if channel a */
-	    setup_rx_dma(lp);
-	} else {
-	    /* reset buffer pointers */
-	    lp->rcp = lp->rcvbuf->data;
-	    lp->rcvbuf->cnt = 0;
-	    wrtscc(lp->cardbase, cmd, R1, (INT_ALL_Rx | EXT_INT_ENAB));
-	}
-	wrtscc(lp->cardbase, cmd, R15, BRKIE);	/* allow ABORT int */
-    }
-}
-
-static void scc_init(struct net_device *dev)
-{
-    unsigned long flags;
-    struct pi_local *lp = (struct pi_local *) dev->priv;
-
-    int tc;
-    long br;
-    register int cmd;
-
-    /* Initialize 8530 channel for SDLC operation */
-
-    cmd = CTL + lp->base;
-    save_flags(flags);
-    cli();
-
-    switch (cmd & CHANA) {
-    case CHANA:
-	wrtscc(lp->cardbase, cmd, R9, CHRA);	/* Reset channel A */
-	wrtscc(lp->cardbase, cmd, R2, 0xff);	/* Initialize interrupt vector */
-	break;
-    default:
-	wrtscc(lp->cardbase, cmd, R9, CHRB);	/* Reset channel B */
-	break;
-    }
-
-    /* Deselect all Rx and Tx interrupts */
-    wrtscc(lp->cardbase, cmd, R1, 0);
-
-    /* Turn off external interrupts (like CTS/CD) */
-    wrtscc(lp->cardbase, cmd, R15, 0);
-
-    /* X1 clock, SDLC mode */
-    wrtscc(lp->cardbase, cmd, R4, SDLC | X1CLK);
-
-    /* Tx/Rx parameters */
-    if (lp->speed) {		/* Use internal clocking */
-	wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZI);
-	if (!lp->clockmode)
-	    /* Tx Clk from BRG. Rcv Clk from DPLL, TRxC pin outputs DPLL */
-	    wrtscc(lp->cardbase, cmd, R11, TCBR | RCDPLL | TRxCDP | TRxCOI);
-	else
-	    /* Tx Clk from DPLL, Rcv Clk from DPLL, TRxC Outputs BRG */
-	    wrtscc(lp->cardbase, cmd, R11, TCDPLL | RCDPLL | TRxCBR | TRxCOI);
-    } else {			/* Use external clocking */
-	wrtscc(lp->cardbase, cmd, R10, CRCPS);
-	/* Tx Clk from Trxcl. Rcv Clk from Rtxcl, TRxC pin is input */
-	wrtscc(lp->cardbase, cmd, R11, TCTRxCP);
-    }
-
-    /* Null out SDLC start address */
-    wrtscc(lp->cardbase, cmd, R6, 0);
-
-    /* SDLC flag */
-    wrtscc(lp->cardbase, cmd, R7, FLAG);
-
-    /* Set up the Transmitter but don't enable it
-     *  DTR, 8 bit TX chars only
-     */
-    wrtscc(lp->cardbase, cmd, R5, Tx8 | DTR);
-
-    /* Receiver initial setup */
-    wrtscc(lp->cardbase, cmd, R3, Rx8);	/* 8 bits/char */
-
-    /* Setting up BRG now - turn it off first */
-    wrtscc(lp->cardbase, cmd, R14, BRSRC);	/* BRG off, keep Pclk source */
-
-    /* set the 32x time constant for the BRG in Receive mode */
-
-    if (lp->speed) {
-	br = lp->speed;		/* get desired speed */
-	tc = ((lp->xtal / 32) / br) - 2;	/* calc 32X BRG divisor */
-    } else {
-	tc = 14;
-    }
-
-    wrtscc(lp->cardbase, cmd, R12, tc & 0xFF);	/* lower byte */
-    wrtscc(lp->cardbase, cmd, R13, (tc >> 8) & 0xFF);	/* upper byte */
-
-    /* Following subroutine sets up and ENABLES the receiver */
-    rts(lp, OFF);		/* TX OFF and RX ON */
-
-    if (lp->speed) {
-	/* DPLL frm BRG, BRG src PCLK */
-	wrtscc(lp->cardbase, cmd, R14, BRSRC | SSBR);
-    } else {
-	/* DPLL frm rtxc,BRG src PCLK */
-	wrtscc(lp->cardbase, cmd, R14, BRSRC | SSRTxC);
-    }
-    wrtscc(lp->cardbase, cmd, R14, BRSRC | SEARCH);	/* SEARCH mode, keep BRG src */
-    wrtscc(lp->cardbase, cmd, R14, BRSRC | BRENABL);	/* Enable the BRG */
-
-    if (!(cmd & 2))		/* if channel b */
-	wrtscc(lp->cardbase, cmd, R1, (INT_ALL_Rx | EXT_INT_ENAB));
-
-    wrtscc(lp->cardbase, cmd, R15, BRKIE);	/* ABORT int */
-
-    /* Now, turn on the receiver and hunt for a flag */
-    wrtscc(lp->cardbase, cmd, R3, RxENABLE | RxCRC_ENAB | Rx8);
-
-    restore_flags(flags);
-}
-
-static void chipset_init(struct net_device *dev)
-{
-    int cardbase;
-    unsigned long flags;
-
-    cardbase = dev->base_addr & 0x3f0;
-
-    save_flags(flags);
-    cli();
-    wrtscc(cardbase, dev->base_addr + CTL, R9, FHWRES);	/* Hardware reset */
-    /* Disable interrupts with master interrupt ctrl reg */
-    wrtscc(cardbase, dev->base_addr + CTL, R9, 0);
-    restore_flags(flags);
-
-}
-
-
-int __init pi2_init(void)
-{
-    int *port;
-    int ioaddr = 0;
-    int card_type = 0;
-    int ports[] = {0x380, 0x300, 0x320, 0x340, 0x360, 0x3a0, 0};
-
-    printk(KERN_INFO "PI: V0.8 ALPHA April 23 1995 David Perry (dp@hydra.carleton.ca)\n");
-
-    /* Only one card supported for now */
-    for (port = &ports[0]; *port && !card_type; port++) {
-	ioaddr = *port;
-
-	if (check_region(ioaddr, PI_TOTAL_SIZE) == 0) {
-	    printk(KERN_INFO "PI: Probing for card at address %#3x\n",ioaddr);
-	    card_type = hw_probe(ioaddr);
-	}
-    }
-
-    switch (card_type) {
-    case 1:
-	printk(KERN_INFO "PI: Found a PI card at address %#3x\n", ioaddr);
-	break;
-    case 2:
-	printk(KERN_INFO "PI: Found a PI2 card at address %#3x\n", ioaddr);
-	break;
-    default:
-	printk(KERN_ERR "PI: ERROR: No card found\n");
-	return -EIO;
-    }
-
-    /* Link a couple of device structures into the chain */
-    /* For the A port */
-    /* Allocate space for 4 buffers even though we only need 3,
-       because one of them may cross a DMA page boundary and
-       be rejected by get_dma_buffer().
-    */
-    register_netdev(&pi0a);
-
-    pi0a.priv = kmalloc(sizeof(struct pi_local) + (DMA_BUFF_SIZE + sizeof(struct mbuf)) * 4, GFP_KERNEL | GFP_DMA);
-
-    pi0a.dma = PI_DMA;
-    pi0a.base_addr = ioaddr + 2;
-    pi0a.irq = 0;
-
-    /* And the B port */
-    register_netdev(&pi0b);
-    pi0b.base_addr = ioaddr;
-    pi0b.irq = 0;
-
-    pi0b.priv = kmalloc(sizeof(struct pi_local) + (DMA_BUFF_SIZE + sizeof(struct mbuf)) * 4, GFP_KERNEL | GFP_DMA);
-
-    /* Now initialize them */
-    pi_probe(&pi0a, card_type);
-    pi_probe(&pi0b, card_type);
-
-    pi0b.irq = pi0a.irq;	/* IRQ is shared */
-
-    return 0;
-}
-
-static int valid_dma_page(unsigned long addr, unsigned long dev_buffsize)
-{
-    if (((addr & 0xffff) + dev_buffsize) <= 0x10000)
-	return 1;
-    else
-	return 0;
-}
-
-static int pi_set_mac_address(struct net_device *dev, void *addr)
-{
-    struct sockaddr *sa = (struct sockaddr *)addr;
-    memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);	/* addr is an AX.25 shifted ASCII */
-    return 0;						/* mac address */
-}
-
-/* Allocate a buffer which does not cross a DMA page boundary */
-static char *
-get_dma_buffer(unsigned long *mem_ptr)
-{
-    char *ret;
-
-    ret = (char *)*mem_ptr;
-
-    if(!valid_dma_page(*mem_ptr, DMA_BUFF_SIZE + sizeof(struct mbuf))){
-	*mem_ptr += (DMA_BUFF_SIZE + sizeof(struct mbuf));
-	ret = (char *)*mem_ptr;
-    }
-    *mem_ptr += (DMA_BUFF_SIZE + sizeof(struct mbuf));
-    return (ret);
-}
-
-static int pi_probe(struct net_device *dev, int card_type)
-{
-    short ioaddr;
-    struct pi_local *lp;
-    unsigned long flags;
-    unsigned long mem_ptr;
-
-    ioaddr = dev->base_addr;
-
-    /* Initialize the device structure. */
-    /* Must be done before chipset_init */
-    /* Make certain the data structures used by the PI2 are aligned. */
-    dev->priv = (void *) (((int) dev->priv + 7) & ~7);
-    lp = (struct pi_local *) dev->priv;
-
-    memset(dev->priv, 0, sizeof(struct pi_local));
-
-    /* Allocate some buffers which do not cross DMA page boundaries */
-    mem_ptr = (unsigned long) dev->priv + sizeof(struct pi_local);
-    lp->txdmabuf = get_dma_buffer(&mem_ptr);
-    lp->rxdmabuf1 = (struct mbuf *) get_dma_buffer(&mem_ptr);
-    lp->rxdmabuf2 = (struct mbuf *) get_dma_buffer(&mem_ptr);
-
-    /* Initialize rx buffer */
-    lp->rcvbuf = lp->rxdmabuf1;
-    lp->rcp = lp->rcvbuf->data;
-    lp->rcvbuf->cnt = 0;
-
-    /* Initialize the transmit queue head structure */
-    skb_queue_head_init(&lp->sndq);
-
-    /* These need to be initialized before scc_init is called. */
-    if (card_type == 1)
-	lp->xtal = (unsigned long) SINGLE / 2;
-    else
-	lp->xtal = (unsigned long) DOUBLE / 2;
-    lp->base = dev->base_addr;
-    lp->cardbase = dev->base_addr & 0x3f0;
-    if (dev->base_addr & CHANA) {
-	lp->speed = DEF_A_SPEED;
-	/* default channel access Params */
-	lp->txdelay = DEF_A_TXDELAY;
-	lp->persist = DEF_A_PERSIST;
-	lp->slotime = DEF_A_SLOTIME;
-	lp->squeldelay = DEF_A_SQUELDELAY;
-	lp->clockmode = DEF_A_CLOCKMODE;
-
-    } else {
-	lp->speed = DEF_B_SPEED;
-	/* default channel access Params */
-	lp->txdelay = DEF_B_TXDELAY;
-	lp->persist = DEF_B_PERSIST;
-	lp->slotime = DEF_B_SLOTIME;
-	lp->squeldelay = DEF_B_SQUELDELAY;
-	lp->clockmode = DEF_B_CLOCKMODE;
-    }
-    lp->bufsiz = DMA_BUFF_SIZE;
-    lp->tstate = IDLE;
-
-    chipset_init(dev);
-
-    if (dev->base_addr & CHANA) {	/* Do these things only for the A port */
-	/* Note that a single IRQ services 2 devices (A and B channels) */
-
-	lp->dmachan = dev->dma;
-	if (lp->dmachan < 1 || lp->dmachan > 3)
-	    printk(KERN_ERR "PI: DMA channel %d out of range\n", lp->dmachan);
-
-	/* chipset_init() was already called */
-
-	if (dev->irq < 2) {
-	    autoirq_setup(0);
-	    save_flags(flags);
-	    cli();
-	    wrtscc(lp->cardbase, CTL + lp->base, R1, EXT_INT_ENAB);
-	    /* enable PI card interrupts */
-	    wrtscc(lp->cardbase, CTL + lp->base, R9, MIE | NV);
-	    restore_flags(flags);
-	    /* request a timer interrupt for 1 mS hence */
-	    tdelay(lp, 1);
-	    /* 20 "jiffies" should be plenty of time... */
-	    dev->irq = autoirq_report(20);
-	    if (!dev->irq) {
-		printk(KERN_ERR "PI: Failed to detect IRQ line.\n");
-	    }
-	    save_flags(flags);
-	    cli();
-	    wrtscc(lp->cardbase, dev->base_addr + CTL, R9, FHWRES);	/* Hardware reset */
-	    /* Disable interrupts with master interrupt ctrl reg */
-	    wrtscc(lp->cardbase, dev->base_addr + CTL, R9, 0);
-	    restore_flags(flags);
-	}
-
-	printk(KERN_INFO "PI: Autodetected IRQ %d, assuming DMA %d.\n",
-	       dev->irq, dev->dma);
-
-	/* This board has jumpered interrupts. Snarf the interrupt vector
-		   now.  There is no point in waiting since no other device can use
-		   the interrupt, and this marks the 'irqaction' as busy. */
-	{
-	    int irqval = request_irq(dev->irq, &pi_interrupt,0, "pi2", dev);
-	    if (irqval) {
-		printk(KERN_ERR "PI: unable to get IRQ %d (irqval=%d).\n",
-		       dev->irq, irqval);
-		return -EAGAIN;
-	    }
-	}
-
-	/* Grab the region */
-	request_region(ioaddr & 0x3f0, PI_TOTAL_SIZE, "pi2" );
-
-
-    }				/* Only for A port */
-    dev->open = pi_open;
-    dev->stop = pi_close;
-    dev->do_ioctl = pi_ioctl;
-    dev->hard_start_xmit = pi_send_packet;
-    dev->get_stats = pi_get_stats;
-
-    /* Fill in the fields of the device structure */
-
-    dev_init_buffers(dev);
-    
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
-    dev->hard_header    = ax25_encapsulate;
-    dev->rebuild_header = ax25_rebuild_header;
-#endif
-
-    dev->set_mac_address = pi_set_mac_address;
-
-    dev->type = ARPHRD_AX25;			/* AF_AX25 device */
-    dev->hard_header_len = 73;			/* We do digipeaters now */
-    dev->mtu = 1500;				/* eth_mtu is the default */
-    dev->addr_len = 7;				/* sizeof an ax.25 address */
-    memcpy(dev->broadcast, ax25_bcast, 7);
-    memcpy(dev->dev_addr, ax25_test, 7);
-
-    /* New-style flags. */
-    dev->flags = 0;
-    return 0;
-}
-
-/* Open/initialize the board.  This is called (in the current kernel)
-   sometime after booting when the 'ifconfig' program is run.
-
-   This routine should set everything up anew at each open, even
-   registers that "should" only need to be set once at boot, so that
-   there is non-reboot way to recover if something goes wrong.
-   */
-static int pi_open(struct net_device *dev)
-{
-    unsigned long flags;
-    static first_time = 1;
-
-    struct pi_local *lp = (struct pi_local *) dev->priv;
-
-    if (dev->base_addr & 2) {	/* if A channel */
-	if (first_time) {
-	    if (request_dma(dev->dma,"pi2")) {
-		free_irq(dev->irq, dev);
-		return -EAGAIN;
-	    }
-	}
-	/* Reset the hardware here. */
-	chipset_init(dev);
-    }
-    lp->tstate = IDLE;
-
-    if (dev->base_addr & 2) {	/* if A channel */
-	scc_init(dev);		/* Called once for each channel */
-	scc_init(dev->next);
-    }
-    /* master interrupt enable */
-    save_flags(flags);
-    cli();
-    wrtscc(lp->cardbase, CTL + lp->base, R9, MIE | NV);
-    restore_flags(flags);
-
-    lp->open_time = jiffies;
-
-    dev->tbusy = 0;
-    dev->interrupt = 0;
-    dev->start = 1;
-    first_time = 0;
-
-    MOD_INC_USE_COUNT;
-
-    return 0;
-}
-
-static int pi_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
-    struct pi_local *lp = (struct pi_local *) dev->priv;
-
-    hardware_send_packet(lp, skb);
-    dev->trans_start = jiffies;
-
-    return 0;
-}
-
-/* The typical workload of the driver:
-   Handle the network interface interrupts. */
-static void pi_interrupt(int reg_ptr, void *dev_id, struct pt_regs *regs)
-{
-/*    int irq = -(((struct pt_regs *) reg_ptr)->orig_eax + 2);*/
-    struct pi_local *lp;
-    int st;
-    unsigned long flags;
-
-/*    dev_b = dev_a->next;	 Relies on the order defined in Space.c */
-
-#if 0
-    if (dev_a == NULL) {
-	printk(KERN_ERR "PI: pi_interrupt(): irq %d for unknown device.\n", irq);
-	return;
-    }
-#endif
-    /* Read interrupt status register (only valid from channel A)
-     * Process all pending interrupts in while loop
-     */
-    lp = (struct pi_local *) pi0a.priv;	/* Assume channel A */
-    while ((st = rdscc(lp->cardbase, pi0a.base_addr | CHANA | CTL, R3)) != 0) {
-	if (st & CHBTxIP) {
-	    /* Channel B Transmit Int Pending */
-	    lp = (struct pi_local *) pi0b.priv;
-	    b_txint(lp);
-	} else if (st & CHARxIP) {
-	    /* Channel A Rcv Interrupt Pending */
-	    lp = (struct pi_local *) pi0a.priv;
-	    a_rxint(&pi0a, lp);
-	} else if (st & CHATxIP) {
-	    /* Channel A Transmit Int Pending */
-	    lp = (struct pi_local *) pi0a.priv;
-	    a_txint(lp);
-	} else if (st & CHAEXT) {
-	    /* Channel A External Status Int */
-	    lp = (struct pi_local *) pi0a.priv;
-	    a_exint(lp);
-	} else if (st & CHBRxIP) {
-	    /* Channel B Rcv Interrupt Pending */
-	    lp = (struct pi_local *) pi0b.priv;
-	    b_rxint(&pi0b, lp);
-	} else if (st & CHBEXT) {
-	    /* Channel B External Status Int */
-	    lp = (struct pi_local *) pi0b.priv;
-	    b_exint(lp);
-	}
-	/* Reset highest interrupt under service */
-	save_flags(flags);
-	cli();
-	wrtscc(lp->cardbase, lp->base + CTL, R0, RES_H_IUS);
-	restore_flags(flags);
-    }				/* End of while loop on int processing */
-    return;
-}
-
-/* The inverse routine to pi_open(). */
-static int pi_close(struct net_device *dev)
-{
-    unsigned long flags;
-    struct pi_local *lp;
-    struct sk_buff *ptr;
-
-    save_flags(flags);
-    cli();
-
-    lp = (struct pi_local *) dev->priv;
-    ptr = NULL;
-
-    chipset_init(dev);		/* reset the scc */
-    disable_dma(lp->dmachan);
-
-    lp->open_time = 0;
-
-    dev->tbusy = 1;
-    dev->start = 0;
-
-    /* Free any buffers left in the hardware transmit queue */
-    while ((ptr = skb_dequeue(&lp->sndq)) != NULL)
-	kfree_skb(ptr);
-
-    restore_flags(flags);
-
-    MOD_DEC_USE_COUNT;
-
-    return 0;
-}
-
-static int pi_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-    unsigned long flags;
-    struct pi_req rq;
-    struct pi_local *lp = (struct pi_local *) dev->priv;
-
-    int ret = verify_area(VERIFY_WRITE, ifr->ifr_data, sizeof(struct pi_req));
-    if (ret)
-	return ret;
-
-    if(cmd!=SIOCDEVPRIVATE)
-    	return -EINVAL;
-
-    copy_from_user(&rq, ifr->ifr_data, sizeof(struct pi_req));
-
-    switch (rq.cmd) {
-    case SIOCSPIPARAM:
-
-	if (!capable(CAP_NET_ADMIN))
-	    return -EPERM;
-	save_flags(flags);
-	cli();
-	lp->txdelay = rq.txdelay;
-	lp->persist = rq.persist;
-	lp->slotime = rq.slotime;
-	lp->squeldelay = rq.squeldelay;
-	lp->clockmode = rq.clockmode;
-	lp->speed = rq.speed;
-	pi_open(&pi0a); /* both channels get reset %%% */
-	restore_flags(flags);
-	ret = 0;
-	break;
-
-    case SIOCSPIDMA:
-
-	if (!capable(CAP_SYS_RAWIO))
-	    return -EPERM;
-	ret = 0;
-	if (dev->base_addr & 2) {   /* if A channel */
-	   if (rq.dmachan < 1 || rq.dmachan > 3)
-		return -EINVAL;
-	   save_flags(flags);
-	   cli();
-	   pi_close(dev);
-	   free_dma(lp->dmachan);
-	   dev->dma = lp->dmachan = rq.dmachan;
-	   if (request_dma(lp->dmachan,"pi2"))
-		ret = -EAGAIN;
-	   pi_open(dev);
-	   restore_flags(flags);
-	}
-	break;
-
-    case SIOCSPIIRQ:
-	ret = -EINVAL;      /* add this later */
-	break;
-
-    case SIOCGPIPARAM:
-    case SIOCGPIDMA:
-    case SIOCGPIIRQ:
-
-	rq.speed = lp->speed;
-	rq.txdelay = lp->txdelay;
-	rq.persist = lp->persist;
-	rq.slotime = lp->slotime;
-	rq.squeldelay = lp->squeldelay;
-	rq.clockmode = lp->clockmode;
-	rq.dmachan = lp->dmachan;
-	rq.irq = dev->irq;
-	copy_to_user(ifr->ifr_data, &rq, sizeof(struct pi_req));
-	ret = 0;
-	break;
-
-    default:
-	ret = -EINVAL;
-    }
-    return ret;
-}
-
-/* Get the current statistics.	This may be called with the card open or
-   closed. */
-static struct net_device_stats *pi_get_stats(struct net_device *dev)
-{
-	struct pi_local *lp = (struct pi_local *) dev->priv;
-
-	return &lp->stats;
-}
-
-#ifdef MODULE
-EXPORT_NO_SYMBOLS;
-
-MODULE_AUTHOR("David Perry <dp@hydra.carleton.ca>");
-MODULE_DESCRIPTION("AX.25 driver for the Ottawa PI and PI/2 HDLC cards");
-
-int init_module(void)
-{
-    return pi2_init();
-}
-
-void cleanup_module(void)
-{
-    free_irq(pi0a.irq, &pi0a);	/* IRQs and IO Ports are shared */
-    release_region(pi0a.base_addr & 0x3f0, PI_TOTAL_SIZE);
-
-    kfree(pi0a.priv);
-    pi0a.priv = NULL;
-    unregister_netdev(&pi0a);
-
-    kfree(pi0b.priv);
-    pi0b.priv = NULL;
-    unregister_netdev(&pi0b);
-}
-#endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/hamradio/pt.c linux/drivers/net/hamradio/pt.c
--- v2.4.0-prerelease/linux/drivers/net/hamradio/pt.c	Sun Oct  8 10:50:19 2000
+++ linux/drivers/net/hamradio/pt.c	Wed Dec 31 16:00:00 1969
@@ -1,1777 +0,0 @@
-#undef PT_DEBUG 1
-/*
- * pt.c: Linux device driver for the Gracilis PackeTwin.
- * Copyright (c) 1995 Craig Small VK2XLZ (vk2xlz@vk2xlz.ampr.org.)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge MA 02139, USA.
- *
- * This driver is largely based upon the PI driver by David Perry.
- *
- * Revision History
- * 23/02/95 cs  Started again on driver, last one scrapped
- * 27/02/95 cs  Program works, we have chan A only.  Tx stays on
- * 28/02/95 cs  Fix Tx problem (& TxUIE instead of | )
- *		Fix Chan B Tx timer problem, used TMR2 instead of TMR1
- * 03/03/95 cs  Painfully found out (after 3 days) SERIAL_CFG is write only
- *              created image of it and DMA_CFG
- * 21/06/95 cs  Upgraded to suit PI driver 0.8 ALPHA
- * 22/08/95 cs	Changed it all around to make it like pi driver
- * 23/08/95 cs  It now works, got caught again by TMR2 and we must have
- *				auto-enables for daughter boards.
- * 07/10/95 cs  Fixed for 1.3.30 (hopefully)
- * 26/11/95 cs  Fixed for 1.3.43, ala 29/10 for pi2.c by ac
- * 21/12/95 cs  Got rid of those nasty warnings when compiling, for 1.3.48
- * 08/08/96 jsn Convert to use as a module. Removed send_kiss, empty_scc and
- *		pt_loopback functions - they were unused.
- * 13/12/96 jsn Fixed to match Linux networking changes.
- */
-
-/*
- * default configuration of the PackeTwin,
- * ie What Craig uses his PT for.
- */
-#define PT_DMA 3
-
-#define DEF_A_SPEED	4800		/* 4800 baud */
-#define DEF_A_TXDELAY	350		/* 350 mS */
-#define DEF_A_PERSIST	64		/* 25% persistence */
-#define DEF_A_SLOTIME	10		/* 10 mS */
-#define DEF_A_SQUELDELAY 30		/* 30 mS */
-#define DEF_A_CLOCKMODE	0		/* Normal clock mode */
-#define DEF_A_NRZI		1		/* NRZI mode */
-
-#define DEF_B_SPEED	0		/* 0 means external clock */
-#define DEF_B_TXDELAY	250		/* 250 mS */
-#define DEF_B_PERSIST	64		/* 25% */
-#define DEF_B_SLOTIME	10		/* 10 mS */
-#define DEF_B_SQUELDELAY 30		/* 30 mS */
-#define DEF_B_CLOCKMODE 0 		/* Normal clock mode ?!? */
-#define DEF_B_NRZI		1		/* NRZI mode */
-
-
-#define	PARAM_TXDELAY	1
-#define	PARAM_PERSIST	2
-#define	PARAM_SLOTTIME	3
-#define	PARAM_FULLDUP	5
-#define	PARAM_HARDWARE	6
-#define	PARAM_RETURN	255
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/malloc.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/uaccess.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/timer.h>
-#include <linux/if_arp.h>
-#include <linux/pt.h>
-#include <linux/init.h>
-#include "z8530.h"
-#include <net/ax25.h>
-
-struct mbuf {
-    struct mbuf *next;
-    int cnt;
-    char data[0];
-};
-
-/*
- * The actual PT devices we will use
- */
-static int pt0_preprobe(struct net_device *dev) {return 0;} /* Dummy probe function */
-static struct net_device pt0a = { "pt0a", 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, pt0_preprobe };
-static struct net_device pt0b = { "pt0b", 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, pt0_preprobe };
-
-/* Ok, they shouldn't be here, but both channels share them */
-/* The Images of the Serial and DMA config registers */
-static unsigned char pt_sercfg = 0;
-static unsigned char pt_dmacfg = 0;
-
-/* The number of IO ports used by the card */
-#define PT_TOTAL_SIZE   16
-
-/* Index to functions, as function prototypes. */
-
-static int pt_probe(struct net_device *dev);
-static int pt_open(struct net_device *dev);
-static int pt_send_packet(struct sk_buff *skb, struct net_device *dev);
-static void pt_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-static int pt_close(struct net_device *dev);
-static int pt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-static struct net_device_stats *pt_get_stats(struct net_device *dev);
-static void pt_rts(struct pt_local *lp, int x);
-static void pt_rxisr(struct net_device *dev);
-static void pt_txisr(struct pt_local *lp);
-static void pt_exisr(struct pt_local *lp);
-static void pt_tmrisr(struct pt_local *lp);
-static char *get_dma_buffer(unsigned long *mem_ptr);
-static int valid_dma_page(unsigned long addr, unsigned long dev_buffsize);
-static int hw_probe(int ioaddr);
-static void tdelay(struct pt_local *lp, int time);
-static void chipset_init(struct net_device *dev);
-
-static char ax25_bcast[7] =
-{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1};
-static char ax25_test[7] =
-{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1};
-
-
-
-static int ext2_secrm_seed = 152;
-
-static inline unsigned char random(void)
-{
-    return (unsigned char) (ext2_secrm_seed = ext2_secrm_seed * 60691 + 1);
-}
-
-static inline void wrtscc(int cbase, int ctl, int sccreg, unsigned char val)
-{
-    outb_p(sccreg, ctl);        /* Select register */
-    outb_p(val, ctl);           /* Output value */
-}
-
-static inline unsigned char rdscc(int cbase, int ctl, int sccreg)
-{
-    unsigned char retval;
-
-    outb_p(sccreg, ctl);        /* Select register */
-    retval = inb_p(ctl);
-    return retval;
-}
-
-static void switchbuffers(struct pt_local *lp)
-{
-    if (lp->rcvbuf == lp->rxdmabuf1)
-	lp->rcvbuf = lp->rxdmabuf2;
-    else
-	lp->rcvbuf = lp->rxdmabuf1;
-}
-
-static void hardware_send_packet(struct pt_local *lp, struct sk_buff *skb)
-{
-	char kickflag;
-	unsigned long flags;
-	char *ptr;
-	struct net_device *dev;
-
-	/* First, let's see if this packet is actually a KISS packet */
-	ptr = skb->data;
-	if (ptr[0] != 0 && skb->len >= 2)
-	{
-#ifdef PT_DEBUG
-		printk(KERN_DEBUG "PT: Rx KISS... Control = %d, value = %d.\n", ptr[0], (skb->len > 1? ptr[1] : -1));
-#endif
-		/* Kludge to get device */
-		if ((struct pt_local*)(&pt0b.priv) == lp)
-			dev = &pt0b;
-		else
-			dev = &pt0a;
-		switch(ptr[0])
-		{
-
-			case PARAM_TXDELAY:
-				/*TxDelay is in 10mS increments */
-				lp->txdelay = ptr[1] * 10;
-				break;
-			case PARAM_PERSIST:
-				lp->persist = ptr[1];
-				break;
-			case PARAM_SLOTTIME:
-				lp->slotime = ptr[1];
-				break;
-			case PARAM_FULLDUP:
-				/* Yeah right, you wish!  Fullduplex is a little while to
-				 * go folks, but this is how you fire it up
-				 */
-				break;
-			/* Perhaps we should have txtail here?? */
-		} /*switch */
-		return;
-	}
-
-	lp->stats.tx_packets++;
-	lp->stats.tx_bytes+=skb->len;
-	save_flags(flags);
-	cli();
-	kickflag = (skb_peek(&lp->sndq) == NULL) && (lp->sndbuf == NULL);
-	restore_flags(flags);
-
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: hardware_send_packet(): kickflag = %d (%d).\n", kickflag, lp->base & CHANA);
-#endif
-	skb_queue_tail(&lp->sndq, skb);
-	if (kickflag) 
-	{
-        /* Simulate interrupt to transmit */
-        	if (lp->dmachan)
-			pt_txisr(lp);
-		else 
-		{
-            		save_flags(flags);
-	           	cli();
-            		if (lp->tstate == IDLE)
-                		pt_txisr(lp);
-            		restore_flags(flags);
-		}
-	}
-} /* hardware_send_packet() */
-
-static void setup_rx_dma(struct pt_local *lp)
-{
-	unsigned long flags;
-	int cmd;
-	unsigned long dma_abs;
-	unsigned char dmachan;
-
-	save_flags(flags);
-	cli();
-
-	dma_abs = (unsigned long) (lp->rcvbuf->data);
-	dmachan = lp->dmachan;
-	cmd = lp->base + CTL;
-
-	if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
-		panic("PI: RX buffer violates DMA boundary!");
-
-	/* Get ready for RX DMA */
-	wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);
-
-	disable_dma(dmachan);
-	clear_dma_ff(dmachan);
-
-	/*
-	 *	Set DMA mode register to single transfers, incrementing address,
-	 *	auto init, writes
-	 */
-
-	set_dma_mode(dmachan, DMA_MODE_READ | 0x10);
-	set_dma_addr(dmachan, dma_abs);
-	set_dma_count(dmachan, lp->bufsiz);
-	enable_dma(dmachan);
-
-	/*
-	 *	If a packet is already coming in, this line is supposed to
-	 *	avoid receiving a partial packet.
-	 */
-
-	wrtscc(lp->cardbase, cmd, R0, RES_Rx_CRC);
-
-	/* Enable RX dma */
-	wrtscc(lp->cardbase, cmd, R1,
-		WT_RDY_ENAB | WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);
-
-	restore_flags(flags);
-}
-
-static void setup_tx_dma(struct pt_local *lp, int length)
-{
-    unsigned long dma_abs;
-    unsigned long flags;
-    unsigned long dmachan;
-
-    save_flags(flags);
-    cli();
-
-    dmachan = lp->dmachan;
-    dma_abs = (unsigned long) (lp->txdmabuf);
-
-    if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
-	panic("PT: TX buffer violates DMA boundary!");
-
-    disable_dma(dmachan);
-    /* Set DMA mode register to single transfers, incrementing address,
-     *  no auto init, reads
-     */
-    set_dma_mode(dmachan, DMA_MODE_WRITE);
-    clear_dma_ff(dmachan);
-    set_dma_addr(dmachan, dma_abs);
-    /* output byte count */
-    set_dma_count(dmachan, length);
-
-    restore_flags(flags);
-}
-
-/*
- * This sets up all the registers in the SCC for the given channel
- * based upon tsync_hwint()
- */
-static void scc_init(struct net_device *dev)
-{
-	unsigned long flags;
-	struct pt_local *lp = (struct pt_local*) dev->priv;
-	register int cmd = lp->base + CTL;
-	int tc, br;
-
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: scc_init(): (%d).\n", lp->base & CHANA);
-#endif
-	save_flags(flags);
-	cli();
-
-	/* We may put something here to enable_escc */
-
-	if (cmd & CHANA)
-	{
-	        wrtscc(lp->cardbase, cmd, R9, CHRA);	/* Reset channel A */
-	        wrtscc(lp->cardbase, cmd, R2, 0xff);	/* Initialise interrupt vector */
-	}
-	else
-	    	wrtscc(lp->cardbase, cmd, R9, CHRB);	/* Reset channel B */
-
-	/* Deselect all Rx and Tx interrupts */
-	wrtscc(lp->cardbase, cmd, R1, 0);
-
-	/* Turn off external interrupts (like CTS/CD) */
-	wrtscc(lp->cardbase, cmd, R15, 0);
-
-	/* X1 clock, SDLC mode */
-	wrtscc(lp->cardbase, cmd, R4, SDLC | X1CLK);
-
-	/* Preset CRC and set mode */
-	if (lp->nrzi)
-	    	/* Preset Tx CRC, put into NRZI mode */
-		wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZI);
-	else
-		/* Preset Tx CRC, put into NRZ mode */
-		wrtscc(lp->cardbase, cmd, R10, CRCPS);
-
-	/* Tx/Rx parameters */
-	if (lp->speed)		/* Use internal clocking */
-	       /* Tx Clk from BRG. Rx Clk form DPLL, TRxC pin outputs DPLL */
-	       wrtscc(lp->cardbase, cmd, R11, TCBR | RCDPLL | TRxCDP | TRxCOI);
-	else	/* Use external clocking */
-	{
-		/* Tx Clk from TRxCL. Rx Clk from RTxCL, TRxC pin if input */
-		wrtscc(lp->cardbase, cmd, R11, TCTRxCP | RCRTxCP | TRxCBR);
-        	wrtscc(lp->cardbase,cmd, R14, 0);	/* wiz1 */
-	}
-
-	/* Null out SDLC start address */
-	wrtscc(lp->cardbase, cmd, R6, 0);
-
-	/* SDLC flag */
-	wrtscc(lp->cardbase, cmd, R7, FLAG);
-
-	/* Setup Tx but don't enable it */
-	wrtscc(lp->cardbase, cmd, R5, Tx8 | DTR);
-
-	/* Setup Rx */
-	wrtscc(lp->cardbase, cmd, R3, AUTO_ENAB | Rx8);
-
-	/* Setup the BRG, turn it off first */
-	wrtscc(lp->cardbase, cmd, R14, BRSRC);
-
-	/* set the 32x time constant for the BRG in Rx mode */
-	if (lp->speed)
-	{
-		br = lp->speed;
-		tc = ((lp->xtal / 32) / (br * 2)) - 2;
-		wrtscc(lp->cardbase, cmd, R12, tc & 0xff);		/* lower byte */
-   		wrtscc(lp->cardbase, cmd, R13, (tc >> 8) & 0xff);	/* upper byte */
-	}
-
-	/* Turn transmitter off, to setup stuff */
-   	pt_rts(lp, OFF);
-
-	/* External clocking */
-	if (lp->speed)
-	{
-		/* DPLL frm BRG, BRG src PCLK */
-		wrtscc(lp->cardbase, cmd, R14, BRSRC | SSBR);
-		wrtscc(lp->cardbase, cmd, R14, BRSRC | SEARCH);	/* SEARCH mode, keep BRG src */
-		wrtscc(lp->cardbase, cmd, R14, BRSRC | BRENABL);	/* Enable the BRG */
-
-	    /* Turn off external clock port */
-		if (lp->base & CHANA)
-			outb_p( (pt_sercfg &= ~PT_EXTCLKA), (lp->cardbase + SERIAL_CFG) );
-		else
-			outb_p( (pt_sercfg &= ~PT_EXTCLKB), (lp->cardbase + SERIAL_CFG) );
-	}
-	else
-	{
-		/* DPLL frm rtxc,BRG src PCLK */
-		/* Turn on external clock port */
-		if (lp->base & CHANA)
-			outb_p( (pt_sercfg |= PT_EXTCLKA), (lp->cardbase + SERIAL_CFG) );
-		else
-			outb_p( (pt_sercfg |= PT_EXTCLKB), (lp->cardbase + SERIAL_CFG) );
-	}
-
-	if (!lp->dmachan)
-		wrtscc(lp->cardbase, cmd, R1, (INT_ALL_Rx | EXT_INT_ENAB));
-
-	wrtscc(lp->cardbase, cmd, R15, BRKIE);	/* ABORT int */
-
-	/* Turn on the DTR to tell modem we're alive */
-	if (lp->base & CHANA)
-		outb_p( (pt_sercfg |= PT_DTRA_ON), (lp->cardbase + SERIAL_CFG) );
-	else
-	    	outb_p( (pt_sercfg |= PT_DTRB_ON), (lp->cardbase + SERIAL_CFG) );
-
-	/* Now, turn on the receiver and hunt for a flag */
-	wrtscc(lp->cardbase, cmd, R3, RxENABLE | RxCRC_ENAB | AUTO_ENAB | Rx8 );
-
-	restore_flags(flags);
-
-} /* scc_init() */
-
-/* Resets the given channel and whole SCC if both channels off */
-static void chipset_init(struct net_device *dev)
-{
-
-	struct pt_local *lp = (struct pt_local*) dev->priv;
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: chipset_init(): pt0a tstate = %d.\n", ((struct pt_local*)pt0a.priv)->tstate);
-	printk(KERN_DEBUG "PT: chipset_init(): pt0b tstate = %d.\n", ((struct pt_local*)pt0b.priv)->tstate);
-#endif
-	/* Reset SCC if both channels are to be canned */
-	if ( ((lp->base & CHANA) && !(pt_sercfg & PT_DTRB_ON)) ||
-			(!(lp->base & CHANA) && !(pt_sercfg & PT_DTRA_ON)) )
-	{
-		wrtscc(lp->cardbase, lp->base + CTL, R9, FHWRES);
-        	/* Reset int and dma registers */
-	        outb_p((pt_sercfg = 0), lp->cardbase + SERIAL_CFG);
-	        outb_p((pt_dmacfg = 0), lp->cardbase + DMA_CFG);
-#ifdef PT_DEBUG
-		printk(KERN_DEBUG "PT: chipset_init() Resetting SCC, called by ch (%d).\n", lp->base & CHANA);
-#endif
-	}
-	/* Reset individual channel */
-    	if (lp->base & CHANA) {
-        	wrtscc(lp->cardbase, lp->base + CTL, R9, MIE | DLC | NV | CHRA);
-        	outb_p( (pt_sercfg &= ~PT_DTRA_ON), lp->cardbase + SERIAL_CFG);
-    	} else {
-        	wrtscc(lp->cardbase, lp->base + CTL, R9, MIE | DLC | NV | CHRB);
-			outb_p( (pt_sercfg &= ~PT_DTRB_ON), lp->cardbase + SERIAL_CFG);
-	}
-} /* chipset_init() */
-
-
-int __init ptwin_init(void)
-{
-    int *port;
-    int ioaddr = 0;
-    int card_type = 0;
-    int ports[] =
-    { 0x230, 0x240, 0x250, 0x260, 0x270, 0x280, 0x290, 0x2a0,
-      0x2b0, 0x300, 0x330, 0x3f0,  0};
-
-    printk(KERN_INFO "PT: 0.41 ALPHA 07 October 1995 Craig Small (csmall@small.dropbear.id.au)\n");
-
-    for (port = &ports[0]; *port && !card_type; port++) {
-        ioaddr = *port;
-
-        if (check_region(ioaddr, PT_TOTAL_SIZE) == 0) {
-            printk(KERN_INFO "PT: Probing for card at address %#3x\n", ioaddr);
-            card_type = hw_probe(ioaddr);
-        }
-    }
-    if (card_type) {
-        printk(KERN_INFO "PT: Found a PT at address %#3x\n",ioaddr);
-    } else {
-        printk(KERN_ERR "PT: ERROR: No card found.\n");
-        return -EIO;
-    }
-
-    /*
-     * Link a couple of device structures into the chain
-     *
-     * For the A port
-     * Allocate space for 4 buffers even though we only need 3,
-     * because one of them may cross a DMA page boundary and
-     * be rejected by get_dma_buffer().
-     */
-    register_netdev(&pt0a);
-
-    pt0a.priv= kmalloc(sizeof(struct pt_local) + (DMA_BUFF_SIZE + sizeof(struct mbuf)) * 4, GFP_KERNEL | GFP_DMA);
-
-    pt0a.dma = 0;	/* wizzer - no dma yet */
-    pt0a.base_addr = ioaddr + CHANA;
-    pt0a.irq = 0;
-
-    /* And B port */
-    register_netdev(&pt0b);
-
-    pt0b.priv= kmalloc(sizeof(struct pt_local) + (DMA_BUFF_SIZE + sizeof(struct mbuf)) * 4, GFP_KERNEL | GFP_DMA);
-
-    pt0b.base_addr = ioaddr + CHANB;
-    pt0b.irq = 0;
-
-    /* Now initialise them */
-    pt_probe(&pt0a);
-    pt_probe(&pt0b);
-
-    pt0b.irq = pt0a.irq;	/* IRQ is shared */
-
-    return 0;
-} /* ptwin_init() */
-
-/*
- * Probe for PT card.  Also initialises the timers
- */
-static int __init hw_probe(int ioaddr)
-{
-    int time = 1000;		/* Number of milliseconds to test */
-    int a = 1;
-    int b = 1;
-    unsigned long start_time, end_time;
-
-    inb_p(ioaddr + TMR1CLR);
-    inb_p(ioaddr + TMR2CLR);
-
-    /* Timer counter channel 0, 1mS period */
-    outb_p(SC0 | LSB_MSB | MODE3, ioaddr + TMRCMD);
-    outb_p(0x00, ioaddr + TMR0);
-    outb_p(0x18, ioaddr + TMR0);
-
-    /* Setup timer control word for timer 1 */
-    outb_p(SC1 | LSB_MSB | MODE0, ioaddr + TMRCMD);
-    outb_p((time << 1) & 0xff, ioaddr + TMR1);
-    outb_p((time >> 7) & 0xff, ioaddr + TMR1);
-
-    /* wait until counter reg is loaded */
-    do {
-        /* Latch count for reading */
-        outb_p(SC1, ioaddr + TMRCMD);
-        a = inb_p(ioaddr + TMR1);
-        b = inb_p(ioaddr + TMR1);
-    } while (b == 0);
-    start_time = jiffies;
-    while(b != 0)
-    {
-        /* Latch count for reading */
-        outb_p(SC1, ioaddr + TMRCMD);
-        a = inb_p(ioaddr + TMR1);
-        b = inb_p(ioaddr + TMR1);
-        end_time = jiffies;
-        /* Don't wait forever - there may be no card here */
-        if ((end_time - start_time) > 200)
-        {
-        	inb_p(ioaddr + TMR1CLR);
-            return 0;
-        }
-    }
-
-    /* Now fix the timers up for general operation */
-
-    /* Clear the timers */
-    inb_p(ioaddr + TMR1CLR);
-    inb_p(ioaddr + TMR2CLR);
-
-    outb_p(SC1 | LSB_MSB | MODE0, ioaddr + TMRCMD);
-    inb_p(ioaddr + TMR1CLR);
-
-    outb_p(SC2 | LSB_MSB | MODE0, ioaddr + TMRCMD);
-    /* Should this be tmr1 or tmr2? wiz3*/
-    inb_p(ioaddr + TMR1CLR);
-
-    return 1;
-} /* hw_probe() */
-
-
-static void pt_rts(struct pt_local *lp, int x)
-{
-	int tc;
-	long br;
-	int cmd = lp->base + CTL;
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: pt_rts(): Transmitter status will be %d (%d).\n", x, lp->base & CHANA);
-#endif
-	if (x == ON) {
-	    /* Ex ints off to avoid int */
-	    wrtscc(lp->cardbase, cmd, R15, 0);
-	    wrtscc(lp->cardbase, cmd, R3, AUTO_ENAB | Rx8);	/* Rx off */
-	    lp->rstate = IDLE;
-
-	    if(lp->dmachan)
-	    {
-	        /* Setup for Tx DMA */
-	        wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | EXT_INT_ENAB);
-	    } else {
-	        /* No interrupts */
-	        wrtscc(lp->cardbase, cmd, R1, 0);
-	    }
-
-            if (!lp->clockmode)
-            {
-                if (lp->speed)
-                {
-                    br = lp->speed;
-                    tc = (lp->xtal / (br * 2)) - 2;
-                    wrtscc(lp->cardbase, cmd, R12, tc & 0xff);
-                    wrtscc(lp->cardbase, cmd, R13, (tc >> 8) & 0xff);
-                }
-            }
-            /* Turn on Tx by raising RTS */
-            wrtscc(lp->cardbase, cmd, R5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR);
-            /* Transmitter on now */
-        } else {		/* turning off Tx */
-            lp->tstate = IDLE;
-
-            /* Turn off Tx by dropping RTS */
-            wrtscc(lp->cardbase, cmd, R5, Tx8 | DTR);
-            if (!lp->clockmode)
-            {
-                if (lp->speed)		/* internally clocked */
-                {
-                    /* Reprogram BRG from 32x clock for Rx DPLL */
-                    /* BRG off, keep PClk source */
-                    wrtscc(lp->cardbase, cmd, R14, BRSRC);
-                    br = lp->speed;
-                    tc = ((lp->xtal / 32) / (br * 2)) - 2;
-                    wrtscc(lp->cardbase, cmd, R12, tc & 0xff);
-                    wrtscc(lp->cardbase, cmd, R13, (tc >> 8) & 0xff);
-
-                    /* SEARCH mode, BRG source */
-                    wrtscc(lp->cardbase, cmd, R14, BRSRC | SEARCH);
-                    /* Enable the BRG */
-                    wrtscc(lp->cardbase, cmd, R14, BRSRC | BRENABL);
-                }
-            }
-            /* Flush Rx fifo */
-            /* Turn Rx off */
-            wrtscc(lp->cardbase, cmd, R3, AUTO_ENAB | Rx8);
-
-            /* Reset error latch */
-            wrtscc(lp->cardbase, cmd, R0, ERR_RES);
-
-            /* get status byte from R1 */
-            (void) rdscc(lp->cardbase, cmd, R1);
-
-            /* Read and dump data in queue */
-            (void) rdscc(lp->cardbase, cmd, R8);
-            (void) rdscc(lp->cardbase, cmd, R8);
-            (void) rdscc(lp->cardbase, cmd, R8);
-
-            /* Now, turn on Rx and hunt for a flag */
-              wrtscc(lp->cardbase, cmd, R3, RxENABLE | AUTO_ENAB | Rx8 );
-
-            lp->rstate = ACTIVE;
-
-            if (lp->dmachan)
-            {
-                setup_rx_dma(lp);
-            } else {
-                /* Reset buffer pointers */
-                lp->rcp = lp->rcvbuf->data;
-                lp->rcvbuf->cnt = 0;
-                /* Allow aborts to interrupt us */
-                wrtscc(lp->cardbase, cmd, R1, INT_ALL_Rx | EXT_INT_ENAB);
-
-	}
-	wrtscc(lp->cardbase, cmd, R15, BRKIE );
-    }
-} /* pt_rts() */
-
-
-static int valid_dma_page(unsigned long addr, unsigned long dev_bufsize)
-{
-    if (((addr & 0xffff) + dev_bufsize) <= 0x10000)
-        return 1;
-    else
-        return 0;
-}
-
-static int pt_set_mac_address(struct net_device *dev, void *addr)
-{
-	struct sockaddr *sa = (struct sockaddr *)addr;
-	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);		/* addr is an AX.25 shifted ASCII */
-	return 0;		/* mac address */
-}
-
-
-/* Allocate a buffer which does not cross a DMA page boundary */
-static char * get_dma_buffer(unsigned long *mem_ptr)
-{
-    char *ret;
-
-    ret = (char *) *mem_ptr;
-
-    if (!valid_dma_page(*mem_ptr, DMA_BUFF_SIZE + sizeof(struct mbuf))) {
-        *mem_ptr += (DMA_BUFF_SIZE + sizeof(struct mbuf));
-        ret = (char *) *mem_ptr;
-    }
-    *mem_ptr += (DMA_BUFF_SIZE + sizeof(struct mbuf));
-    return (ret);
-} /* get_dma_buffer() */
-
-
-/*
- * Sets up all the structures for the PT device
- */
-static int pt_probe(struct net_device *dev)
-{
-    short ioaddr;
-    struct pt_local *lp;
-    unsigned long flags;
-    unsigned long mem_ptr;
-
-    ioaddr = dev->base_addr;
-
-    /*
-     * Initialise the device structure.
-     * Must be done before chipset_init()
-     * Make sure data structures used by  the PT are aligned
-     */
-    dev->priv = (void *) (((int) dev->priv + 7) & ~7);
-    lp = (struct pt_local*) dev->priv;
-
-    memset(dev->priv, 0, sizeof(struct pt_local));
-
-    /* Allocate some buffers which do not cross DMA boundaries */
-    mem_ptr = (unsigned long) dev->priv + sizeof(struct pt_local);
-    lp->txdmabuf = get_dma_buffer(&mem_ptr);
-    lp->rxdmabuf1 = (struct mbuf *) get_dma_buffer(&mem_ptr);
-    lp->rxdmabuf2 = (struct mbuf *) get_dma_buffer(&mem_ptr);
-
-    /* Initialise the Rx buffer */
-    lp->rcvbuf = lp->rxdmabuf1;
-    lp->rcp = lp->rcvbuf->data;
-    lp->rcvbuf->cnt = 0;
-
-    /* Initialise the transmit queue head structure */
-    skb_queue_head_init(&lp->sndq);
-
-    lp->base = dev->base_addr;
-    lp->cardbase = dev->base_addr & 0x3f0;
-
-    /* These need to be initialised before scc_init() is called.
-     */
-    lp->xtal = XTAL;
-
-    if (dev->base_addr & CHANA) {
-        lp->speed = DEF_A_SPEED;
-        lp->txdelay = DEF_A_TXDELAY;
-        lp->persist = DEF_A_PERSIST;
-        lp->slotime = DEF_A_SLOTIME;
-        lp->squeldelay = DEF_A_SQUELDELAY;
-        lp->clockmode = DEF_A_CLOCKMODE;
-        lp->nrzi = DEF_A_NRZI;
-    } else {
-        lp->speed = DEF_B_SPEED;
-        lp->txdelay = DEF_B_TXDELAY;
-        lp->persist = DEF_B_PERSIST;
-        lp->slotime = DEF_B_SLOTIME;
-        lp->squeldelay = DEF_B_SQUELDELAY;
-        lp->clockmode = DEF_B_CLOCKMODE;
-        lp->nrzi = DEF_B_NRZI;
-    }
-    lp->bufsiz = DMA_BUFF_SIZE;
-    lp->tstate = IDLE;
-
-    chipset_init(dev);
-
-    if (dev->base_addr & CHANA) {
-        /* Note that a single IRQ services 2 devices (A and B channels)
-        */
-
-	/*
-	 * We disable the dma for a while, we have to get ints working
-	 * properly first!!
-	 */
-	lp->dmachan = 0;
-
-        if (dev->irq < 2) {
-            autoirq_setup(0);
-
-            /* Turn on PT interrupts */
-            save_flags(flags);
-            cli();
-            outb_p( pt_sercfg |= PT_EI, lp->cardbase + INT_CFG);
-            restore_flags(flags);
-
-            /* Set a timer interrupt */
-            tdelay(lp, 1);
-            dev->irq = autoirq_report(20);
-
-	    /* Turn off PT interrupts */
-	    save_flags(flags);
-	    cli();
-            outb_p( (pt_sercfg  &= ~ PT_EI), lp->cardbase + INT_CFG);
-            restore_flags(flags);
-
-            if (!dev->irq) {
-                printk(KERN_ERR "PT: ERROR: Failed to detect IRQ line, assuming IRQ7.\n");
-            }
-        }
-
-        printk(KERN_INFO "PT: Autodetected IRQ %d, assuming DMA %d\n", dev->irq, dev->dma);
-
-        /* This board has jumpered interrupts. Snarf the interrupt vector
-         * now.  There is no point in waiting since no other device can use
-         * the interrupt, and this marks the 'irqaction' as busy.
-         */
-        {
-            int irqval = request_irq(dev->irq, &pt_interrupt,0, "pt", dev);
-            if (irqval) {
-                printk(KERN_ERR "PT: ERROR: Unable to get IRQ %d (irqval = %d).\n",
-                    dev->irq, irqval);
-                return -EAGAIN;
-            }
-        }
-
-        /* Grab the region */
-        request_region(ioaddr & 0x3f0, PT_TOTAL_SIZE, "pt" );
-    } /* A port */
-    dev->open = pt_open;
-    dev->stop = pt_close;
-    dev->do_ioctl = pt_ioctl;
-    dev->hard_start_xmit = pt_send_packet;
-    dev->get_stats = pt_get_stats;
-
-    /* Fill in the fields of the device structure */
-    dev_init_buffers(dev);
-
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
-    dev->hard_header    = ax25_encapsulate;
-    dev->rebuild_header = ax25_rebuild_header;
-#endif
-
-    dev->set_mac_address = pt_set_mac_address;
-
-    dev->type = ARPHRD_AX25;            /* AF_AX25 device */
-    dev->hard_header_len = 73;      /* We do digipeaters now */
-    dev->mtu = 1500;                /* eth_mtu is default */
-    dev->addr_len = 7;               /* sizeof an ax.25 address */
-    memcpy(dev->broadcast, ax25_bcast, 7);
-    memcpy(dev->dev_addr, ax25_test, 7);
-
-    /* New style flags */
-    dev->flags = 0;
-
-    return 0;
-} /* pt_probe() */
-
-
-/* Open/initialise the board.  This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
- *
- * This routine should set everything up anew at each open, even
- * registers that 'should' only be set once at boot, so that there is
- * a non-reboot way to recover if something goes wrong.
- * derived from last half of tsync_attach()
- */
-static int pt_open(struct net_device *dev)
-{
-    unsigned long flags;
-    struct pt_local *lp = dev->priv;
-    static first_time = 1;
-
-    if (dev->base_addr & CHANA)
-    {
-        if (first_time)
-        {
-            if (request_dma(dev->dma, "pt"))
-            {
-                free_irq(dev->irq, dev);
-                return -EAGAIN;
-            }
-        }
-
-         /* Reset hardware */
-         chipset_init(dev);
-     }
-     lp->tstate = IDLE;
-
-     if (dev->base_addr & CHANA)
-     {
-         scc_init(dev);
-         scc_init(dev->next);
-     }
-     /* Save a copy of register RR0 for comparing with later on */
-     /* We always put 0 in zero count */
-     lp->saved_RR0 = rdscc(lp->cardbase, lp->base + CTL, R0) & ~ZCOUNT;
-
-    /* master interrupt enable */
-    save_flags(flags);
-    cli();
-    wrtscc(lp->cardbase, lp->base + CTL, R9, MIE | NV);
-    outb_p( pt_sercfg |= PT_EI, lp->cardbase + INT_CFG);
-    restore_flags(flags);
-
-    lp->open_time = jiffies;
-
-    dev->tbusy = 0;
-    dev->interrupt = 0;
-    dev->start = 1;
-    first_time = 0;
-
-    MOD_INC_USE_COUNT;
-
-    return 0;
-} /* pt_open() */
-
-static int pt_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
-	struct pt_local *lp = (struct pt_local *) dev->priv;
-
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: pt_send_packet(): (%d)\n", lp->base & CHANA);
-#endif
-	hardware_send_packet(lp, skb);
-	dev->trans_start = jiffies;
-
-	return 0;
-}
-
-
-
-/* The inverse routine to pt_open() */
-static int pt_close(struct net_device *dev)
-{
-	unsigned long flags;
-	struct pt_local *lp = dev->priv;
-	struct sk_buff *ptr = NULL;
-	int cmd;
-
-	cmd = lp->base + CTL;
-
-	save_flags(flags);
-	cli();
-
-	/* Reset SCC or channel */
-	chipset_init(dev);
-	disable_dma(lp->dmachan);
-
-	lp->open_time = 0;
-	dev->tbusy = 1;
-	dev->start = 0;
-
-	/* Free any buffers left in the hardware transmit queue */
-	while ((ptr = skb_dequeue(&lp->sndq)) != NULL)
-		kfree_skb(ptr);
-
-	restore_flags(flags);
-
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: pt_close(): Closing down channel (%d).\n", lp->base & CHANA);
-#endif
-
-	MOD_DEC_USE_COUNT;
-
-	return 0;
-} /* pt_close() */
-
-
-static int pt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-    unsigned long flags;
-    struct pt_req rq;
-    struct pt_local *lp = (struct pt_local *) dev->priv;
-
-    int ret = verify_area(VERIFY_WRITE, ifr->ifr_data, sizeof(struct pt_req));
-    if (ret)
-	return ret;
-
-    if (cmd != SIOCDEVPRIVATE)
-        return -EINVAL;
-
-    copy_from_user(&rq, ifr->ifr_data, sizeof(struct pt_req));
-
-    switch (rq.cmd) {
-    case SIOCSPIPARAM:
-
-	if (!capable(CAP_NET_ADMIN))
-	    return -EPERM;
-	save_flags(flags);
-	cli();
-	lp->txdelay = rq.txdelay;
-	lp->persist = rq.persist;
-	lp->slotime = rq.slotime;
-	lp->squeldelay = rq.squeldelay;
-	lp->clockmode = rq.clockmode;
-	lp->speed = rq.speed;
-	pt_open(&pt0a);
-	restore_flags(flags);
-	ret = 0;
-	break;
-
-    case SIOCSPIDMA:
-
-	if (!capable(CAP_SYS_RAWIO))
-	    return -EPERM;
-	ret = 0;
-	if (dev->base_addr & CHANA) {   /* if A channel */
-	   if (rq.dmachan < 1 || rq.dmachan > 3)
-		return -EINVAL;
-	   save_flags(flags);
-	   cli();
-	   pt_close(dev);
-	   free_dma(lp->dmachan);
-	   dev->dma = lp->dmachan = rq.dmachan;
-	   if (request_dma(lp->dmachan,"pt"))
-		ret = -EAGAIN;
-	   pt_open(dev);
-	   restore_flags(flags);
-	}
-	break;
-
-    case SIOCSPIIRQ:
-	ret = -EINVAL;      /* add this later */
-	break;
-
-    case SIOCGPIPARAM:
-    case SIOCGPIDMA:
-    case SIOCGPIIRQ:
-
-	rq.speed = lp->speed;
-	rq.txdelay = lp->txdelay;
-	rq.persist = lp->persist;
-	rq.slotime = lp->slotime;
-	rq.squeldelay = lp->squeldelay;
-	rq.clockmode = lp->clockmode;
-	rq.dmachan = lp->dmachan;
-	rq.irq = dev->irq;
-	copy_to_user(ifr->ifr_data, &rq, sizeof(struct pt_req));
-	ret = 0;
-	break;
-
-    default:
-	ret = -EINVAL;
-    }
-    return ret;
-}
-
-/*
- *	Get the current statistics.
- *	This may be called with the card open or closed. 
- */
- 
-static struct net_device_stats *pt_get_stats(struct net_device *dev)
-{
-	struct pt_local *lp = (struct pt_local *) dev->priv;
-	return &lp->stats;
-}
-
-
-/*
- * Local variables:
- *  compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c skeleton.c"
- *  version-control: t
- *  kept-new-versions: 5
- *  tab-width: 4
- * End:
- */
-
-
-static void tdelay(struct pt_local *lp, int time)
-{
-	/* For some reason, we turn off the Tx interrupts here! */
-	if (!lp->dmachan)
-		wrtscc(lp->cardbase, lp->base + CTL, R1, INT_ALL_Rx | EXT_INT_ENAB);
-
-	if (lp->base & CHANA) 
-	{
-		outb_p(time & 0xff, lp->cardbase + TMR1);
-		outb_p((time >> 8)&0xff, lp->cardbase + TMR1);
-	}
-	else
-	{
-		outb_p(time & 0xff, lp->cardbase + TMR2);
-		outb_p((time >> 8)&0xff, lp->cardbase + TMR2);
-	}
-} /* tdelay */
-
-
-static void pt_txisr(struct pt_local *lp)
-{
-	unsigned long flags;
-	int cmd;
-	unsigned char c;
-
-	save_flags(flags);
-	cli();
-	cmd = lp->base + CTL;
-
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: pt_txisr(): tstate = %d (%d).\n", lp->tstate, lp->base & CHANA);
-#endif
-
-	switch (lp->tstate)
-	{
-	case CRCOUT:
-	    lp->tstate = FLAGOUT;
-	    tdelay(lp, lp->squeldelay);
-	    restore_flags(flags);
-	    return;
-
-	case IDLE:
-	    /* Transmitter idle. Find a frame for transmission */
-	    if ((lp->sndbuf = skb_dequeue(&lp->sndq)) == NULL)
-	    {
-	        /* Nothing to send - return to receive mode
-	         * Tx off now - flag should have gone
-	         */
-	        pt_rts(lp, OFF);
-
-	        restore_flags(flags);
-	        return;
-	    }
-	    if (!lp->dmachan)
-	    {
-		    lp->txptr = lp->sndbuf->data;
-		    lp->txptr++;		/* Ignore KISS control byte */
-		    lp->txcnt = (int) lp->sndbuf->len - 1;
-		}
-	    /* If a buffer to send, drop though here */
-
-	case DEFER:
-	    /* Check DCD - debounce it */
-	    /* See Intel Microcommunications Handbook p2-308 */
-	    wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	    wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-	    if ((rdscc(lp->cardbase, cmd, R0) & DCD) != 0)
-	    {
-	        lp->tstate = DEFER;
-	        tdelay(lp, 100);
-	        /* DEFER until DCD transition or timeout */
-	        wrtscc(lp->cardbase, cmd, R15, DCDIE);
-	        restore_flags(flags);
-	        return;
-	    }
-	    if (random() > lp->persist)
-	    {
-	        lp->tstate = DEFER;
-	        tdelay(lp, lp->slotime);
-	        restore_flags(flags);
-	        return;
-	    }
-	    pt_rts(lp, ON);		/* Tx on */
-	    if (lp->dmachan)
-	    	wrtscc(lp->cardbase, cmd, R5, TxCRC_ENAB | RTS | Tx8);
-	    lp->tstate = ST_TXDELAY;
-	    tdelay(lp, lp->txdelay);
-	    restore_flags(flags);
-	    return;
-
-	case ACTIVE:
-	    /* Here we are actively sending a frame */
-	    if (lp->txcnt--)
-	    {
-	        /* XLZ - checkout Gracilis PT code to see if the while
-	         * loop is better or not.
-	         */
-	        c = *lp->txptr++;
-	        /* next char is gone */
-	        wrtscc(lp->cardbase, cmd, R8, c);
-	        /* stuffing a char satisfies interrupt condition */
-	    } else {
-	        /* No more to send */
-	        kfree_skb(lp->sndbuf);
-	        lp->sndbuf = NULL;
-	        if ((rdscc(lp->cardbase, cmd, R0) & TxEOM))
-	        {
-	            /* Did we underrun */
-	            lp->stats.tx_errors++;
-	            lp->stats.tx_fifo_errors++;
-	            wrtscc(lp->cardbase, cmd, R0, SEND_ABORT);
-	            lp->tstate = FLAGOUT;
-	            tdelay(lp, lp->squeldelay);
-	            restore_flags(flags);
-	            return;
-	        }
-	        lp->tstate = UNDERRUN;
-	        /* Send flags on underrun */
-	       if (lp->nrzi)
-	       {
-	           wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZI);
-	       } else {
-	           wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZ);
-	       }
-	       /* Reset Tx interrupt pending */
-	       wrtscc(lp->cardbase, cmd, R0, RES_Tx_P);
-	   }
-	   restore_flags(flags);
-	   return;
-	default:
-		printk(KERN_ERR "PT: pt_txisr(): Invalid tstate (%d) for chan %s.\n", lp->tstate, (cmd & CHANA? "A": "B") );
-		pt_rts(lp, OFF);
-		lp->tstate = IDLE;
-		break;
-    } 				/*switch */
-    restore_flags(flags);
-}
-
-static void pt_rxisr(struct net_device *dev)
-{
-    struct pt_local *lp = (struct pt_local*) dev->priv;
-    int cmd = lp->base + CTL;
-    int bytecount;
-    unsigned long flags;
-    char rse;
-    struct sk_buff *skb;
-    int sksize, pkt_len;
-    struct mbuf *cur_buf = NULL;
-    unsigned char *cfix;
-
-    save_flags(flags);
-    cli();
-
-    /* Get status byte from R1 */
-    rse = rdscc(lp->cardbase, cmd, R1);
-
-#ifdef PT_DEBUG
-    printk(KERN_DEBUG "PT: pt_rxisr(): R1 = %#3x. (%d)\n", rse, lp->base & CHANA);
-#endif
-
-	if (lp->dmachan && (rse & Rx_OVR))
-		lp->rstate = RXERROR;
-
-    if (rdscc(lp->cardbase, cmd, R0) & Rx_CH_AV && !lp->dmachan)
-    {
-        /* There is a char to be stored
-         * Read special condition bits before reading the data char
-         */
-        if (rse & Rx_OVR)
-        {
-             /* Rx overrun - toss buffer */
-             /* wind back the pointers */
-             lp->rcp = lp->rcvbuf->data;
-             lp->rcvbuf->cnt = 0;
-             lp->rstate = RXERROR;
-             lp->stats.rx_errors++;
-             lp->stats.rx_fifo_errors++;
-         } else if (lp->rcvbuf->cnt >= lp->bufsiz)
-             {
-                 /* Too large packet
-                  * wind back Rx buffer pointers
-                  */
-                 lp->rcp = lp->rcvbuf->data;
-                 lp->rcvbuf->cnt = 0;
-                 lp->rstate = TOOBIG;
-             }
-         /* ok, we can store the Rx char if no errors */
-         if (lp->rstate == ACTIVE)
-         {
-             *lp->rcp++ = rdscc(lp->cardbase, cmd, R8);
-             lp->rcvbuf->cnt++;
-         } else {
-             /* we got an error, dump the FIFO */
-             (void) rdscc(lp->cardbase, cmd, R8);
-             (void) rdscc(lp->cardbase, cmd, R8);
-             (void) rdscc(lp->cardbase, cmd, R8);
-
-             /* Reset error latch */
-             wrtscc(lp->cardbase, cmd, R0, ERR_RES);
-             lp->rstate = ACTIVE;
-
-             /* Resync the SCC */
-             wrtscc(lp->cardbase, cmd, R3, RxENABLE | ENT_HM | AUTO_ENAB | Rx8);
-
-         }
-     }
-
-     if (rse & END_FR)
-     {
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: pt_rxisr() Got end of a %u byte frame.\n", lp->rcvbuf->cnt);
-#endif
-		if (lp->dmachan)
-		{
-			clear_dma_ff(lp->dmachan);
-			bytecount = lp->bufsiz - get_dma_residue(lp->dmachan);
-		} else {
-			bytecount = lp->rcvbuf->cnt;
-		}
-
-         /* END OF FRAME - Make sure Rx was active */
-         if (lp->rcvbuf->cnt > 0 || lp->dmachan)
-         {
-             if ((rse & CRC_ERR) || (lp->rstate > ACTIVE) || (bytecount < 10))
-             {
-                 if ((bytecount >= 10) && (rse & CRC_ERR))
-                 {
-                     lp->stats.rx_crc_errors++;
-                 }
-                 if (lp->dmachan)
-                 {
-                 	if (lp->rstate == RXERROR)
-                 	{
-                 		lp->stats.rx_errors++;
-                 		lp->stats.rx_over_errors++;
-                 	}
-                 	lp->rstate = ACTIVE;
-                 	setup_rx_dma(lp);
-                 } else {
-	                 /* wind back Rx buffer pointers */
-    	             lp->rcp = lp->rcvbuf->data;
-        	         lp->rcvbuf->cnt = 0;
-
-					/* Re-sync the SCC */
-					wrtscc(lp->cardbase, cmd, R3, RxENABLE | ENT_HM | AUTO_ENAB | Rx8);
-
-        	     }
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: pt_rxisr() %s error.\n", (rse & CRC_ERR)? "CRC" : "state");
-#endif
-             } else {
-                 /* We have a valid frame */
-                 if (lp->dmachan)
-                 {
-                 	pkt_len = lp->rcvbuf->cnt = bytecount - 2 +1;
-					/* Get buffer for next frame */
-                 	cur_buf = lp->rcvbuf;
-                 	switchbuffers(lp);
-                 	setup_rx_dma(lp);
-                 } else {
-	                 pkt_len = lp->rcvbuf->cnt -= 2;  /* Toss 2 CRC bytes */
-    	             pkt_len += 1;	/* make room for KISS control byte */
-        		}
-
-                 /* Malloc up new buffer */
-                 sksize = pkt_len;
-                 skb = dev_alloc_skb(sksize);
-                 if (skb == NULL)
-                 {
-                     printk(KERN_ERR "PT: %s: Memory squeeze, dropping packet.\n", dev->name);
-                     lp->stats.rx_dropped++;
-                     restore_flags(flags);
-                     return;
-                 }
-                 skb->dev = dev;
-
-                 /* KISS kludge = prefix with a 0 byte */
-                 cfix=skb_put(skb,pkt_len);
-                 *cfix++=0;
-                 /* skb->data points to the start of sk_buff area */
-                 if (lp->dmachan)
-                 	memcpy(cfix, (char*)cur_buf->data, pkt_len - 1);
-                 else
-                    memcpy(cfix, lp->rcvbuf->data, pkt_len - 1);
-                 skb->protocol = ntohs(ETH_P_AX25);
-                 skb->mac.raw=skb->data;
-                 lp->stats.rx_bytes+=skb->len;
-                 netif_rx(skb);
-                 lp->stats.rx_packets++;
-                 if (!lp->dmachan)
-                 {
-	                 /* packet queued - wind back buffer for next frame */
-    	             lp->rcp = lp->rcvbuf->data;
-	                 lp->rcvbuf->cnt = 0;
-	             }
-             } /* good frame */
-         } /* check active Rx */
-         /* Clear error status */
-         lp->rstate = ACTIVE;
-         /* Reset error latch */
-     } /* end EOF check */
-     wrtscc(lp->cardbase, cmd, R0, ERR_RES);
-     restore_flags(flags);
-} /* pt_rxisr() */
-
-/*
- * This handles the two timer interrupts.
- * This is a real bugger, cause you have to rip it out of the pi's
- * external status code.  They use the CTS line or something.
- */
-static void pt_tmrisr(struct pt_local *lp)
-{
-    unsigned long flags;
-
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: pt_tmrisr(): tstate = %d (%d).\n", lp->tstate, lp->base & CHANA);
-#endif
-
-    save_flags(flags);
-    cli();
-
-
-    switch (lp->tstate)
-    {
-    /* Most of this stuff is in pt_exisr() */
-    case FLAGOUT:
-    case ST_TXDELAY:
-    case DEFER:
-/*    case ACTIVE:
-    case UNDERRUN:*/
-        pt_exisr(lp);
-        break;
-
-    default:
-	if (lp->base & CHANA)
- 	    printk(KERN_ERR "PT: pt_tmrisr(): Invalid tstate %d for Channel A\n", lp->tstate);
-	else
-	    printk(KERN_ERR "PT: pt_tmrisr(): Invalid tstate %d for Channel B\n", lp->tstate);
-	break;
-    } /* end switch */
-    restore_flags(flags);
-} /* pt_tmrisr() */
-
-
-/*
- * This routine is called by the kernel when there is an interrupt for the
- * PT.
- */
-static void pt_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-    /* It's a tad dodgy here, but we assume pt0a until proven otherwise */
-    struct net_device *dev = &pt0a;
-    struct pt_local *lp = dev->priv;
-    unsigned char intreg;
-    unsigned char st;
-    register int cbase = dev->base_addr & 0x3f0;
-    unsigned long flags;
-
-    /* Read the PT's interrupt register, this is not the SCC one! */
-    intreg = inb_p(cbase + INT_REG);
-    while(( intreg & 0x07) != 0x07) {
-        /* Read interrupt register pending from Channel A */
-        while ((st = rdscc(cbase, cbase + CHANA + CTL, R3)) != 0)
-        {
-        	/* Read interrupt vector from R2, channel B */
-#ifdef PT_DEBUG
-		printk(KERN_DEBUG "PT: pt_interrupt(): R3 = %#3x", st);
-#endif
-/*        	st = rdscc(lp->cardbase, cbase + CHANB + CTL, R2) & 0x0e;*/
-#ifdef PT_DEBUG
-		printk(KERN_DEBUG "PI: R2 = %#3x.\n", st);
-#endif
-			if (st & CHARxIP) {
-			    /* Channel A Rx */
-	            lp = (struct pt_local*)pt0a.priv;
-	            pt_rxisr(&pt0a);
-	        } else if (st & CHATxIP) {
-	            /* Channel A Tx */
-	            lp = (struct pt_local*)pt0a.priv;
-	            pt_txisr(lp);
-	        } else if (st & CHAEXT) {
-	        	/* Channel A External Status */
-	        	lp = (struct pt_local*)pt0a.priv;
-	        	pt_exisr(lp);
-	        } else if (st & CHBRxIP) {
-	        	/* Channel B Rx */
-	            lp= (struct pt_local*)pt0b.priv;
-	            pt_rxisr(&pt0b);
-	        } else if (st & CHBTxIP) {
-            	/* Channel B Tx */
-	            lp = (struct pt_local*)pt0b.priv;
-	            pt_txisr(lp);
-			} else if (st & CHBEXT) {
-	        	/* Channel B External Status */
-	        	lp = (struct pt_local*)pt0b.priv;
-	        	pt_exisr(lp);
-	        }
-            /* Reset highest interrupt under service */
-            save_flags(flags);
-            cli();
-            wrtscc(lp->cardbase, lp->base + CTL, R0, RES_H_IUS);
-            restore_flags(flags);
-        }  /* end of SCC ints */
-
-        if (!(intreg & PT_TMR1_MSK))
-        {
-            /* Clear timer 1 */
-            inb_p(cbase + TMR1CLR);
-
-            pt_tmrisr( (struct pt_local*)pt0a.priv);
-        }
-
-        if (!(intreg & PT_TMR2_MSK))
-        {
-            /* Clear timer 2 */
-            inb_p(cbase + TMR2CLR);
-
-            pt_tmrisr( (struct pt_local*)pt0b.priv);
-        }
-
-        /* Get the next PT interrupt vector */
-        intreg = inb_p(cbase + INT_REG);
-    } /* while (intreg) */
-} /* pt_interrupt() */
-
-
-static void pt_exisr(struct pt_local *lp)
-{
-    unsigned long flags;
-    int cmd = lp->base + CTL;
-    unsigned char st;
-    char c;
-    int length;
-
-    save_flags(flags);
-    cli();
-
-    /* Get external status */
-    st = rdscc(lp->cardbase, cmd, R0);
-
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: exisr(): R0 = %#3x tstate = %d (%d).\n", st, lp->tstate, lp->base & CHANA);
-#endif
-    /* Reset external status latch */
-    wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-
-    if ((lp->rstate >= ACTIVE) && (st & BRK_ABRT) && lp->dmachan)
-    {
-    	setup_rx_dma(lp);
-    	lp->rstate = ACTIVE;
-    }
-
-    switch (lp->tstate)
-    {
-    case ACTIVE:		/* Unexpected underrun */
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: exisr(): unexpected underrun detected.\n");
-#endif
-        kfree_skb(lp->sndbuf);
-        lp->sndbuf = NULL;
-        if (!lp->dmachan)
-        {
-	        wrtscc(lp->cardbase, cmd, R0, SEND_ABORT);
-	        lp->stats.tx_errors++;
-	        lp->stats.tx_fifo_errors++;
-	    }
-        lp->tstate = FLAGOUT;
-        tdelay(lp, lp->squeldelay);
-        restore_flags(flags);
-        return;
-    case UNDERRUN:
-        lp->tstate = CRCOUT;
-        restore_flags(flags);
-        return;
-    case FLAGOUT:
-        /* squeldelay has timed out */
-        /* Find a frame for transmission */
-        if ((lp->sndbuf = skb_dequeue(&lp->sndq)) == NULL)
-        {
-            /* Nothing to send - return to Rx mode */
-            pt_rts(lp, OFF);
-            lp->tstate = IDLE;
-            restore_flags(flags);
-            return;
-        }
-        if (!lp->dmachan)
-        {
-	        lp->txptr = lp->sndbuf->data;
-    	    lp->txptr++;		/* Ignore KISS control byte */
-	        lp->txcnt = (int) lp->sndbuf->len - 1;
-	    }
-        /* Fall through if we have a packet */
-
-    case ST_TXDELAY:
-    	if (lp->dmachan)
-    	{
-    		/* Disable DMA chan */
-    		disable_dma(lp->dmachan);
-
-    		/* Set up for TX dma */
-    		wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | EXT_INT_ENAB);
-
-    		length = lp->sndbuf->len - 1;
-    		memcpy(lp->txdmabuf, &lp->sndbuf->data[1], length);
-
-    		/* Setup DMA controller for Tx */
-    		setup_tx_dma(lp, length);
-
-    		enable_dma(lp->dmachan);
-
-    		/* Reset CRC, Txint pending */
-    		wrtscc(lp->cardbase, cmd, R0, RES_Tx_CRC | RES_Tx_P);
-
-    		/* Allow underrun only */
-    		wrtscc(lp->cardbase, cmd, R15, TxUIE);
-
-    		/* Enable TX DMA */
-    		wrtscc(lp->cardbase, cmd, R1, WT_RDY_ENAB | WT_FN_RDYFN | EXT_INT_ENAB);
-
-    		/* Send CRC on underrun */
-    		wrtscc(lp->cardbase, cmd, R0, RES_EOM_L);
-
-    		lp->tstate = ACTIVE;
-    		break;
-    	}
-        /* Get first char to send */
-        lp->txcnt--;
-        c = *lp->txptr++;
-        /* Reset CRC for next frame */
-        wrtscc(lp->cardbase, cmd, R0, RES_Tx_CRC);
-
-        /* send abort on underrun */
-        if (lp->nrzi)
-        {
-            wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZI | ABUNDER);
-        } else {
-            wrtscc(lp->cardbase, cmd, R10, CRCPS | NRZ | ABUNDER);
-        }
-        /* send first char */
-        wrtscc(lp->cardbase, cmd, R8, c);
-
-        /* Reset end of message latch */
-        wrtscc(lp->cardbase, cmd, R0, RES_EOM_L);
-
-        /* stuff an extra one in */
-/*        while ((rdscc(lp->cardbase, cmd, R0) & Tx_BUF_EMP) && lp->txcnt)
-        {
-            lp->txcnt--;
-            c = *lp->txptr++;
-            wrtscc(lp->cardbase, cmd, R8, c);
-        }*/
-
-        /* select Tx interrupts to enable */
-        /* Allow underrun int only */
-        wrtscc(lp->cardbase, cmd, R15, TxUIE);
-
-        /* Reset external interrupts */
-        wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-
-        /* Tx and Rx ints enabled */
-        wrtscc(lp->cardbase, cmd, R1, TxINT_ENAB | EXT_INT_ENAB);
-
-        lp->tstate = ACTIVE;
-        restore_flags(flags);
-        return;
-
-        /* slotime has timed out */
-    case DEFER:
-        /* Check DCD - debounce it
-         * see Intel Microcommunications Handbook, p2-308
-         */
-        wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-        wrtscc(lp->cardbase, cmd, R0, RES_EXT_INT);
-        if ((rdscc(lp->cardbase, cmd, R0) & DCD) != 0)
-        {
-            lp->tstate = DEFER;
-            tdelay(lp, 100);
-            /* DEFER until DCD transition or timeout */
-            wrtscc(lp->cardbase, cmd, R15, DCDIE);
-            restore_flags(flags);
-            return;
-        }
-        if (random() > lp->persist)
-        {
-            lp->tstate = DEFER;
-            tdelay(lp, lp->slotime);
-            restore_flags(flags);
-            return;
-        }
-        if (lp->dmachan)
-        	wrtscc(lp->cardbase, cmd, R5, TxCRC_ENAB | RTS | Tx8);
-        pt_rts(lp, ON);			/* Tx on */
-        lp->tstate = ST_TXDELAY;
-        tdelay(lp, lp->txdelay);
-        restore_flags(flags);
-        return;
-
- 	/* Only for int driven parts */
- 	if (lp->dmachan)
- 	{
- 		restore_flags(flags);
- 		return;
- 	}
-
-    } /* end switch */
-    /*
-     * Rx mode only
-     * This triggers when hunt mode is entered, & since an ABORT
-     * automatically enters hunt mode, we use that to clean up
-     * any waiting garbage
-     */
-    if ((lp->rstate == ACTIVE) && (st & BRK_ABRT) )
-    {
-#ifdef PT_DEBUG
-	printk(KERN_DEBUG "PT: exisr(): abort detected.\n");
-#endif
-  		/* read and dump all of SCC Rx FIFO */
-        (void) rdscc(lp->cardbase, cmd, R8);
-        (void) rdscc(lp->cardbase, cmd, R8);
-        (void) rdscc(lp->cardbase, cmd, R8);
-
-        lp->rcp = lp->rcvbuf->data;
-        lp->rcvbuf->cnt = 0;
-
-		/* Re-sync the SCC */
-		wrtscc(lp->cardbase, cmd, R3, RxENABLE | ENT_HM | AUTO_ENAB | Rx8);
-
-    }
-
-    /* Check for DCD transitions */
-    if ( (st & DCD) != (lp->saved_RR0 & DCD))
-    {
-#ifdef PT_DEBUG
-        printk(KERN_DEBUG "PT: pt_exisr(): DCD is now %s.\n", (st & DCD)? "ON" : "OFF" );
-#endif
-		if (st & DCD)
-		{
-			/* Check that we don't already have some data */
-			if (lp->rcvbuf->cnt > 0)
-			{
-#ifdef PT_DEBUG
-				printk(KERN_DEBUG "PT: pt_exisr() dumping %u bytes from buffer.\n", lp->rcvbuf->cnt);
-#endif
-				/* wind back buffers */
-				lp->rcp = lp->rcvbuf->data;
-				lp->rcvbuf->cnt = 0;
-			}
-		} else {  /* DCD off */
-
-			/* read and dump al SCC FIFO */
-			(void)rdscc(lp->cardbase, cmd, R8);
-			(void)rdscc(lp->cardbase, cmd, R8);
-			(void)rdscc(lp->cardbase, cmd, R8);
-
-			/* wind back buffers */
-			lp->rcp = lp->rcvbuf->data;
-			lp->rcvbuf->cnt = 0;
-
-			/* Re-sync the SCC */
-			wrtscc(lp->cardbase, cmd, R3, RxENABLE | ENT_HM | AUTO_ENAB | Rx8);
-		}
-
-    }
-    /* Update the saved version of register RR) */
-    lp->saved_RR0 = st &~ ZCOUNT;
-    restore_flags(flags);
-
-} /* pt_exisr() */
-
-#ifdef MODULE
-EXPORT_NO_SYMBOLS;
-
-MODULE_AUTHOR("Craig Small VK2XLZ <vk2xlz@vk2xlz.ampr.org>");
-MODULE_DESCRIPTION("AX.25 driver for the Gracillis PacketTwin HDLC card");
-
-int init_module(void)
-{
-	return ptwin_init();
-}
-
-void cleanup_module(void)
-{
-	free_irq(pt0a.irq, &pt0a);	/* IRQs and IO Ports are shared */
-	release_region(pt0a.base_addr & 0x3f0, PT_TOTAL_SIZE);
-
-	kfree(pt0a.priv);
-	pt0a.priv = NULL;
-	unregister_netdev(&pt0a);
-
-	kfree(pt0b.priv);
-	pt0b.priv = NULL;
-	unregister_netdev(&pt0b);
-}
-#endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/hamradio/scc.c linux/drivers/net/hamradio/scc.c
--- v2.4.0-prerelease/linux/drivers/net/hamradio/scc.c	Tue Oct 31 12:42:26 2000
+++ linux/drivers/net/hamradio/scc.c	Thu Jan  4 12:50:12 2001
@@ -1,7 +1,6 @@
 #define RCS_ID "$Id: scc.c,v 1.75 1998/11/04 15:15:01 jreuter Exp jreuter $"
 
 #define VERSION "3.0"
-#define BANNER  "AX.25: Z8530 SCC driver version "VERSION".dl1bke\n"
 
 /*
  * Please use z8530drv-utils-3.0 with this version.
@@ -142,8 +141,6 @@
 
 #define SCC_MAXCHIPS	4       /* number of max. supported chips */
 #define SCC_BUFSIZE	384     /* must not exceed 4096 */
-#undef  SCC_DISABLE_ALL_INTS	/* use cli()/sti() in ISR instead of */
-				/* enable_irq()/disable_irq()        */
 #undef	SCC_DEBUG
 
 #define SCC_DEFAULT_CLOCK	4915200 
@@ -187,12 +184,7 @@
 #include <linux/kernel.h>
 #include <linux/proc_fs.h>
 
-#ifdef MODULE
-int init_module(void);
-void cleanup_module(void);
-#endif
-
-int scc_init(void);
+static const char banner[] __initdata = KERN_INFO "AX.25: Z8530 SCC driver version "VERSION".dl1bke\n";
 
 static void t_dwait(unsigned long);
 static void t_txdelay(unsigned long);
@@ -220,7 +212,6 @@
 static int scc_net_tx(struct sk_buff *skb, struct net_device *dev);
 static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 static int scc_net_set_mac_address(struct net_device *dev, void *addr);
-static int scc_net_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, void *daddr, void *saddr, unsigned len);
 static struct net_device_stats * scc_net_get_stats(struct net_device *dev);
 
 static unsigned char *SCC_DriverName = "scc";
@@ -235,9 +226,9 @@
 	int irq;
 } SCC_ctrl[SCC_MAXCHIPS+1];
 
-static unsigned char Driver_Initialized = 0;
-static int Nchips = 0;
-static io_port Vector_Latch = 0;
+static unsigned char Driver_Initialized;
+static int Nchips;
+static io_port Vector_Latch;
 
 
 /* ******************************************************************** */
@@ -298,18 +289,6 @@
 	OutReg(scc->ctrl, reg, (scc->wreg[reg] &= ~val));
 }
 
-#ifdef SCC_DISABLE_ALL_INTS
-static inline void scc_cli(int irq)
-{ cli(); }
-static inline void scc_sti(int irq)
-{ sti(); }
-#else
-static inline void scc_cli(int irq)
-{ disable_irq(irq); }
-static inline void scc_sti(int irq)
-{ enable_irq(irq); }
-#endif
-
 /* ******************************************************************** */
 /* *			Some useful macros			      * */
 /* ******************************************************************** */
@@ -1427,7 +1406,6 @@
 }
 
 #undef CAST
-#undef SVAL
 
 /* ******************************************************************* */
 /* *			Send calibration pattern		     * */
@@ -1572,12 +1550,12 @@
 	dev->priv = (void *) scc;
 	dev->init = scc_net_init;
 
-	if ((addev? register_netdevice(dev) : register_netdev(dev)) != 0)
-	{
+	if ((addev? register_netdevice(dev) : register_netdev(dev)) != 0) {
 		kfree(dev);
                 return -EIO;
         }
 
+	SET_MODULE_OWNER(dev);
 	return 0;
 }
 
@@ -1604,7 +1582,7 @@
 	dev->stop	     = scc_net_close;
 
 	dev->hard_start_xmit = scc_net_tx;
-	dev->hard_header     = scc_net_header;
+	dev->hard_header     = ax25_encapsulate;
 	dev->rebuild_header  = ax25_rebuild_header;
 	dev->set_mac_address = scc_net_set_mac_address;
 	dev->get_stats       = scc_net_get_stats;
@@ -1633,8 +1611,6 @@
  	if (!scc->init)
 		return -EINVAL;
 
-	MOD_INC_USE_COUNT;
-
 	scc->tx_buff = NULL;
 	skb_queue_head_init(&scc->tx_queue);
  
@@ -1667,7 +1643,6 @@
 	
 	scc_discard_buffers(scc);
 
-	MOD_DEC_USE_COUNT;
 	return 0;
 }
 
@@ -1675,8 +1650,7 @@
 
 static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
 {
-	if (skb->len == 0)
-	{
+	if (skb->len == 0) {
 		dev_kfree_skb_irq(skb);
 		return;
 	}
@@ -1700,8 +1674,7 @@
 	unsigned long flags;
 	char kisscmd;
 
-	if (skb->len > scc->stat.bufsize || skb->len < 2)
-	{
+	if (skb->len > scc->stat.bufsize || skb->len < 2) {
 		scc->dev_stat.tx_dropped++;	/* bogus frame */
 		dev_kfree_skb(skb);
 		return 0;
@@ -1713,8 +1686,7 @@
 	kisscmd = *skb->data & 0x1f;
 	skb_pull(skb, 1);
 
-	if (kisscmd)
-	{
+	if (kisscmd) {
 		scc_set_param(scc, kisscmd, *skb->data);
 		dev_kfree_skb(skb);
 		return 0;
@@ -1723,8 +1695,7 @@
 	save_flags(flags);
 	cli();
 	
-	if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len)
-	{
+	if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
 		struct sk_buff *skb_del;
 		skb_del = skb_dequeue(&scc->tx_queue);
 		dev_kfree_skb(skb_del);
@@ -1739,8 +1710,7 @@
 	 * algorithm for normal halfduplex operation.
 	 */
 
-	if(scc->stat.tx_state == TXS_IDLE || scc->stat.tx_state == TXS_IDLE2)
-	{
+	if(scc->stat.tx_state == TXS_IDLE || scc->stat.tx_state == TXS_IDLE2) {
 		scc->stat.tx_state = TXS_BUSY;
 		if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
 			scc_start_tx_timer(scc, t_dwait, scc->kiss.waittime);
@@ -1804,8 +1774,12 @@
 					Ivec[hwcfg.irq].used = 1;
 			}
 
-			if (hwcfg.vector_latch) 
-				Vector_Latch = hwcfg.vector_latch;
+			if (hwcfg.vector_latch) {
+				if (!request_region(Vector_Latch, 1, "scc vector latch"))
+					printk(KERN_WARNING "z8530drv: warning, cannot reserve vector latch port 0x%x\n, disabled.", hwcfg.vector_latch);
+				else
+					Vector_Latch = hwcfg.vector_latch;
+			}
 
 			if (hwcfg.clock == 0)
 				hwcfg.clock = SCC_DEFAULT_CLOCK;
@@ -2001,14 +1975,6 @@
 	return 0;
 }
 
-/* ----> "hard" header <---- */
-
-static int  scc_net_header(struct sk_buff *skb, struct net_device *dev, 
-	unsigned short type, void *daddr, void *saddr, unsigned len)
-{
-	return ax25_encapsulate(skb, dev, type, daddr, saddr, len);
-}
-
 /* ----> get statistics <---- */
 
 static struct net_device_stats *scc_net_get_stats(struct net_device *dev)
@@ -2133,41 +2099,18 @@
         return len;
 }
 
-#ifdef CONFIG_PROC_FS
-#define scc_net_procfs_init() proc_net_create("z8530drv",0,scc_net_get_info)
-#define scc_net_procfs_remove() proc_net_remove("z8530drv")
-#else
-#define scc_net_procfs_init()
-#define scc_net_procfs_remove()
-#endif
-
-  
+ 
 /* ******************************************************************** */
 /* * 			Init SCC driver 			      * */
 /* ******************************************************************** */
 
 static int __init scc_init_driver (void)
 {
-	int chip, chan, k, result;
+	int result;
 	char devname[10];
 	
-	printk(KERN_INFO BANNER);
-	
-	memset(&SCC_ctrl, 0, sizeof(SCC_ctrl));
+	printk(banner);
 	
-	/* pre-init channel information */
-	
-	for (chip = 0; chip < SCC_MAXCHIPS; chip++)
-	{
-		memset((char *) &SCC_Info[2*chip  ], 0, sizeof(struct scc_channel));
-		memset((char *) &SCC_Info[2*chip+1], 0, sizeof(struct scc_channel));
-		
-		for (chan = 0; chan < 2; chan++)
-			SCC_Info[2*chip+chan].magic = SCC_MAGIC;
-	}
-
-	for (k = 0; k < 16; k++) Ivec[k].used = 0;
-
 	sprintf(devname,"%s0", SCC_DriverName);
 	
 	result = scc_net_setup(SCC_Info, devname, 0);
@@ -2177,7 +2120,7 @@
 		return result;
 	}
 
-	scc_net_procfs_init();
+	proc_net_create("z8530drv", 0, scc_net_get_info);
 
 	return 0;
 }
@@ -2193,7 +2136,10 @@
 	cli();
 
 	if (Nchips == 0)
+	{
 		unregister_netdev(SCC_Info[0].dev);
+		kfree(SCC_Info[0].dev);
+	}
 
 	for (k = 0; k < Nchips; k++)
 		if ( (ctrl = SCC_ctrl[k].chan_A) )
@@ -2206,24 +2152,27 @@
 	for (k = 0; k < Nchips*2; k++)
 	{
 		scc = &SCC_Info[k];
-		if (scc)
+		if (scc->ctrl)
 		{
 			release_region(scc->ctrl, 1);
 			release_region(scc->data, 1);
-			if (scc->dev)
-			{
-				unregister_netdev(scc->dev);
-				kfree(scc->dev);
-			}
+		}
+		if (scc->dev)
+		{
+			unregister_netdev(scc->dev);
+			kfree(scc->dev);
 		}
 	}
 	
 	for (k=0; k < 16 ; k++)
 		if (Ivec[k].used) free_irq(k, NULL);
 		
+	if (Vector_Latch)
+		release_region(Vector_Latch, 1);
+
 	restore_flags(flags);
 
-	scc_net_procfs_remove();
+	proc_net_remove("z8530drv");
 }
 
 MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/hydra.c linux/drivers/net/hydra.c
--- v2.4.0-prerelease/linux/drivers/net/hydra.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/net/hydra.c	Thu Jan  4 13:00:55 2001
@@ -1,24 +1,16 @@
-/* Linux/68k Hydra Amiganet board driver v2.1 BETA                          */
-/* copyleft by Topi Kanerva (topi@susanna.oulu.fi)                          */
-/* also some code & lots of fixes by Timo Rossi (trossi@cc.jyu.fi)          */
-
-/* The code is mostly based on the linux/68k Ariadne driver                 */
-/* copyrighted by Geert Uytterhoeven (geert@linux-m68k.org)                 */
-/* and Peter De Schrijver (Peter.DeSchrijver@linux.cc.kuleuven.ac.be)       */
+/* New Hydra driver using generic 8390 core */
+/* Based on old hydra driver by Topi Kanerva (topi@susanna.oulu.fi) */
 
 /* This file is subject to the terms and conditions of the GNU General      */
 /* Public License.  See the file COPYING in the main directory of the       */
 /* Linux distribution for more details.                                     */
 
+/* Peter De Schrijver (p2@mind.be) */
+/* Oldenburg 2000 */
+
 /* The Amiganet is a Zorro-II board made by Hydra Systems. It contains a    */
 /* NS8390 NIC (network interface controller) clone, 16 or 64K on-board RAM  */
 /* and 10BASE-2 (thin coax) and AUI connectors.                             */
-/*                                                                          */
-/* Changes                                                                  */
-/* Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/06/2000            */
-/* - check init_etherdev in hydra_probe                                     */
-/* - dev->priv is already zeroed by init_etherdev                           */
-
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -42,649 +34,229 @@
 #include <asm/amigahw.h>
 #include <linux/zorro.h>
 
-#include "hydra.h"
+#include "8390.h"
 
+#define NE_BASE		(dev->base_addr)
+#define NE_CMD		(0x00*2)
 
-#define HYDRA_DEBUG
-#undef HAVE_MULTICAST
+#define NE_EN0_ISR      (0x07*2)
+#define NE_EN0_DCFG     (0x0e*2)
 
-#define HYDRA_VERSION "v2.1 BETA"
+#define NE_EN0_RSARLO   (0x08*2)
+#define NE_EN0_RSARHI   (0x09*2)
+#define NE_EN0_RCNTLO   (0x0a*2)
+#define NE_EN0_RXCR     (0x0c*2)
+#define NE_EN0_TXCR     (0x0d*2)
+#define NE_EN0_RCNTHI   (0x0b*2)
+#define NE_EN0_IMR      (0x0f*2)
+
+#define NESM_START_PG   0x0    /* First page of TX buffer */
+#define NESM_STOP_PG    0x40    /* Last page +1 of RX ring */
+
+#define HYDRA_NIC_BASE 0xffe1
+#define HYDRA_ADDRPROM 0xffc0
+#define HYDRA_VERSION "v3.0alpha"
 
-#undef HYDRA_DEBUG        /* define this for (lots of) debugging information */
+#define WORDSWAP(a)     ((((a)>>8)&0xff) | ((a)<<8))
 
-#if 0                         /* currently hardwired to one transmit buffer */
-	#define TX_RING_SIZE	5
-	#define RX_RING_SIZE	16
-#else
-	#define TX_RING_SIZE 1
-	#define RX_RING_SIZE 8
+#ifdef MODULE
+static struct net_device *root_hydra_dev = NULL;
 #endif
 
-#define ETHER_MIN_LEN 64
-#define ETHER_MAX_LEN 1518
-#define ETHER_ADDR_LEN 6
-
-
-/*
- *   let's define here nice macros for writing and reading NIC registers
- *
- * the CIA accesses here are uses to make sure the minimum time
- * requirement between NIC chip selects is met.
- */
-#define WRITE_REG(reg, val) (ciaa.pra, ((u8)(*(nicbase+(reg))=val)))
-#define READ_REG(reg) (ciaa.pra, ((u8)(*(nicbase+(reg)))))
-
-/* mask value for the interrupts we use */
-#define NIC_INTS (ISR_PRX | ISR_PTX | ISR_RXE | ISR_TXE | ISR_OVW | ISR_CNT)
-
-/* only broadcasts, no promiscuous mode for now */
-#define NIC_RCRBITS (0)
-
-/*
- *   Private Device Data
- */
-struct hydra_private
-{
-	u16 tx_page_start;
-	u16 rx_page_start;
-	u16 rx_page_stop;
-	u16 next_pkt;
-	struct net_device_stats stats;
-};
-
+static int __init hydra_probe(void);
+static int hydra_init(unsigned long board);
 static int hydra_open(struct net_device *dev);
-static int hydra_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void hydra_interrupt(int irq, void *data, struct pt_regs *fp);
-static void __inline__ hydra_rx(struct net_device *dev, struct hydra_private *priv, volatile u8 *nicbase);
 static int hydra_close(struct net_device *dev);
-static struct net_device_stats *hydra_get_stats(struct net_device *dev);
-#ifdef HAVE_MULTICAST
-static void set_multicast_list(struct net_device *dev, int num_addrs, void *addrs);
-#endif
-
-
-/* this is now coherent with the C version below, */
-/* compile the source with -D__USE_ASM__ if you   */
-/* want it - it'll only be some 10% faster though */
-
-#if defined (__GNUC__) && defined (__mc68000__) && defined (USE_ASM)
-
-static __inline__ void *memcpyw(u16 *dest, u16 *src, int len)
-{
-    __asm__("   move.l %0,%/a1; move.l %1,%/a0; move.l %2,%/d0 \n\t"
-	    "   cmpi.l #2,%/d0 \n\t"
-	    "1: bcs.s  2f \n\t"
-            "   move.w %/a0@+,%/a1@+ \n\t"
-	    "   subq.l #2,%/d0 \n\t"
-	    "   bra.s  1b \n\t"
-	    "2: cmpi.l #1,%/d0 \n\t"
-	    "   bne.s  3f \n\t"
-	    "   move.w %/a0@,%/d0 \n\t"
-	    "   swap.w %/d0 \n\t"
-	    "   move.b %/d0,%/a1@ \n\t"
-	    "3: moveq  #0,%/d0 \n\t"
-	  :
-	  : "g" (dest), "g" (src), "g" (len)
-	  : "a1", "a0", "d0");
-    return;
-}
-
-#else
-
-/* hydra memory can only be read or written as words or longwords.  */
-/* that will mean that we'll have to write a special memcpy for it. */
-/* this one here relies on the fact that _writes_ to hydra memory   */
-/* are guaranteed to be of even length. (reads can be arbitrary)    */
-
-/*
- *	FIXME: Surely we should be using the OS generic stuff and do
- *
- *	memcpy(dest,src,(len+1)&~1);
- *
- *	Can a 68K guy with this card check that ? - better yet
- *	use a copy/checksum on it.
- */
- 
-static void memcpyw(u16 *dest, u16 *src, int len)
-{
-	if(len & 1)
-		len++;
-
-	while (len >= 2) 
-	{
-		*(dest++) = *(src++);
-		len -= 2;
+static void hydra_reset_8390(struct net_device *dev);
+static void hydra_get_8390_hdr(struct net_device *dev,
+			       struct e8390_pkt_hdr *hdr, int ring_page);
+static void hydra_block_input(struct net_device *dev, int count,
+			      struct sk_buff *skb, int ring_offset);
+static void hydra_block_output(struct net_device *dev, int count,
+			       const unsigned char *buf, int start_page);
+static void __exit hydra_cleanup(void);
+
+static int __init hydra_probe(void)
+{
+    struct zorro_dev *z = NULL;
+    unsigned long board;
+    int err = -ENODEV;
+
+    if (load_8390_module("hydra.c"))
+	return -ENOSYS;
+
+    while ((z = zorro_find_device(ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET, z))) {
+	board = z->resource.start;
+	if (!request_mem_region(board, 0x10000, "Hydra"))
+	    continue;
+	if ((err = hydra_init(ZTWO_VADDR(board)))) {
+	    release_mem_region(board, 0x10000);
+	    return err;
 	}
-}
+	err = 0;
+    }
 
-#endif
+    if (err == -ENODEV) {
+	printk("No Hydra ethernet card found.\n");
+	unload_8390_module();
+    }
+    return err;
+}
 
-int __init hydra_probe(struct net_device *dev)
+int __init hydra_init(unsigned long board)
 {
-	struct zorro_dev *z = NULL;
-	int j;
+    struct net_device *dev;
+    unsigned long ioaddr = board+HYDRA_NIC_BASE;
+    const char *name = NULL;
+    int start_page, stop_page;
+    int j;
+
+    static u32 hydra_offsets[16] = {
+	0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
+	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
+    };
+
+    dev = init_etherdev(0, 0);
+    if (!dev)
+	return -ENOMEM;
+
+    for(j = 0; j < ETHER_ADDR_LEN; j++)
+	dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+
+    /* We must set the 8390 for word mode. */
+    writeb(0x4b, ioaddr + NE_EN0_DCFG);
+    start_page = NESM_START_PG;
+    stop_page = NESM_STOP_PG;
+
+    dev->base_addr = ioaddr;
+    dev->irq = IRQ_AMIGA_PORTS;
+
+    /* Install the Interrupt handler */
+    if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ, "Hydra Ethernet",
+		    dev))
+	return -EAGAIN;
+
+    /* Allocate dev->priv and fill in 8390 specific dev fields. */
+    if (ethdev_init(dev)) {
+	printk("Unable to get memory for dev->priv.\n");
+	return -ENOMEM;
+    }
 
-#ifdef HYDRA_DEBUG
- printk("hydra_probe(%x)\n", dev);
-#endif
+    name = "NE2000";
 
-	while ((z = zorro_find_device(ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET, z))) {
-		unsigned long board = z->resource.start;
-		unsigned long base_addr = board+HYDRA_NIC_BASE;
-
-		if (!request_mem_region(base_addr, 0x20, "NS8390"))
-			continue;
-		if (!request_mem_region(board, 0x4000, "RAM")) {
-		    release_mem_region(base_addr, 0x20);
-		    continue;
-		}
-
-		dev = init_etherdev(NULL, sizeof(struct hydra_private));
-
-		if (!dev) {
-		    	release_mem_region(base_addr, 0x20);
-		    	release_mem_region(board, 0x4000);
-			continue;
-		}
-		SET_MODULE_OWNER(dev);
-
-		for(j = 0; j < ETHER_ADDR_LEN; j++)
-			dev->dev_addr[j] = *((u8 *)ZTWO_VADDR(board + HYDRA_ADDRPROM + 2*j));
-    
-		printk("%s: hydra at 0x%08x, address %02x:%02x:%02x:%02x:%02x:%02x (hydra.c " HYDRA_VERSION ")\n",
-			dev->name, (int)board, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
-			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
-
-		dev->base_addr = ZTWO_VADDR(base_addr);
-		dev->mem_start = ZTWO_VADDR(board);
-		dev->mem_end = dev->mem_start+0x4000;
-
-		dev->open = &hydra_open;
-		dev->stop = &hydra_close;
-		dev->hard_start_xmit = &hydra_start_xmit;
-		dev->get_stats = &hydra_get_stats;
-#ifdef HAVE_MULTICAST
-		dev->set_multicast_list = &set_multicast_list;
+    printk("%s: hydra at 0x%08lx, address %02x:%02x:%02x:%02x:%02x:%02x (hydra.c " HYDRA_VERSION ")\n", dev->name, ZTWO_PADDR(board),
+	dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+    ei_status.name = name;
+    ei_status.tx_start_page = start_page;
+    ei_status.stop_page = stop_page;
+    ei_status.word16 = 1;
+    ei_status.bigendian = 1;
+
+    ei_status.rx_start_page = start_page + TX_PAGES;
+
+    ei_status.reset_8390 = &hydra_reset_8390;
+    ei_status.block_input = &hydra_block_input;
+    ei_status.block_output = &hydra_block_output;
+    ei_status.get_8390_hdr = &hydra_get_8390_hdr;
+    ei_status.reg_offset = hydra_offsets;
+    dev->open = &hydra_open;
+    dev->stop = &hydra_close;
+#ifdef MODULE
+    ei_status.priv = (unsigned long)root_hydra_dev;
+    root_hydra_dev = dev;
 #endif
-		
-		/*
-		 *	Cannot yet do multicast
-		 */
-		dev->flags&=~IFF_MULTICAST;
-		return(0);
-	}
-	return(-ENODEV);
+    NS8390_init(dev, 0);
+    return 0;
 }
 
-
 static int hydra_open(struct net_device *dev)
 {
-	struct hydra_private *priv = (struct hydra_private *)dev->priv;
-	volatile u8 *nicbase = (u8 *)dev->base_addr;
-	int i;
-    
-#ifdef HYDRA_DEBUG
-	printk("hydra_open(0x%x)\n", dev);
-#endif
-
-	/* first, initialize the private structure */
-	priv->tx_page_start = 0;   /* these are 256 byte buffers for NS8390 */
-	priv->rx_page_start = 6;
-	priv->rx_page_stop  = 62;  /* these values are hard coded for now */
-
-	 /* Reset the NS8390 NIC */
-	WRITE_REG(NIC_CR, CR_PAGE0 | CR_NODMA | CR_STOP);
-
-	/* be sure that the NIC is in stopped state */
-	while(!(READ_REG(NIC_ISR) & ISR_RST));
-
-	/* word transfer, big endian bytes, loopback, FIFO threshold 4 bytes */
-	WRITE_REG(NIC_DCR, DCR_WTS | DCR_BOS | DCR_LS | DCR_FT0);
-
-	/* clear remote byte count registers */
-	WRITE_REG(NIC_RBCR0, 0);
-	WRITE_REG(NIC_RBCR1, 0);
-
-	/* accept packets addressed to this card and also broadcast packets */
-	WRITE_REG(NIC_RCR, NIC_RCRBITS);
-
-	/* enable loopback mode 1 */
-	WRITE_REG(NIC_TCR, TCR_LB1);
-
-	/* initialize receive buffer ring */
-	WRITE_REG(NIC_PSTART, priv->rx_page_start);
-	WRITE_REG(NIC_PSTOP, priv->rx_page_stop);
-	WRITE_REG(NIC_BNDRY, priv->rx_page_start);
-
-	/* clear interrupts */
-	WRITE_REG(NIC_ISR, 0xff);
-
-	/* enable interrupts */
-	WRITE_REG(NIC_IMR, NIC_INTS);
-
-	/* set the ethernet hardware address */
-	WRITE_REG(NIC_CR, CR_PAGE1 | CR_NODMA | CR_STOP); /* goto page 1 */
-
-	WRITE_REG(NIC_PAR0, dev->dev_addr[0]);
-	WRITE_REG(NIC_PAR1, dev->dev_addr[1]);
-	WRITE_REG(NIC_PAR2, dev->dev_addr[2]);
-	WRITE_REG(NIC_PAR3, dev->dev_addr[3]);
-	WRITE_REG(NIC_PAR4, dev->dev_addr[4]);
-	WRITE_REG(NIC_PAR5, dev->dev_addr[5]);
-
-	/* clear multicast hash table */
-	for(i = 0; i < 8; i++)
-		WRITE_REG(NIC_MAR0 + 2*i, 0);
-
-	priv->next_pkt = priv->rx_page_start+1; /* init our s/w variable */
-	WRITE_REG(NIC_CURR, priv->next_pkt);    /* set the next buf for current */
-
-	/* goto page 0, start NIC */
-	WRITE_REG(NIC_CR, CR_PAGE0 | CR_NODMA | CR_START);
-
-	/* take interface out of loopback */
-	WRITE_REG(NIC_TCR, 0);
-
-	netif_start_queue(dev);
-    
-	i = request_irq(IRQ_AMIGA_PORTS, hydra_interrupt, SA_SHIRQ,
-		        dev->name, dev);
-	if (i) return i;
-
-	return(0);
+    ei_open(dev);
+    MOD_INC_USE_COUNT;
+    return 0;
 }
 
-
 static int hydra_close(struct net_device *dev)
 {
-	struct hydra_private *priv = (struct hydra_private *)dev->priv;
-	volatile u8 *nicbase = (u8 *)dev->base_addr;
-	int n = 5000;
-
-	netif_stop_queue(dev);
-
-#ifdef HYDRA_DEBUG
-	printk("%s: Shutting down ethercard\n", dev->name);
-	printk("%s: %d packets missed\n", dev->name, priv->stats.rx_missed_errors);
-#endif
-
-	WRITE_REG(NIC_CR, CR_PAGE0 | CR_NODMA | CR_STOP);
-
-	/* wait for NIC to stop (what a nice timeout..) */
-	while(((READ_REG(NIC_ISR) & ISR_RST) == 0) && --n);
-    
-	free_irq(IRQ_AMIGA_PORTS, dev);
-
-	return(0);
+    if (ei_debug > 1)
+	printk("%s: Shutting down ethercard.\n", dev->name);
+    ei_close(dev);
+    MOD_DEC_USE_COUNT;
+    return 0;
 }
 
-
-static void hydra_interrupt(int irq, void *data, struct pt_regs *fp)
+static void hydra_reset_8390(struct net_device *dev)
 {
-	volatile u8 *nicbase;
-  
-	struct net_device *dev = (struct net_device *) data;
-	struct hydra_private *priv;
-	u16 intbits;
-
-	if(dev == NULL)
-	{
-		printk("hydra_interrupt(): irq for unknown device\n");
-		return;
-	}
-
-	/* this is not likely a problem - i think */
-	if(dev->interrupt)
-		printk("%s: re-entering the interrupt handler\n", dev->name);
-
-	dev->interrupt = 1;
-
-	priv = (struct hydra_private *) dev->priv;
-	nicbase = (u8 *)dev->base_addr;
-
-	/* select page 0 */
-	WRITE_REG(NIC_CR, CR_PAGE0 | CR_START | CR_NODMA);
-
-	intbits = READ_REG(NIC_ISR) & NIC_INTS;
-	if(intbits == 0)
-        {
-		dev->interrupt = 0;
-		return;
-	}
-
-	/* acknowledge all interrupts, by clearing the interrupt flag */
-	WRITE_REG(NIC_ISR, intbits);
-
-	if((intbits & ISR_PTX) && !(intbits & ISR_TXE))
-	{
-		dev->tbusy = 0;
-		mark_bh(NET_BH);
-	}
-	
-	if((intbits & ISR_PRX) && !(intbits & ISR_RXE))/* packet received OK */
-		hydra_rx(dev, priv, nicbase);
-
-        if(intbits & ISR_TXE)
-		priv->stats.tx_errors++;
-
-        if(intbits & ISR_RXE)
-		priv->stats.rx_errors++;
-
-	if(intbits & ISR_CNT) 
-	{
-		/*
-		 * read the tally counters and (currently) ignore the values
-		 * might be useful because of bugs of some versions of the 8390 NIC
-		 */
-#ifdef HYDRA_DEBUG
-		printk("hydra_interrupt(): ISR_CNT\n");
-#endif
-		(void)READ_REG(NIC_CNTR0);
-		(void)READ_REG(NIC_CNTR1);
-		(void)READ_REG(NIC_CNTR2);
-	}
-	
-	if(intbits & ISR_OVW)
-	{
-#ifdef HYDRA_DEBUG
-		WRITE_REG(NIC_CR, CR_PAGE1 | CR_START | CR_NODMA);
-/* another one just too much for me to comprehend - basically this could  */
-/* only occur because of invalid access to hydra ram, thus invalidating  */
-/* the interrupt bits read - in average usage these do not occur at all */
-		printk("hydra_interrupt(): overwrite warning, NIC_ISR %02x, NIC_CURR %02x\n",
-			intbits, READ_REG(NIC_CURR));
-		WRITE_REG(NIC_CR, CR_PAGE0 | CR_START | CR_NODMA);
-#endif
-	    
-
-		/* overwrite warning occurred, stop NIC & check the BOUNDARY pointer */
-		/* FIXME - real overwrite handling needed !! */
-
-		printk("hydra_interrupt(): overwrite warning, resetting NIC\n");
-		WRITE_REG(NIC_CR, CR_PAGE0 | CR_NODMA | CR_STOP);
-		while(!(READ_REG(NIC_ISR) & ISR_RST));
-		/* wait for NIC to reset */
-		WRITE_REG(NIC_DCR, DCR_WTS | DCR_BOS | DCR_LS | DCR_FT0);
-		WRITE_REG(NIC_RBCR0, 0);
-		WRITE_REG(NIC_RBCR1, 0);
-		WRITE_REG(NIC_RCR, NIC_RCRBITS);
-		WRITE_REG(NIC_TCR, TCR_LB1);
-		WRITE_REG(NIC_PSTART, priv->rx_page_start);
-		WRITE_REG(NIC_PSTOP, priv->rx_page_stop);
-		WRITE_REG(NIC_BNDRY, priv->rx_page_start);
-		WRITE_REG(NIC_ISR, 0xff);
-		WRITE_REG(NIC_IMR, NIC_INTS);
-		/* currently this _won't_ reset my hydra, even though it is */
-		/* basically the same code as in the board init - any ideas? */
-
-		priv->next_pkt = priv->rx_page_start+1; /* init our s/w variable */
-		WRITE_REG(NIC_CURR, priv->next_pkt);    /* set the next buf for current */
-	    
-		WRITE_REG(NIC_CR, CR_PAGE0 | CR_NODMA | CR_START);
-
-		WRITE_REG(NIC_TCR, 0);
-	}
-
-	dev->interrupt = 0;
-	return;
-    }
-
-
-/*
- * packet transmit routine
- */
-
-static int hydra_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct hydra_private *priv = (struct hydra_private *)dev->priv;
-	volatile u8 *nicbase = (u8 *)dev->base_addr;
-	int len, len1;
-
-	/* Transmitter timeout, serious problems. */
-
-	if(dev->tbusy)
-	{
-		int tickssofar = jiffies - dev->trans_start;
-		if(tickssofar < 20)
-			return(1);
-		WRITE_REG(NIC_CR, CR_STOP);
-		printk("%s: transmit timed out, status %4.4x, resetting.\n", dev->name, 0);
-		priv->stats.tx_errors++;
-		dev->tbusy = 0;
-		dev->trans_start = jiffies;
-		return(0);
-	}
-
-	len=skb->len;
-
-	/* fill in a tx ring entry */
-    
-#ifdef HYDRA_DEBUG
-	printk("TX pkt type 0x%04x from ", ((u16 *)skb->data)[6]);
-	{
-		int i;
-		u8 *ptr = &((u8 *)skb->data)[6];
-		for (i = 0; i < 6; i++)
-			printk("%02x", ptr[i]);
-	}
-	printk(" to ");
-	{
-		int i;
-		u8 *ptr = (u8 *)skb->data;
-		for (i = 0; i < 6; i++)
-			printk("%02x", ptr[i]);
-	}
-	printk(" data 0x%08x len %d\n", (int)skb->data, len);
-#endif
-
-	/*
-	 * make sure that the packet size is at least the minimum
-	 * allowed ethernet packet length.
-	 * (FIXME: Should also clear the unused space...)
-	 * note: minimum packet length is 64, including CRC
-	 */
-	len1 = len;
-
-	if(len < (ETHER_MIN_LEN-4))
-		len = (ETHER_MIN_LEN-1);
-
-	/* make sure we've got an even number of bytes to copy to hydra's mem */
-	if(len & 1) len++;
-
-	if((u32)(dev->mem_start + (priv->tx_page_start << 8)) < 0x80000000)
-		printk("weirdness: memcpyw(txbuf, skbdata, len): txbuf = 0x%x\n", (u_int)(dev->mem_start+(priv->tx_page_start<<8)));
-
-	/* copy the packet data to the transmit buffer 
-	   in the ethernet card RAM */
-	memcpyw((u16 *)(dev->mem_start + (priv->tx_page_start << 8)),
-		(u16 *)skb->data, len);
-	/* clear the unused space */
-	for(; len1<len; len1++)
-		(u16)*((u8 *)dev->mem_start + (priv->tx_page_start<<8) + len1)
-		    = 0;
-	dev_kfree_skb(skb);
-
-	priv->stats.tx_packets++;
-
-	cli();
-	/* make sure we are on the correct page */
-	WRITE_REG(NIC_CR, CR_PAGE0 | CR_NODMA | CR_START);
-
-	/* here we configure the transmit page start register etc */
-	/* notice that this code is hardwired to one transmit buffer */
-	WRITE_REG(NIC_TPSR, priv->tx_page_start);
-	WRITE_REG(NIC_TBCR0, len & 0xff);
-	WRITE_REG(NIC_TBCR1, len >> 8);
-
-	/* commit the packet to the wire */
-	WRITE_REG(NIC_CR, CR_PAGE0 | CR_START | CR_NODMA | CR_TXP);
-	sti();
-
-	 dev->trans_start = jiffies;
-
-	return(0);
+    printk("Hydra hw reset not there\n");
 }
 
-
-static void __inline__ hydra_rx(struct net_device *dev, struct hydra_private *priv, volatile u8 *nicbase)
+static void hydra_get_8390_hdr(struct net_device *dev,
+			       struct e8390_pkt_hdr *hdr, int ring_page)
 {
-	volatile u16 *board_ram_ptr;
-	struct sk_buff *skb;
-	int hdr_next_pkt, pkt_len, len1, boundary;
+    int nic_base = dev->base_addr;
+    short *ptrs;
+    unsigned long hdr_start= (nic_base-HYDRA_NIC_BASE) +
+			     ((ring_page - NESM_START_PG)<<8);
+    ptrs = (short *)hdr;
 
-
-	/* remove packet(s) from the ring and commit them to TCP layer */
-	WRITE_REG(NIC_CR, CR_PAGE1 | CR_NODMA | CR_START); /* page 1 */
-	while(priv->next_pkt != READ_REG(NIC_CURR)) /* should read this only once? */
-	{
-		board_ram_ptr = (u16 *)(dev->mem_start + (priv->next_pkt << 8));
-	
-#ifdef HYDRA_DEBUG
-		printk("next_pkt = 0x%x, board_ram_ptr = 0x%x\n", priv->next_pkt, board_ram_ptr);
-#endif
-	
-		/* the following must be done with two steps, or
-		   GCC optimizes it to a byte access to Hydra memory,
-		   which doesn't work... */
-		hdr_next_pkt = board_ram_ptr[0];
-		hdr_next_pkt >>= 8;
-	
-		pkt_len = board_ram_ptr[1];
-		pkt_len = ((pkt_len >> 8) | ((pkt_len & 0xff) << 8));
-	
-#ifdef HYDRA_DEBUG
-		printk("hydra_interrupt(): hdr_next_pkt = 0x%02x, len = %d\n", hdr_next_pkt, pkt_len);
-#endif
-	
-		if(pkt_len >= ETHER_MIN_LEN && pkt_len <= ETHER_MAX_LEN)
-		{
-			/* note that board_ram_ptr is u16 */
-			/* CRC is not included in the packet length */
-		
-			pkt_len -= 4;
-			skb = dev_alloc_skb(pkt_len+2);
-			if(skb == NULL)
-		  	{
-				printk(KERN_INFO "%s: memory squeeze, dropping packet.\n", dev->name);
-				priv->stats.rx_dropped++;
-			}
-			else
-			{
-				skb->dev = dev;
-				skb_reserve(skb, 2);
-				if(hdr_next_pkt < priv->next_pkt && hdr_next_pkt != priv->rx_page_start)
-				{
-					/* here, the packet is wrapped */
-					len1 = ((priv->rx_page_stop - priv->next_pkt)<<8)-4;
-			
-					memcpyw((u16 *)skb_put(skb, len1), (u16 *)(board_ram_ptr+2), len1);
-					memcpyw((u16 *)skb_put(skb, pkt_len-len1),  (u16 *)(dev->mem_start+(priv->rx_page_start<<8)), pkt_len-len1);
-			
-#ifdef HYDRA_DEBUG
-					printk("wrapped packet: %d/%d bytes\n", len1, pkt_len-len1);
-#endif
-				}  /* ... here, packet is not wrapped */
-				else
-					memcpyw((u16 *) skb_put(skb, pkt_len), (u16 *)(board_ram_ptr+2), pkt_len);
-			}
-		}
-		else
-		{
-			WRITE_REG(NIC_CR, CR_PAGE1 | CR_START | CR_NODMA);
-			printk("hydra_interrupt(): invalid packet len: %d, NIC_CURR = %02x\n", pkt_len, READ_REG(NIC_CURR));
-/*
-this is the error i kept getting until i switched to 0.9.10. it still doesn't
-mean that the bug would have gone away - so be alarmed. the packet is likely
-being fetched from a wrong memory location - but why - dunno
-   
-note-for-v2.1: not really problem anymore. hasn't been for a long time.
-*/
-		
-			WRITE_REG(NIC_CR, CR_PAGE0 | CR_START | CR_NODMA);
-			/* should probably reset the NIC here ?? */
-		
-			hydra_open(dev);  /* FIXME - i shouldn't really be doing this. */
-			return;
-		}
-	
-		/* now, update the next_pkt pointer */
-		if(hdr_next_pkt < priv->rx_page_stop)
-			priv->next_pkt = hdr_next_pkt;
-		else
-			printk("hydra_interrupt(): invalid next_pkt pointer %d\n", hdr_next_pkt);
-	
-		/* update the boundary pointer */
-		boundary = priv->next_pkt - 1;
-		if(boundary < priv->rx_page_start)
-			boundary = priv->rx_page_stop - 1;
-	
-		/* set NIC to page 0 to update the NIC_BNDRY register */
-		WRITE_REG(NIC_CR, CR_PAGE0 | CR_START | CR_NODMA);
-		WRITE_REG(NIC_BNDRY, boundary);
-	
-		/* select page1 to access the NIC_CURR register */
-		WRITE_REG(NIC_CR, CR_PAGE1 | CR_START | CR_NODMA);
-	
-	
-		skb->protocol = eth_type_trans(skb, dev);
-		netif_rx(skb);
-		priv->stats.rx_packets++;
-	
-	}
-	return;
+    *(ptrs++) = readw(hdr_start);
+    *((short *)hdr) = WORDSWAP(*((short *)hdr));
+    hdr_start += 2;
+    *(ptrs++) = readw(hdr_start);
+    *((short *)hdr+1) = WORDSWAP(*((short *)hdr+1));
 }
-    
 
-static struct net_device_stats *hydra_get_stats(struct net_device *dev)
+static void hydra_block_input(struct net_device *dev, int count,
+			      struct sk_buff *skb, int ring_offset)
 {
-	struct hydra_private *priv = (struct hydra_private *)dev->priv;
-#if 0
-	u8 *board = (u8 *)dev->mem_start; 
+    unsigned long nic_base = dev->base_addr;
+    unsigned long mem_base = nic_base - HYDRA_NIC_BASE;
+    unsigned long xfer_start = mem_base + ring_offset - (NESM_START_PG<<8);
 
-	short saved_addr;
-#endif
-/* currently does nothing :) i'll finish this later */
+    if (count&1)
+	count++;
 
-	return(&priv->stats);
-}
+    if (xfer_start+count >  mem_base + (NESM_STOP_PG<<8)) {
+	int semi_count = (mem_base + (NESM_STOP_PG<<8)) - xfer_start;
 
-#ifdef HAVE_MULTICAST
-static void set_multicast_list(struct net_device *dev, int num_addrs, void *addrs)
-{
-	struct hydra_private *priv = (struct hydra_private *)dev->priv;
-	u8 *board = (u8 *)dev->mem_start;
+	memcpy_fromio(skb->data,xfer_start,semi_count);
+	count -= semi_count;
+	memcpy_fromio(skb->data+semi_count, mem_base, count);
+    } else
+	memcpy_fromio(skb->data, xfer_start,count);
 
-	/* yes, this code is also waiting for someone to complete.. :) */
-	/* (personally i don't care about multicasts at all :) */
-	return;
 }
-#endif
-
-
-#ifdef MODULE
-static struct net_device hydra_dev;
 
-int init_module(void)
+static void hydra_block_output(struct net_device *dev, int count,
+			       const unsigned char *buf, int start_page)
 {
-	int err;
+    unsigned long nic_base = dev->base_addr;
+    unsigned long mem_base = nic_base - HYDRA_NIC_BASE;
 
-	hydra_dev.init = hydra_probe;
-	if ((err = register_netdev(&hydra_dev))) {
-		if (err == -EIO)
-			printk("No Hydra board found. Module not loaded.\n");
-		return(err);
-	}
-	return(0);
+    if (count&1)
+	count++;
+
+    memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count);
 }
 
-void cleanup_module(void)
+static void __exit hydra_cleanup(void)
 {
-	struct hydra_private *priv = (struct hydra_private *)hydra_dev.priv;
+#ifdef MODULE
+    struct net_device *dev, *next;
 
-	unregister_netdev(&hydra_dev);
-	release_mem_region(ZTWO_PADDR(hydra_dev.base_addr), 0x20);
-	release_mem_region(ZTWO_PADDR(hydra_dev.mem_start), 0x4000);
-	kfree(priv);
+    while ((dev = root_hydra_dev)) {
+	next = (struct net_device *)(ei_status.priv);
+	unregister_netdev(dev);
+	free_irq(IRQ_AMIGA_PORTS, dev);
+	release_mem_region(ZTWO_PADDR(dev->base_addr)-HYDRA_NIC_BASE, 0x10000);
+	kfree(dev);
+	root_hydra_dev = next;
+    }
+    unload_8390_module();
+#endif
 }
 
-#endif /* MODULE */
+module_init(hydra_probe);
+module_exit(hydra_cleanup);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/ioc3-eth.c linux/drivers/net/ioc3-eth.c
--- v2.4.0-prerelease/linux/drivers/net/ioc3-eth.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/net/ioc3-eth.c	Thu Jan  4 13:00:55 2001
@@ -50,7 +50,6 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/pci_ids.h>
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/irda/toshoboe.c linux/drivers/net/irda/toshoboe.c
--- v2.4.0-prerelease/linux/drivers/net/irda/toshoboe.c	Sun Nov 19 18:44:11 2000
+++ linux/drivers/net/irda/toshoboe.c	Thu Jan  4 12:50:12 2001
@@ -896,7 +896,7 @@
 /*FIXME: can't sleep here wait one second */
 
   while ((i--) && (self->txpending))
-    udelay (100000);
+    mdelay (100);
 
   toshoboe_stopchip (self);
   toshoboe_disablebm (self);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/mvme147.c linux/drivers/net/mvme147.c
--- v2.4.0-prerelease/linux/drivers/net/mvme147.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/net/mvme147.c	Thu Jan  4 13:00:55 2001
@@ -63,7 +63,7 @@
 
 typedef void (*writerap_t)(void *, unsigned short);
 typedef void (*writerdp_t)(void *, unsigned short);
-typedef void (*readrdp_t)(void *);
+typedef unsigned short (*readrdp_t)(void *);
 
 #ifdef MODULE
 static struct m147lance_private *root_m147lance_dev = NULL;
@@ -79,7 +79,7 @@
 	u_long address;
 
 	if (!MACH_IS_MVME147 || called)
-		return(-ENODEV);
+		return -ENODEV;
 	called++;
 
 	SET_MODULE_OWNER(dev);
@@ -96,6 +96,7 @@
 	dev->hard_start_xmit = &lance_start_xmit;
 	dev->get_stats = &lance_get_stats;
 	dev->set_multicast_list = &lance_set_multicast;
+	dev->tx_timeout = &lance_tx_timeout;
 	dev->dma = 0;
 
 	addr=(u_long *)ETHERNET_ADDRESS;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/pcmcia/wavelan_cs.c linux/drivers/net/pcmcia/wavelan_cs.c
--- v2.4.0-prerelease/linux/drivers/net/pcmcia/wavelan_cs.c	Thu May  4 11:24:42 2000
+++ linux/drivers/net/pcmcia/wavelan_cs.c	Thu Jan  4 12:50:12 2001
@@ -132,7 +132,7 @@
 {
   hacr_write(base, hacr);
   /* delay might only be needed sometimes */
-  udelay(1000L);
+  mdelay(1L);
 } /* hacr_write_slow */
 
 /*------------------------------------------------------------------*/
@@ -190,7 +190,7 @@
        * sequence to write is. This hack seem to work for me... */
       count = 0;
       while((readb(verify) != PSA_COMP_PCMCIA_915) && (count++ < 100))
-	udelay(1000);
+	mdelay(1);
     }
 
   /* Put the host interface back in standard state */
@@ -479,7 +479,7 @@
       mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WRITE);
 
       /* Wavelan doc says : wait at least 10 ms for EEBUSY = 0 */
-      udelay(10000);
+      mdelay(10);
       fee_wait(base, 10, 100);
     }
 
@@ -3612,7 +3612,7 @@
 
   /* reset the LAN controller (i82593) */
   outb(OP0_RESET, LCCR(base));
-  udelay(1000L);	/* A bit crude ! */
+  mdelay(1);	/* A bit crude ! */
 
   /* Initialize the LAN controler */
   if((wv_82593_config(dev) == FALSE) ||
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/rcpci45.c linux/drivers/net/rcpci45.c
--- v2.4.0-prerelease/linux/drivers/net/rcpci45.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/net/rcpci45.c	Mon Jan  1 10:17:49 2001
@@ -157,7 +157,7 @@
 	{ RC_PCI45_VENDOR_ID, RC_PCI45_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0, }
 };
-MODULE_DEVICE_TABLE(pci, rcpci_pci_table);
+MODULE_DEVICE_TABLE(pci, rcpci45_pci_table);
 
 static void rcpci45_remove_one(struct pci_dev *pdev)
 {
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/setup.c linux/drivers/net/setup.c
--- v2.4.0-prerelease/linux/drivers/net/setup.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/net/setup.c	Thu Jan  4 12:50:17 2001
@@ -9,13 +9,11 @@
 #include <linux/init.h>
 #include <linux/netlink.h>
 
-extern int mkiss_init_ctrl_dev(void);
 extern int slip_init_ctrl_dev(void);
 extern int strip_init_ctrl_dev(void);
 extern int x25_asy_init_ctrl_dev(void);
   
 extern int dmascc_init(void);
-extern int yam_init(void);
 
 extern int awc4500_pci_probe(void);
 extern int awc4500_isa_probe(void);
@@ -102,13 +100,6 @@
 #endif
 
 #endif
-/*
- *	Amateur Radio Drivers
- */	
-
-#ifdef CONFIG_YAM
-	{yam_init, 0},
-#endif	/* CONFIG_YAM */
 
 /*
  *	Token Ring Drivers
@@ -148,9 +139,6 @@
 #endif
 #if defined(CONFIG_X25_ASY)
 	x25_asy_init_ctrl_dev();
-#endif
-#if defined(CONFIG_MKISS)
-	mkiss_init_ctrl_dev();
 #endif
 #if defined(CONFIG_STRIP)
 	strip_init_ctrl_dev();
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/tulip/media.c linux/drivers/net/tulip/media.c
--- v2.4.0-prerelease/linux/drivers/net/tulip/media.c	Mon Jun 19 13:42:39 2000
+++ linux/drivers/net/tulip/media.c	Mon Jan  1 09:54:07 2001
@@ -263,6 +263,24 @@
 			tulip_mdio_write(dev, tp->phys[phy_num], 4, to_advertise);
 			break;
 		}
+		case 5: case 6: {
+			u16 setup[5];
+			u32 csr13val, csr14val, csr15dir, csr15val;
+			for (i = 0; i < 5; i++)
+				setup[i] = get_u16(&p[i*2 + 1]);
+
+			if (startup && mtable->has_reset) {
+				struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+				unsigned char *rst = rleaf->leafdata;
+				if (tulip_debug > 1)
+					printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+						   dev->name);
+				for (i = 0; i < rst[0]; i++)
+					outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+			}
+
+			break;
+		}
 		default:
 			printk(KERN_DEBUG "%s:  Invalid media table selection %d.\n",
 					   dev->name, mleaf->type);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/net/tulip/tulip_core.c linux/drivers/net/tulip/tulip_core.c
--- v2.4.0-prerelease/linux/drivers/net/tulip/tulip_core.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/net/tulip/tulip_core.c	Thu Jan  4 12:50:17 2001
@@ -28,7 +28,7 @@
 #include <asm/unaligned.h>
 
 static char version[] __devinitdata =
-	"Linux Tulip driver version 0.9.12 (December 17, 2000)\n";
+	"Linux Tulip driver version 0.9.13 (January 2, 2001)\n";
 
 
 /* A few user-configurable values. */
@@ -1044,11 +1044,52 @@
 	if (tulip_debug > 0  &&  did_version++ == 0)
 		printk (KERN_INFO "%s", version);
 
+	/*
+	 *	LAN Media wires a Tulip chip to a WAN interface. Needs a very
+	 *	different driver (lmc driver)
+	 */
+	 
         if( pdev->subsystem_vendor == 0x1376 ){
 		printk (KERN_ERR PFX "skipping LMC card.\n");
 		return -ENODEV;
 	}
-
+	
+	/*
+	 *	Early DM9100's need software CRC and the DMFE driver
+	 */
+	 
+	if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
+	{
+		u32 dev_rev;
+		/* Read Chip revision */
+		pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
+		if(dev_rev < 0x02000030)
+		{
+			printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+			return -ENODEV;
+		}
+	}
+		
+	/*
+	 *	Look for early PCI chipsets where people report hangs
+	 *	without the workarounds being on.
+	 */
+	 
+	/* Intel Saturn. Switch to an 8-longword burst, 8-longword cache alignment.
+	   Aries might need this too. The Saturn errata are not pretty reading but
+	   thankfully it's an old 486 chipset.
+	*/
+	
+	if (pci_find_device(PCI_VENDOR_ID_INTEL, 0x0483, NULL))
+		csr0 = 0x00A04800;
+	/* The dreaded SiS496 486 chipset. Same workaround as above. */
+	if (pci_find_device(PCI_VENDOR_ID_SI, 0x0496, NULL))
+		csr0 = 0x00A04800;
+		
+	/*
+	 *	And back to business
+	 */
+ 
 	ioaddr = pci_resource_start (pdev, 0);
 	irq = pdev->irq;
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/parport/ieee1284.c linux/drivers/parport/ieee1284.c
--- v2.4.0-prerelease/linux/drivers/parport/ieee1284.c	Wed Jul 19 10:34:52 2000
+++ linux/drivers/parport/ieee1284.c	Thu Jan  4 13:00:55 2001
@@ -524,7 +524,8 @@
 					     PARPORT_STATUS_PAPEROUT,
 					     PARPORT_STATUS_PAPEROUT);
 		if (r)
-			DPRINTK (KERN_INFO "%s: Timeout at event 31\n");
+			DPRINTK (KERN_INFO "%s: Timeout at event 31\n",
+				 port->name);
 
 		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
 		DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/parport/parport_pc.c linux/drivers/parport/parport_pc.c
--- v2.4.0-prerelease/linux/drivers/parport/parport_pc.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/parport/parport_pc.c	Thu Jan  4 13:12:48 2001
@@ -2113,6 +2113,9 @@
 		printmode(DMA);
 	}
 #undef printmode
+#ifndef CONFIG_PARPORT_1284
+	printk ("(,...)");
+#endif /* CONFIG_PARPORT_1284 */
 	printk("]\n");
 	if (probedirq != PARPORT_IRQ_NONE) 
 		printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq);
@@ -2182,7 +2185,8 @@
 /* Via support maintained by Jeff Garzik <jgarzik@mandrakesoft.com> */
 static int __devinit sio_via_686a_probe (struct pci_dev *pdev)
 {
-	u8 dma, irq, tmp;
+	u8 tmp;
+	int dma, irq;
 	unsigned port1, port2, have_eppecp;
 
 	/*
@@ -2235,21 +2239,19 @@
 	 */
 
 	/* 0x50_3-2: PnP Routing for Parallel Port DRQ */
-	pci_read_config_byte (pdev, 0x50, &dma);
-	dma = ((dma >> 2) & 0x03);
+	pci_read_config_byte (pdev, 0x50, &tmp);
+	dma = ((tmp >> 2) & 0x03);
 	
 	/* 0x51_7-4: PnP Routing for Parallel Port IRQ */
-	pci_read_config_byte (pdev, 0x51, &irq);
-	irq = ((irq >> 4) & 0x0F);
+	pci_read_config_byte (pdev, 0x51, &tmp);
+	irq = ((tmp >> 4) & 0x0F);
 
 	/* filter bogus IRQs */
-	/* 255 means NONE, and is bogus as well */
 	switch (irq) {
 	case 0:
 	case 2:
 	case 8:
 	case 13:
-	case 255:
 		irq = PARPORT_IRQ_NONE;
 		break;
 
@@ -2258,15 +2260,18 @@
 	}
 
 	/* if ECP not enabled, DMA is not enabled, assumed bogus 'dma' value */
-	/* 255 means NONE. Looks like some BIOS don't set the DMA correctly
-	 * even on ECP mode */
-	if (!have_eppecp || dma == 255)
+	if (!have_eppecp)
 		dma = PARPORT_DMA_NONE;
 
 	/* finally, do the probe with values obtained */
 	if (parport_pc_probe_port (port1, port2, irq, dma, NULL)) {
-		printk (KERN_INFO "parport_pc: Via 686A parallel port: io=0x%X, irq=%d, dma=%d\n",
-			port1, irq, dma);
+		printk (KERN_INFO
+			"parport_pc: Via 686A parallel port: io=0x%X", port1);
+		if (irq != PARPORT_IRQ_NONE)
+			printk (", irq=%d", irq);
+		if (dma != PARPORT_DMA_NONE)
+			printk (", dma=%d", dma);
+		printk ("\n");
 		return 1;
 	}
 	
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/parport/probe.c linux/drivers/parport/probe.c
--- v2.4.0-prerelease/linux/drivers/parport/probe.c	Thu Feb 24 10:08:32 2000
+++ linux/drivers/parport/probe.c	Thu Jan  4 13:12:53 2001
@@ -72,8 +72,12 @@
 		if (q) *q = 0;
 		sep = strchr(p, ':');
 		if (sep) {
-			char *u = p;
+			char *u;
 			*(sep++) = 0;
+			/* Get rid of trailing blanks */
+			u = strchr (sep, ' ');
+			if (u) *u = '\0';
+			u = p;
 			while (*u) {
 				*u = toupper(*u);
 				u++;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/pci/pci.ids linux/drivers/pci/pci.ids
--- v2.4.0-prerelease/linux/drivers/pci/pci.ids	Mon Jan  1 09:38:35 2001
+++ linux/drivers/pci/pci.ids	Tue Jan  2 16:58:45 2001
@@ -734,6 +734,7 @@
 	0601  85C601
 	0620  620 Host
 	0630  630 Host
+	0730  730 Host
 	0900  SiS900 10/100 Ethernet
 		1039 0900  SiS900 10/100 Ethernet Adapter
 	3602  83C602
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/sbus/char/zs.c linux/drivers/sbus/char/zs.c
--- v2.4.0-prerelease/linux/drivers/sbus/char/zs.c	Tue Oct 31 12:42:26 2000
+++ linux/drivers/sbus/char/zs.c	Thu Jan  4 12:50:17 2001
@@ -1,4 +1,4 @@
-/* $Id: zs.c,v 1.60 2000/10/14 10:09:04 davem Exp $
+/* $Id: zs.c,v 1.61 2001/01/03 08:08:49 ecd Exp $
  * zs.c: Zilog serial port driver for the Sparc.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -944,7 +944,6 @@
  */
 static void change_speed(struct sun_serial *info)
 {
-	unsigned short port;
 	unsigned cflag;
 	int	quot = 0;
 	int	i;
@@ -953,7 +952,7 @@
 	if (!info->tty || !info->tty->termios)
 		return;
 	cflag = info->tty->termios->c_cflag;
-	if (!(port = info->port))
+	if (!info->port)
 		return;
 	i = cflag & CBAUD;
 	if (cflag & CBAUDEX) {
@@ -1913,7 +1912,7 @@
 
 static void show_serial_version(void)
 {
-	char *revision = "$Revision: 1.60 $";
+	char *revision = "$Revision: 1.61 $";
 	char *version, *p;
 
 	version = strchr(revision, ' ');
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/README.ibmmca linux/drivers/scsi/README.ibmmca
--- v2.4.0-prerelease/linux/drivers/scsi/README.ibmmca	Mon Jan  1 09:38:35 2001
+++ linux/drivers/scsi/README.ibmmca	Thu Jan  4 12:37:14 2001
@@ -1,5 +1,4 @@
 
-
                -=< The IBM Microchannel SCSI-Subsystem >=-
 	       
 	                 for the IBM PS/2 series
@@ -8,13 +7,22 @@
 		 
      Copyright (c) 1995 Strom Systems, Inc. under the terms of the GNU 
   General Public License. Originally written by Martin Kolinek, December 1995.
-         Officially maintained by Michael Lang since January 1999.
+   Officially modified and maintained by Michael Lang since January 1999.
 	   
- 	                       Version 4.0
-	
+ 	                       Version 4.0a
 	
-   Last update: 28 December 2000
-
+   Last update: January 3, 2001
+   
+   Before you Start
+   ----------------
+   This is the common README.ibmmca file for all driver releases of the 
+   IBM MCA SCSI driver for Linux. Please note, that driver releases 4.0
+   or newer do not work with kernel versions older than 2.4.0, while driver
+   versions older than 4.0 do not work with kernels 2.4.0 or later! If you
+   try to compile your kernel with the wrong driver source, the 
+   compilation is aborted and you get a corresponding error message. This is
+   no bug in the driver. It prevents you from using the wrong source code
+   with the wrong kernel version.
 
    Authors of this Driver
    ----------------------
@@ -22,9 +30,10 @@
     - Martin Kolinek (origin, first release of this driver)
     - Klaus Kudielka (multiple SCSI-host management/detection, adaption to
                       Linux Kernel 2.1.x, module support)
-    - Michael Lang (assigning original pun,lun mapping, dynamical ldn 
-                    assignment, this file, patch, official driver maintenance
-		    and subsequent pains related with the driver :-))
+    - Michael Lang (assigning original pun/lun mapping, dynamical ldn
+                    assignment, rewritten adapter detection, this file, 
+		    patches, official driver maintenance and subsequent 
+		    debugging, related with the driver)
 
    Table of Contents
    -----------------
@@ -362,7 +371,7 @@
    The following IBM SCSI-subsystems are supported by this driver:
    
      - IBM Fast/Wide SCSI-2 Adapter
-     - IBM 7568 Industrial Computer SCSI Adapter w/cache
+     - IBM 7568 Industrial Computer SCSI Adapter w/Cache
      - IBM Expansion Unit SCSI Controller
      - IBM SCSI Adapter w/Cache
      - IBM SCSI Adapter
@@ -377,7 +386,11 @@
    are fully implemented up from version 3.1e of the driver. This means, that
    you just need the latest ibmmca.h and ibmmca.c file and copy it in the
    linux/drivers/scsi directory. The code is automatically adapted during 
-   kernel compilation.
+   kernel compilation. This is different from kernel 2.4.0! Here version 
+   4.0 or later of the driver must be used for kernel 2.4.0 or later. Version
+   4.0 or later does not work together with older kernels! Driver versions
+   older than 4.0 do not work together with kernel 2.4.0 or later. They work
+   on all older kernels.
 
    3 Code History
    --------------
@@ -890,31 +903,50 @@
    2) Inquiry requests can be shorter than 255 bytes of return buffer. Due
       to a bug in the ibmmca_queuecommand routine, this buffer was forced
       to 255 at minimum. If the memory address, this return buffer is pointing
-      does not offer more space, invalid memory accesses destabilized the
+      to does not offer more space, invalid memory accesses destabilized the
       kernel.
    3) version 4.0 is only valid for kernel 2.4.0 or later. This is necessary
       to remove old kernel version dependant waste from the driver. 3.2d is
       only distributed with older kernels but keeps compatibility with older
-      kernel versions.
+      kernel versions. 4.0 and higher versions cannot be used with older 
+      kernels anymore!! You must have at least kernel 2.4.0!!
    4) The commandline argument 'bypass' and all its functionality got removed
-      in version 4.0. This was never really necessary and is kept in 3.2X for
-      debugging reasons.
-   5) Dynamical reassignment of ldns was again verified and should work now.
+      in version 4.0. This was never really necessary, as all troubles were
+      based on non-command related reasons up to now, so bypassing commands
+      did not help to avoid any bugs. It is kept in 3.2X for debugging reasons.
+   5) Dynamical reassignment of ldns was again verified and analyzed to be
+      completely inoperational. This is corrected and should work now.
    6) All commands that get sent to the SCSI adapter were verified and
       completed in such a way, that they are now completely conform to the
       demands in the technical description of IBM. Main candidates were the
       DEVICE_INQUIRY, REQUEST_SENSE and DEVICE_CAPACITY commands. They must
       be tranferred by bypassing the internal command buffer of the adapter
-      or else the response is a random result.
+      or else the response can be a random result. GET_POS_INFO would be more
+      safe in usage, if one could use the SUPRESS_EXCEPTION_SHORT, but this
+      is not allowed by the technical references of IBM. (Sorry, folks, the
+      model 80 problem is still a task to be solved in a different way.)
    7) v3.2d is still hold back for some days for testing, while 4.0 is 
       released.
    - Michael Lang
+   
+   January 3, 2001 (v4.0a)
+   1) A lot of complaints after the 2.4.0-prerelease kernel came in about
+      the impossibility to compile the driver as a module. This problem is
+      solved. In combination with that problem, some imprecise declaration
+      of the function option_setup() gave some warnings during compilation.
+      This is solved, too, by a forward declaration in ibmmca.c.
+   2) #ifdef argument concerning CONFIG_SCSI_IBMMCA is no longer needed and
+      was entirely removed.
+   3) Some switch statements got optimized in code, as some minor variables
+      in internal SCSI-command handlers.
+   - Michael Lang
 
    4 To do
    -------
-        - IBM SCSI-2 F/W external SCSI bus support in seperate mode.
+        - IBM SCSI-2 F/W external SCSI bus support in separate mode!
 	- It seems that the handling of bad disks is really bad -
-	  non-existent, in fact.
+	  non-existent, in fact. However, a low-level driver cannot help
+	  much, if such things happen.
 
    5 Users' Manual
    ---------------
@@ -1032,6 +1064,7 @@
    should try first to remove your commandline arguments of such type with a 
    newer driver. I bet, it will be recognized correctly. Even multiple and 
    different types of IBM SCSI-adapters should be recognized correctly, too.
+   Use the forced detection method only as last solution!
    
    Examples:
    
@@ -1079,6 +1112,10 @@
 	was: http://www.uni-mainz.de/~langm000/linux.html
      Q: My SCSI-adapter is not recognized by the driver, what can I do?
      A: Just force it to be recognized by kernel parameters. See section 5.1.
+        If this really happens, do also send e-mail to the maintainer, as
+	forced detection should never be necessary. Forced detection is in
+	principle some flaw of the driver adapter detection and goes into 
+	bugreports.
      Q: The driver screws up, if it starts to probe SCSI-devices, is there
         some way out of it?
      A: Yes, that was some recognition problem of the correct SCSI-adapter
@@ -1142,7 +1179,7 @@
 	is NULL. From version 3.2, it is taken care of this.
      Q: I have a F/W adapter and the driver sees my internal SCSI-devices,
         but ignores the external ones.
-     A: Select combined busmode in the config-program and check for that
+     A: Select combined busmode in the IBM config-program and check for that
         no SCSI-id on the external devices appears on internal devices.
         Reboot afterwards. Dual busmode is supported, but works only for the
 	internal bus, yet. External bus is still ignored. Take care for your
@@ -1173,7 +1210,8 @@
 	everything is running smoothly.
      A: No real answer, yet. In any case, one should force the kernel to
         present SCBs only below the 16 MBytes barrier. Maybe this solves the
-	problem. Not yet tried, but guessing that it could work.
+	problem. Not yet tried, but guessing that it could work. To get this,
+	set unchecked_isa_dma argument of ibmmca.h from 0 to 1.
 
    5.3 Bugreports
    --------------
@@ -1215,7 +1253,7 @@
    Here you can find info about the background of this driver, patches,
    troubleshooting support, news and a bugreport form. Please check that
    WWW-page regularly for latest hints. If ever this URL changes, please 
-   refer to the MAINTAINERS File in order to get the latest address.
+   refer to the MAINTAINERS file in order to get the latest address.
    
    For the bugreport, please fill out the formular on the corresponding
    WWW-page. Read the dedicated instructions and write as much as you
@@ -1310,7 +1348,7 @@
    Erik Weber
                 for the great deal we made about a model 9595 and the nice
                 surrounding equipment and the cool trip to Mannheim
-                second-hand computer market. In addition, I would like to 
+                second-hand computer market. In addition, I would like
 		to thank him for his exhaustive SCSI-driver testing on his 
 		95er PS/2 park.
    Anthony Hogbin
@@ -1326,7 +1364,7 @@
                 for his model 30, which serves me as part of my teststand
 		and his cool remark about how you make an ordinary diskette
 		drive working and how to connect it to an IBM-diskette port.
-   Johannes Gutenberg-University, Mainz &
+   Johannes Gutenberg-Universitaet, Mainz &
    Institut fuer Kernphysik, Mainz Microtron (MAMI)
                 for the offered space, the link, placed on the central
                 homepage and the space to store and offer the driver and 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/ibmmca.c linux/drivers/scsi/ibmmca.c
--- v2.4.0-prerelease/linux/drivers/scsi/ibmmca.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/scsi/ibmmca.c	Thu Jan  4 12:37:14 2001
@@ -38,7 +38,7 @@
 #include <linux/config.h>
 
 /* current version of this driver-source: */
-#define IBMMCA_SCSI_DRIVER_VERSION "4.0"
+#define IBMMCA_SCSI_DRIVER_VERSION "4.0a"
 
 #define IBMLOCK spin_lock_irqsave(&io_request_lock, flags);
 #define IBMUNLOCK spin_unlock_irqrestore(&io_request_lock, flags);
@@ -490,6 +490,7 @@
 static int device_exists (int, int, int *, int *);
 static struct Scsi_Host *ibmmca_register(Scsi_Host_Template *,
 					 int, int, int, char *);
+static int option_setup(char *);
 /* local functions needed for proc_info */
 static int ldn_access_load(int, int);
 static int ldn_access_total_read_write(int);
@@ -750,7 +751,7 @@
 /* SCSI-SCB-command for device_inquiry */
 static int device_inquiry(int host_index, int ldn)
 {
-   int retries;
+   int retr;
    struct im_scb *scb;
    struct im_tsb *tsb;
    unsigned char *buf;
@@ -759,14 +760,14 @@
    tsb = &(ld(host_index)[ldn].tsb);
    buf = (unsigned char *)(&(ld(host_index)[ldn].buf));
    ld(host_index)[ldn].tsb.dev_status = 0; /* prepare statusblock */
-   for (retries = 0; retries < 3; retries++) {
+   for (retr=0; retr<3; retr++) {
       /* fill scb with inquiry command */
       scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT;
       scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
       last_scsi_command(host_index)[ldn] = IM_DEVICE_INQUIRY_CMD;
       last_scsi_type(host_index)[ldn] = IM_SCB;
       scb->sys_buf_adr = virt_to_bus(buf);
-      scb->sys_buf_length = 0xff; /* maximum bufferlength gives max info */
+      scb->sys_buf_length = 255; /* maximum bufferlength gives max info */
       scb->tsb_adr = virt_to_bus(tsb);
       /* issue scb to passed ldn, and busy wait for interrupt */
       got_interrupt(host_index) = 0;
@@ -780,7 +781,7 @@
 	return 1;
    }
    /*if all three retries failed, return "no device at this ldn" */
-   if (retries >= 3)
+   if (retr >= 3)
      return 0;
    else
      return 1;
@@ -788,7 +789,7 @@
 
 static int read_capacity(int host_index, int ldn)
 {
-   int retries;
+   int retr;
    struct im_scb *scb;
    struct im_tsb *tsb;
    unsigned char *buf;
@@ -797,7 +798,7 @@
    tsb = &(ld(host_index)[ldn].tsb);
    buf = (unsigned char *)(&(ld(host_index)[ldn].buf));
    ld(host_index)[ldn].tsb.dev_status = 0;
-   for (retries = 0; retries < 3; retries++) {
+   for (retr=0; retr<3; retr++) {
       /*fill scb with read capacity command */
       scb->command = IM_READ_CAPACITY_CMD;
       scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
@@ -818,7 +819,7 @@
 	return 1;
    }
    /*if all three retries failed, return "no device at this ldn" */
-   if (retries >= 3)
+   if (retr >= 3)
      return 0;
    else
      return 1;
@@ -826,7 +827,7 @@
 
 static int get_pos_info(int host_index)
 {
-   int retries;
+   int retr;
    struct im_scb *scb;
    struct im_tsb *tsb;
    unsigned char *buf;
@@ -835,7 +836,7 @@
    tsb = &(ld(host_index)[MAX_LOG_DEV].tsb);
    buf = (unsigned char *)(&(ld(host_index)[MAX_LOG_DEV].buf));
    ld(host_index)[MAX_LOG_DEV].tsb.dev_status = 0;
-   for (retries = 0; retries < 3; retries++) {
+   for (retr=0; retr<3; retr++) {
       /*fill scb with get_pos_info command */
       scb->command = IM_GET_POS_INFO_CMD;
       scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
@@ -859,7 +860,7 @@
 	return 1;
    }
    /* if all three retries failed, return "no device at this ldn" */
-   if (retries >= 3)
+   if (retr >= 3)
      return 0;
    else
      return 1;
@@ -872,33 +873,33 @@
                             unsigned int lun, unsigned int ldn,
                             unsigned int operation)
 {
-   int retries;
-   unsigned long imm_command;
+   int retr;
+   unsigned long imm_cmd;
 
-   for (retries=0; retries<3; retries ++) {
+   for (retr=0; retr<3; retr++) {
       /* select mutation level of the SCSI-adapter */
       switch (special(host_index)) {
        case IBM_SCSI2_FW:
-	 imm_command = (unsigned long)(IM_ASSIGN_IMM_CMD);
-	 imm_command |= (unsigned long)((lun & 7) << 24);
-	 imm_command |= (unsigned long)((operation & 1) << 23);
-	 imm_command |= (unsigned long)((pun & 7)<< 20)|((pun & 8)<< 24);
-	 imm_command |= (unsigned long)((ldn & 15) << 16);
+	 imm_cmd = (unsigned long)(IM_ASSIGN_IMM_CMD);
+	 imm_cmd |= (unsigned long)((lun & 7) << 24);
+	 imm_cmd |= (unsigned long)((operation & 1) << 23);
+	 imm_cmd |= (unsigned long)((pun & 7)<< 20)|((pun & 8)<< 24);
+	 imm_cmd |= (unsigned long)((ldn & 15) << 16);
 	 break;
        default:
-	 imm_command = inl(IM_CMD_REG(host_index));
-	 imm_command &= (unsigned long)(0xF8000000); /* keep reserved bits */
-	 imm_command |= (unsigned long)(IM_ASSIGN_IMM_CMD);
-	 imm_command |= (unsigned long)((lun & 7) << 24);
-	 imm_command |= (unsigned long)((operation & 1) << 23);
-	 imm_command |= (unsigned long)((pun & 7) << 20);
-	 imm_command |= (unsigned long)((ldn & 15) << 16);
+	 imm_cmd = inl(IM_CMD_REG(host_index));
+	 imm_cmd &= (unsigned long)(0xF8000000); /* keep reserved bits */
+	 imm_cmd |= (unsigned long)(IM_ASSIGN_IMM_CMD);
+	 imm_cmd |= (unsigned long)((lun & 7) << 24);
+	 imm_cmd |= (unsigned long)((operation & 1) << 23);
+	 imm_cmd |= (unsigned long)((pun & 7) << 20);
+	 imm_cmd |= (unsigned long)((ldn & 15) << 16);
 	 break;
       }
       last_scsi_command(host_index)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
       last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD;
       got_interrupt(host_index) = 0;
-      issue_cmd (host_index, (unsigned long)(imm_command), IM_IMM_CMD | MAX_LOG_DEV);
+      issue_cmd (host_index, (unsigned long)(imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
       while (!got_interrupt(host_index))
 	barrier ();
 
@@ -906,7 +907,7 @@
       if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
 	return 1;
    }
-   if (retries >= 3)
+   if (retr >= 3)
      return 0;
    else
      return 1;
@@ -915,21 +916,21 @@
 static int immediate_feature(int host_index, unsigned int speed,
 			     unsigned int timeout)
 {
-   int retries;
-   unsigned long imm_command;
+   int retr;
+   unsigned long imm_cmd;
 
-   for (retries=0; retries<3; retries ++) {
+   for (retr=0; retr<3; retr++) {
       /* select mutation level of the SCSI-adapter */
-      imm_command  = IM_FEATURE_CTR_IMM_CMD;
-      imm_command |= (unsigned long)((speed & 0x7) << 29);
-      imm_command |= (unsigned long)((timeout & 0x1fff) << 16);
+      imm_cmd  = IM_FEATURE_CTR_IMM_CMD;
+      imm_cmd |= (unsigned long)((speed & 0x7) << 29);
+      imm_cmd |= (unsigned long)((timeout & 0x1fff) << 16);
       last_scsi_command(host_index)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
       last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD;
       got_interrupt(host_index) = 0;
       /* we need to run into command errors in order to probe for the
        * right speed! */
       global_command_error_excuse = 1;
-      issue_cmd (host_index, (unsigned long)(imm_command), IM_IMM_CMD | MAX_LOG_DEV);
+      issue_cmd (host_index, (unsigned long)(imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
       while (!got_interrupt(host_index))
 	barrier ();
       if (global_command_error_excuse == CMD_FAIL) {
@@ -941,7 +942,7 @@
       if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
 	return 1;
    }
-   if (retries >= 3)
+   if (retr >= 3)
      return 0;
    else
      return 1;
@@ -988,33 +989,31 @@
 #endif
 
 /* type-interpreter for physical device numbers */
-static char *ti_p(int value)
+static char *ti_p(int dev)
 {
-   switch (value) {
-    case TYPE_IBM_SCSI_ADAPTER: return("A"); break;
-    case TYPE_DISK:             return("D"); break;
-    case TYPE_TAPE:             return("T"); break;
-    case TYPE_PROCESSOR:        return("P"); break;
-    case TYPE_WORM:             return("W"); break;
-    case TYPE_ROM:              return("R"); break;
-    case TYPE_SCANNER:          return("S"); break;
-    case TYPE_MOD:              return("M"); break;
-    case TYPE_MEDIUM_CHANGER:   return("C"); break;
-    case TYPE_NO_LUN:           return("+"); break; /* show NO_LUN */
-    case TYPE_NO_DEVICE:
-    default:                    return("-"); break;
+   switch (dev) {
+    case TYPE_IBM_SCSI_ADAPTER: return("A");
+    case TYPE_DISK:             return("D");
+    case TYPE_TAPE:             return("T");
+    case TYPE_PROCESSOR:        return("P");
+    case TYPE_WORM:             return("W");
+    case TYPE_ROM:              return("R");
+    case TYPE_SCANNER:          return("S");
+    case TYPE_MOD:              return("M");
+    case TYPE_MEDIUM_CHANGER:   return("C");
+    case TYPE_NO_LUN:           return("+"); /* show NO_LUN */
    }
-   return("-");
+   return("-"); /* TYPE_NO_DEVICE and others */
 }
 
 /* interpreter for logical device numbers (ldn) */
-static char *ti_l(int value)
+static char *ti_l(int val)
 {
    const char hex[16] = "0123456789abcdef";
    static char answer[2];
 
    answer[1] = (char)(0x0);
-   if (value<=MAX_LOG_DEV) answer[0] = hex[value]; else answer[0] = '-';
+   if (val<=MAX_LOG_DEV) answer[0] = hex[val]; else answer[0] = '-';
    return (char *)&answer;
 }
 
@@ -1022,14 +1021,14 @@
 static char *ibmrate(unsigned int speed, int i)
 {
    switch (speed) {
-    case 0: if (i) return "5.00"; else return "10.00"; break;
-    case 1: if (i) return "4.00"; else return "8.00"; break;
-    case 2: if (i) return "3.33"; else return "6.66"; break;
-    case 3: if (i) return "2.86"; else return "5.00"; break;
-    case 4: if (i) return "2.50"; else return "4.00"; break;
-    case 5: if (i) return "2.22"; else return "3.10"; break;
-    case 6: if (i) return "2.00"; else return "2.50"; break;
-    case 7: if (i) return "1.82"; else return "2.00"; break;
+    case 0: return i ? "5.00" : "10.00";
+    case 1: return i ? "4.00" : "8.00";
+    case 2: return i ? "3.33" : "6.66";
+    case 3: return i ? "2.86" : "5.00";
+    case 4: return i ? "2.50" : "4.00";
+    case 5: return i ? "2.22" : "3.10";
+    case 6: return i ? "2.00" : "2.50";
+    case 7: return i ? "1.82" : "2.00";
    }
    return "---";
 }
@@ -1399,7 +1398,6 @@
    return 0;
 }
 
-#ifdef CONFIG_SCSI_IBMMCA
 void internal_ibmmca_scsi_setup (char *str, int *ints)
 {
    int i, j, io_base, id_base;
@@ -1436,7 +1434,6 @@
    }
    return;
 }
-#endif
 
 static int ibmmca_getinfo (char *buf, int slot, void *dev)
 {
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/mac_NCR5380.c linux/drivers/scsi/mac_NCR5380.c
--- v2.4.0-prerelease/linux/drivers/scsi/mac_NCR5380.c	Wed Aug 18 10:00:52 1999
+++ linux/drivers/scsi/mac_NCR5380.c	Thu Jan  4 13:00:55 2001
@@ -3134,6 +3134,10 @@
 #endif /* 1 */
 }
 
+static Scsi_Host_Template driver_template = MAC_NCR5380;
+
+#include "scsi_module.c"
+
 /* Local Variables: */
 /* tab-width: 8     */
 /* End:             */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/mac_esp.c linux/drivers/scsi/mac_esp.c
--- v2.4.0-prerelease/linux/drivers/scsi/mac_esp.c	Mon Jun 19 17:59:41 2000
+++ linux/drivers/scsi/mac_esp.c	Thu Jan  4 13:00:55 2001
@@ -709,3 +709,7 @@
 	printk("mac_esp: dma_setup_quick\n");
 #endif
 }
+
+static Scsi_Host_Template driver_template = SCSI_MAC_ESP;
+
+#include "scsi_module.c"
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/osst.c linux/drivers/scsi/osst.c
--- v2.4.0-prerelease/linux/drivers/scsi/osst.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/scsi/osst.c	Thu Jan  4 13:00:55 2001
@@ -31,6 +31,7 @@
 #define OSST_FW_NEED_POLL_MAX 10708 /*(108D)*/
 #define OSST_FW_NEED_POLL(x,d) ((x) >= OSST_FW_NEED_POLL_MIN && (x) <= OSST_FW_NEED_POLL_MAX && d->host->this_id != 7)
 
+#include <linux/config.h>
 #include <linux/module.h>
 
 #include <linux/fs.h>
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/ppa.c linux/drivers/scsi/ppa.c
--- v2.4.0-prerelease/linux/drivers/scsi/ppa.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/scsi/ppa.c	Thu Jan  4 13:00:55 2001
@@ -31,6 +31,7 @@
     Scsi_Cmnd *cur_cmd;		/* Current queued command       */
     struct tq_struct ppa_tq;	/* Polling interupt stuff       */
     unsigned long jstart;	/* Jiffies at start             */
+    unsigned long recon_tmo;    /* How many usecs to wait for reconnection (6th bit) */
     unsigned int failed:1;	/* Failure flag                 */
     unsigned int p_busy:1;	/* Parport sharing busy flag    */
 } ppa_struct;
@@ -43,6 +44,7 @@
 	cur_cmd:	NULL,		\
 	ppa_tq:		{ routine: ppa_interrupt },	\
 	jstart:		0,		\
+	recon_tmo:      PPA_RECON_TMO,	\
 	failed:		0,		\
 	p_busy:		0		\
 }
@@ -248,6 +250,12 @@
 	ppa_hosts[hostno].mode = x;
 	return length;
     }
+    if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) {
+	x = simple_strtoul(buffer + 10, NULL, 0);
+	ppa_hosts[hostno].recon_tmo = x;
+        printk("ppa: recon_tmo set to %ld\n", x);
+	return length;
+    }
     printk("ppa /proc: invalid variable\n");
     return (-EINVAL);
 }
@@ -268,6 +276,9 @@
     len += sprintf(buffer + len, "Version : %s\n", PPA_VERSION);
     len += sprintf(buffer + len, "Parport : %s\n", ppa_hosts[i].dev->port->name);
     len += sprintf(buffer + len, "Mode    : %s\n", PPA_MODE_STRING[ppa_hosts[i].mode]);
+#if PPA_DEBUG > 0
+    len += sprintf(buffer + len, "recon_tmo : %lu\n", ppa_hosts[i].recon_tmo);
+#endif
 
     /* Request for beyond end of buffer */
     if (offset > length)
@@ -556,6 +567,7 @@
     k = PPA_SELECT_TMO;
     do {
 	k--;
+	udelay(1);
     } while ((r_str(ppb) & 0x40) && (k));
     if (!k)
 	return 0;
@@ -569,6 +581,7 @@
     k = PPA_SELECT_TMO;
     do {
 	k--;
+	udelay(1);
     }
     while (!(r_str(ppb) & 0x40) && (k));
     if (!k)
@@ -652,6 +665,7 @@
      *  1     Finished data transfer
      */
     int host_no = cmd->host->unique_id;
+    unsigned short ppb = PPA_BASE(host_no);
     unsigned long start_jiffies = jiffies;
 
     unsigned char r, v;
@@ -663,7 +677,11 @@
 	    (v == WRITE_6) ||
 	    (v == WRITE_10));
 
-    r = ppa_wait(host_no); /* Need a ppa_wait() - PJC */
+    /*
+     * We only get here if the drive is ready to communicate,
+     * hence no need for a full ppa_wait.
+     */
+    r = (r_str(ppb) & 0xf0);
 
     while (r != (unsigned char) 0xf0) {
 	/*
@@ -673,12 +691,36 @@
 	if (time_after(jiffies, start_jiffies + 1))
 	    return 0;
 
-	if (((r & 0xc0) != 0xc0) || (cmd->SCp.this_residual <= 0)) {
+	if ((cmd->SCp.this_residual <= 0)) {
 	    ppa_fail(host_no, DID_ERROR);
 	    return -1;		/* ERROR_RETURN */
 	}
-	/* determine if we should use burst I/O */ fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
-	    ? PPA_BURST_SIZE : 1;
+
+	/* On some hardware we have SCSI disconnected (6th bit low)
+	 * for about 100usecs. It is too expensive to wait a 
+	 * tick on every loop so we busy wait for no more than
+	 * 500usecs to give the drive a chance first. We do not 
+	 * change things for "normal" hardware since generally 
+	 * the 6th bit is always high.
+	 * This makes the CPU load higher on some hardware 
+	 * but otherwise we cannot get more than 50K/secs 
+	 * on this problem hardware.
+	 */
+	if ((r & 0xc0) != 0xc0) {
+	   /* Wait for reconnection should be no more than 
+	    * jiffy/2 = 5ms = 5000 loops
+	    */
+	   unsigned long k = ppa_hosts[host_no].recon_tmo; 
+	   for (; k && ((r = (r_str(ppb) & 0xf0)) & 0xc0) != 0xc0; k--)
+	     udelay(1);
+
+	   if(!k) 
+	     return 0;
+	}	   
+
+	/* determine if we should use burst I/O */ 
+	fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE)) 
+	     ? PPA_BURST_SIZE : 1;
 
 	if (r == (unsigned char) 0xc0)
 	    status = ppa_out(host_no, cmd->SCp.ptr, fast);
@@ -701,7 +743,7 @@
 	    }
 	}
 	/* Now check to see if the drive is ready to comunicate */
-	r = ppa_wait(host_no); /* need ppa_wait() - PJC */
+	r = (r_str(ppb) & 0xf0);
 	/* If not, drop back down to the scheduler and wait a timer tick */
 	if (!(r & 0x80))
 	    return 0;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/ppa.h linux/drivers/scsi/ppa.h
--- v2.4.0-prerelease/linux/drivers/scsi/ppa.h	Mon Dec 11 17:59:44 2000
+++ linux/drivers/scsi/ppa.h	Thu Jan  4 13:00:55 2001
@@ -10,7 +10,7 @@
 #ifndef _PPA_H
 #define _PPA_H
 
-#define   PPA_VERSION   "2.06 (for Linux 2.2.x)"
+#define   PPA_VERSION   "2.07 (for Linux 2.4.x)"
 
 /* 
  * this driver has been hacked by Matteo Frigo (athena@theory.lcs.mit.edu)
@@ -56,11 +56,20 @@
  * Add ppa_wait() calls to ppa_completion()
  *  by Peter Cherriman <pjc@ecs.soton.ac.uk> and
  *     Tim Waugh <twaugh@redhat.com>
- *                                                      [2.04]
+ *							[2.04]
+ *
  * Fix kernel panic on scsi timeout, 2000-08-18		[2.05]
  *
  * Avoid io_request_lock problems.
  * John Cavan <johncavan@home.com>			[2.06]
+ *
+ * Busy wait for connected status bit in ppa_completion()
+ *  in order to cope with some hardware that has this bit low
+ *  for short periods of time.
+ * Add udelay() to ppa_select()
+ *  by Peter Cherriman <pjc@ecs.soton.ac.uk> and
+ *     Oleg Makarenko <omakarenko@cyberplat.ru>         
+ *                                                      [2.07]
  */
 /* ------ END OF USER CONFIGURABLE PARAMETERS ----- */
 
@@ -116,6 +125,7 @@
 #define PPA_BURST_SIZE	512	/* data burst size */
 #define PPA_SELECT_TMO  5000	/* how long to wait for target ? */
 #define PPA_SPIN_TMO    50000	/* ppa_wait loop limiter */
+#define PPA_RECON_TMO   500	/* scsi reconnection loop limiter */
 #define PPA_DEBUG	0	/* debuging option */
 #define IN_EPP_MODE(x) (x == PPA_EPP_8 || x == PPA_EPP_16 || x == PPA_EPP_32)
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/sg.c linux/drivers/scsi/sg.c
--- v2.4.0-prerelease/linux/drivers/scsi/sg.c	Tue Oct 31 12:42:27 2000
+++ linux/drivers/scsi/sg.c	Thu Jan  4 12:50:17 2001
@@ -650,6 +650,11 @@
     }
 /*  SCSI_LOG_TIMEOUT(7, printk("sg_write: allocating device\n")); */
     SRpnt = scsi_allocate_request(sdp->device);
+    if(SRpnt == NULL) {
+    	SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n"));
+    	sg_finish_rem_req(srp);
+    	return -ENOMEM;
+    }
 
 /*  SCSI_LOG_TIMEOUT(7, printk("sg_write: device allocated\n")); */
     srp->my_cmdp = SRpnt;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/sun3x_esp.c linux/drivers/scsi/sun3x_esp.c
--- v2.4.0-prerelease/linux/drivers/scsi/sun3x_esp.c	Thu Aug 12 10:28:34 1999
+++ linux/drivers/scsi/sun3x_esp.c	Thu Jan  4 13:00:55 2001
@@ -6,13 +6,13 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/delay.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/malloc.h>
 #include <linux/blk.h>
 #include <linux/proc_fs.h>
 #include <linux/stat.h>
+#include <linux/delay.h>
 
 #include "scsi.h"
 #include "hosts.h"
@@ -288,3 +288,7 @@
 {
     sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dvma_address);
 }
+
+static Scsi_Host_Template driver_template = SCSI_SUN3X_ESP;
+
+#include "scsi_module.c"
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/scsi/sym53c8xx.c linux/drivers/scsi/sym53c8xx.c
--- v2.4.0-prerelease/linux/drivers/scsi/sym53c8xx.c	Sun Oct  8 10:50:25 2000
+++ linux/drivers/scsi/sym53c8xx.c	Mon Jan  1 10:23:21 2001
@@ -11862,7 +11862,7 @@
 out_clrack:
 	OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
 	return;
-out_stuck:
+out_stuck:;
 }
 
 
@@ -14830,10 +14830,10 @@
 	return retv;
 }
 
-#undef SET_BIT 0
-#undef CLR_BIT 1
-#undef SET_CLK 2
-#undef CLR_CLK 3
+#undef SET_BIT /* 0 */
+#undef CLR_BIT /* 1 */
+#undef SET_CLK /* 2 */
+#undef CLR_CLK /* 3 */
 
 /*
  *  Try reading Symbios NVRAM.
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/sound/cs46xx.c linux/drivers/sound/cs46xx.c
--- v2.4.0-prerelease/linux/drivers/sound/cs46xx.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/sound/cs46xx.c	Mon Jan  1 11:24:54 2001
@@ -1971,7 +1971,6 @@
 			start_adc(state);
 			if (file->f_flags & O_NONBLOCK) {
 				if (!ret) ret = -EAGAIN;
-				remove_wait_queue(&state->dmabuf.wait, &wait);
 				break;
  			}
 			schedule();
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/sound/i810_audio.c linux/drivers/sound/i810_audio.c
--- v2.4.0-prerelease/linux/drivers/sound/i810_audio.c	Mon Jan  1 09:38:35 2001
+++ linux/drivers/sound/i810_audio.c	Thu Jan  4 12:50:17 2001
@@ -144,13 +144,13 @@
 
 #define ENUM_ENGINE(PRE,DIG) 									\
 enum {												\
-	##PRE##_BDBAR =	0x##DIG##0,		/* Buffer Descriptor list Base Address */	\
-	##PRE##_CIV =	0x##DIG##4,		/* Current Index Value */			\
-	##PRE##_LVI =	0x##DIG##5,		/* Last Valid Index */				\
-	##PRE##_SR =	0x##DIG##6,		/* Status Register */				\
-	##PRE##_PICB =	0x##DIG##8,		/* Position In Current Buffer */		\
-	##PRE##_PIV =	0x##DIG##a,		/* Prefetched Index Value */			\
-	##PRE##_CR =	0x##DIG##b		/* Control Register */				\
+	PRE##_BDBAR =	0x##DIG##0,		/* Buffer Descriptor list Base Address */	\
+	PRE##_CIV =	0x##DIG##4,		/* Current Index Value */			\
+	PRE##_LVI =	0x##DIG##5,		/* Last Valid Index */				\
+	PRE##_SR =	0x##DIG##6,		/* Status Register */				\
+	PRE##_PICB =	0x##DIG##8,		/* Position In Current Buffer */		\
+	PRE##_PIV =	0x##DIG##a,		/* Prefetched Index Value */			\
+	PRE##_CR =	0x##DIG##b		/* Control Register */				\
 }
 
 ENUM_ENGINE(OFF,0);	/* Offsets */
@@ -770,7 +770,10 @@
 	swptr = dmabuf->swptr;
 	spin_unlock_irqrestore(&state->card->lock, flags);
 
-	len = swptr % (dmabuf->dmasize/SG_LEN);
+	if(dmabuf->dmasize)
+		len = swptr % (dmabuf->dmasize/SG_LEN);
+	else
+		len = 0;
 	
 	memset(dmabuf->rawbuf + swptr, silence, len);
 
@@ -1800,7 +1803,7 @@
 			if(!(i810_ac97_get(codec, AC97_EXTENDED_STATUS)&1))
 			{
 				printk(KERN_WARNING "i810_audio: Codec refused to allow VRA, using 48Khz only.\n");
-					card->ac97_features&=~1;
+				card->ac97_features&=~1;
 			}
 		}
    		
@@ -1894,12 +1897,6 @@
 	}
 	pci_dev->driver_data = card;
 	pci_dev->dma_mask = I810_DMA_MASK;
-
-//	printk("resetting codec?\n");
-	outl(0, card->iobase + GLOB_CNT);
-	udelay(500);
-//	printk("bringing it back?\n");
-	outl(1<<1, card->iobase + GLOB_CNT);
 	return 0;
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/sound/mpu401.c linux/drivers/sound/mpu401.c
--- v2.4.0-prerelease/linux/drivers/sound/mpu401.c	Sun Nov 19 18:44:15 2000
+++ linux/drivers/sound/mpu401.c	Mon Jan  1 10:23:21 2001
@@ -1449,7 +1449,7 @@
 			}
 			break;
 
-		default:
+		default:;
 	}
 	return TIMER_NOT_ARMED;
 }
@@ -1559,7 +1559,7 @@
 			setup_metronome(midi_dev);
 			return 0;
 
-		default:
+		default:;
 	}
 	return -EINVAL;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/sound/sb_ess.c linux/drivers/sound/sb_ess.c
--- v2.4.0-prerelease/linux/drivers/sound/sb_ess.c	Fri Aug 11 08:26:43 2000
+++ linux/drivers/sound/sb_ess.c	Mon Jan  1 10:23:21 2001
@@ -770,7 +770,7 @@
 		case IMODE_INIT:
 			break;
 
-		default:
+		default:;
 			/* printk(KERN_WARN "ESS: Unexpected interrupt\n"); */
 	}
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/sound/sequencer.c linux/drivers/sound/sequencer.c
--- v2.4.0-prerelease/linux/drivers/sound/sequencer.c	Sun Oct  8 10:50:27 2000
+++ linux/drivers/sound/sequencer.c	Mon Jan  1 10:23:21 2001
@@ -511,7 +511,7 @@
 			synth_devs[dev]->aftertouch(dev, voice, parm);
 			break;
 
-		default:
+		default:;
 	}
 #undef dev
 #undef cmd
@@ -614,7 +614,7 @@
 				synth_devs[dev]->bender(dev, chn, w14);
 			break;
 
-		default:
+		default:;
 	}
 }
 
@@ -684,7 +684,7 @@
 			}
 			break;
 
-		default:
+		default:;
 	}
 
 	return TIMER_NOT_ARMED;
@@ -701,7 +701,7 @@
 			DMAbuf_start_devices(parm);
 			break;
 
-		default:
+		default:;
 	}
 }
 
@@ -859,7 +859,7 @@
 			seq_sysex_message(q);
 			break;
 
-		default:
+		default:;
 	}
 	return 0;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/sound/sound_timer.c linux/drivers/sound/sound_timer.c
--- v2.4.0-prerelease/linux/drivers/sound/sound_timer.c	Sun Oct  8 10:50:29 2000
+++ linux/drivers/sound/sound_timer.c	Mon Jan  1 10:23:21 2001
@@ -165,7 +165,7 @@
 			seq_copy_to_input(event, 8);
 			break;
 
-		default:
+		default:;
 	}
 	return TIMER_NOT_ARMED;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/acm.c linux/drivers/usb/acm.c
--- v2.4.0-prerelease/linux/drivers/usb/acm.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/acm.c	Thu Jan  4 13:15:32 2001
@@ -489,15 +489,6 @@
 	int readsize, ctrlsize, minor, i;
 	unsigned char *buf;
 
-/*
- * Since 0 is treated as a wildcard by the USB pattern matching,
- * we explicitly check bDeviceSubClass and bDeviceProtocol here.
- */
-
-	if (dev->descriptor.bDeviceSubClass != 0 ||
-	    dev->descriptor.bDeviceProtocol != 0)
-		return NULL;
-
 	for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
 
 		cfacm = dev->config + i;
@@ -632,7 +623,7 @@
  */
 
 static struct usb_device_id acm_ids[] = {
-	{ bDeviceClass: 2, bDeviceSubClass: 0, bDeviceProtocol: 0},
+	{ USB_DEVICE_INFO(2, 0, 0) },
 	{ }
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/audio.c linux/drivers/usb/audio.c
--- v2.4.0-prerelease/linux/drivers/usb/audio.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/audio.c	Thu Jan  4 13:15:32 2001
@@ -2698,7 +2698,8 @@
 static void usb_audio_disconnect(struct usb_device *dev, void *ptr);
 
 static struct usb_device_id usb_audio_ids [] = {
-    { bInterfaceClass: USB_CLASS_AUDIO, bInterfaceSubClass: 1},
+    { match_flags: (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS),
+      bInterfaceClass: USB_CLASS_AUDIO, bInterfaceSubClass: 1},
     { }						/* Terminating entry */
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/bluetooth.c linux/drivers/usb/bluetooth.c
--- v2.4.0-prerelease/linux/drivers/usb/bluetooth.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/bluetooth.c	Thu Jan  4 13:15:32 2001
@@ -193,12 +193,8 @@
 
 
 static struct usb_device_id usb_bluetooth_ids [] = {
-    {
-	bDeviceClass: WIRELESS_CLASS_CODE,
-	bDeviceSubClass: RF_SUBCLASS_CODE,
-	bDeviceProtocol: BLUETOOTH_PROGRAMMING_PROTOCOL_CODE
-    },
-    { }						/* Terminating entry */
+	{ USB_DEVICE_INFO(WIRELESS_CLASS_CODE, RF_SUBCLASS_CODE, BLUETOOTH_PROGRAMMING_PROTOCOL_CODE) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, usb_bluetooth_ids);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/dabusb.c linux/drivers/usb/dabusb.c
--- v2.4.0-prerelease/linux/drivers/usb/dabusb.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/dabusb.c	Thu Jan  4 13:15:32 2001
@@ -787,9 +787,9 @@
 }
 
 static struct usb_device_id dabusb_ids [] = {
-    { idVendor: 0x0547, idProduct: 0x2131 },
-    { idVendor: 0x0547, idProduct: 0x9999 },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(0x0547, 0x2131) },
+	{ USB_DEVICE(0x0547, 0x9999) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, dabusb_ids);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/dc2xx.c linux/drivers/usb/dc2xx.c
--- v2.4.0-prerelease/linux/drivers/usb/dc2xx.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/dc2xx.c	Thu Jan  4 13:15:32 2001
@@ -91,29 +91,28 @@
 
 /* table of cameras that work through this driver */
 static struct usb_device_id camera_table [] = {
-
 	/* These have the same application level protocol */  
-    { idVendor: 0x040a, idProduct: 0x0120 },		// Kodak DC-240
-    { idVendor: 0x040a, idProduct: 0x0130 },		// Kodak DC-280
-    { idVendor: 0x040a, idProduct: 0x0131 },		// Kodak DC-5000
-    { idVendor: 0x040a, idProduct: 0x0132 },		// Kodak DC-3400
+	{ USB_DEVICE(0x040a, 0x0120) },		// Kodak DC-240
+	{ USB_DEVICE(0x040a, 0x0130) },		// Kodak DC-280
+	{ USB_DEVICE(0x040a, 0x0131) },		// Kodak DC-5000
+	{ USB_DEVICE(0x040a, 0x0132) },		// Kodak DC-3400
 
 	/* These have a different application level protocol which
 	 * is part of the Flashpoint "DigitaOS".  That supports some
 	 * non-camera devices, and some non-Kodak cameras.
 	 */  
-    { idVendor: 0x040a, idProduct: 0x0100 },		// Kodak DC-220
-    { idVendor: 0x040a, idProduct: 0x0110 },		// Kodak DC-260
-    { idVendor: 0x040a, idProduct: 0x0111 },		// Kodak DC-265
-    { idVendor: 0x040a, idProduct: 0x0112 },		// Kodak DC-290
-    { idVendor: 0xf003, idProduct: 0x6002 },		// HP PhotoSmart C500
+	{ USB_DEVICE(0x040a, 0x0100) },		// Kodak DC-220
+	{ USB_DEVICE(0x040a, 0x0110) },		// Kodak DC-260
+	{ USB_DEVICE(0x040a, 0x0111) },		// Kodak DC-265
+	{ USB_DEVICE(0x040a, 0x0112) },		// Kodak DC-290
+	{ USB_DEVICE(0xf003, 0x6002) },		// HP PhotoSmart C500
 
 	/* Other USB devices may well work here too, so long as they
 	 * just stick to half duplex bulk packet exchanges.  That
 	 * means, among other things, no iso or interrupt endpoints.
 	 */
 
-    { }						/* Terminating entry */
+	{ }					/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, camera_table);
@@ -353,7 +352,7 @@
 
 
 
-static void * __devinit
+static void *
 camera_probe (struct usb_device *dev, unsigned int ifnum, const struct usb_device_id *camera_info)
 {
 	int				i;
@@ -451,7 +450,7 @@
 	return camera;
 }
 
-static void __devexit camera_disconnect(struct usb_device *dev, void *ptr)
+static void camera_disconnect(struct usb_device *dev, void *ptr)
 {
 	struct camera_state	*camera = (struct camera_state *) ptr;
 	int			subminor = camera->subminor;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/dsbr100.c linux/drivers/usb/dsbr100.c
--- v2.4.0-prerelease/linux/drivers/usb/dsbr100.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/dsbr100.c	Thu Jan  4 13:15:32 2001
@@ -102,8 +102,8 @@
 static int users = 0;
 
 static struct usb_device_id usb_dsbr100_table [] = {
-    { idVendor: DSB100_VENDOR, idProduct: DSB100_PRODUCT },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, usb_dsbr100_table);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/hid.c linux/drivers/usb/hid.c
--- v2.4.0-prerelease/linux/drivers/usb/hid.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/hid.c	Thu Jan  4 13:15:32 2001
@@ -1529,7 +1529,8 @@
 }
 
 static struct usb_device_id hid_usb_ids [] = {
-    { bInterfaceClass: USB_INTERFACE_CLASS_HID},
+    { match_flags: USB_DEVICE_ID_MATCH_INT_CLASS,
+      bInterfaceClass: USB_INTERFACE_CLASS_HID},
     { }						/* Terminating entry */
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/hub.c linux/drivers/usb/hub.c
--- v2.4.0-prerelease/linux/drivers/usb/hub.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/hub.c	Thu Jan  4 13:15:32 2001
@@ -765,7 +765,8 @@
 }
 
 static struct usb_device_id hub_id_table [] = {
-    { bInterfaceClass: USB_CLASS_HUB},
+    { match_flags: USB_DEVICE_ID_MATCH_INT_CLASS,
+      bInterfaceClass: USB_CLASS_HUB},
     { }						/* Terminating entry */
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/ibmcam.c linux/drivers/usb/ibmcam.c
--- v2.4.0-prerelease/linux/drivers/usb/ibmcam.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/ibmcam.c	Thu Jan  4 13:15:32 2001
@@ -3110,19 +3110,9 @@
 }
 
 static struct usb_device_id ibmcam_table [] = {
-    {
-	idVendor: 0x0545,
-	idProduct: 0x8080,
-	bcdDevice_lo: 0x0002,
-	bcdDevice_hi: 0x0002
-    },
-    {
-	idVendor: 0x0545,
-	idProduct: 0x8080,
-	bcdDevice_lo: 0X030a,
-	bcdDevice_hi: 0x030a
-    },
-    { }						/* Terminating entry */
+	{ USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002) },
+	{ USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, ibmcam_table);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/mdc800.c linux/drivers/usb/mdc800.c
--- v2.4.0-prerelease/linux/drivers/usb/mdc800.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/mdc800.c	Thu Jan  4 13:15:32 2001
@@ -870,8 +870,8 @@
 
 
 static struct usb_device_id mdc800_table [] = {
-    { idVendor: MDC800_VENDOR_ID, idProduct: MDC800_PRODUCT_ID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, mdc800_table);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/microtek.c linux/drivers/usb/microtek.c
--- v2.4.0-prerelease/linux/drivers/usb/microtek.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/microtek.c	Thu Jan  4 13:15:32 2001
@@ -824,15 +824,15 @@
 
 static struct usb_device_id mts_usb_ids [] =
 {
-	{idVendor: 0x4ce, idProduct: 0x0300},
-	{idVendor: 0x5da, idProduct: 0x0094},
-	{idVendor: 0x5da, idProduct: 0x0099},
-	{idVendor: 0x5da, idProduct: 0x009a},
-	{idVendor: 0x5da, idProduct: 0x00a0},
-	{idVendor: 0x5da, idProduct: 0x00a3},
-	{idVendor: 0x5da, idProduct: 0x80a3},
-	{idVendor: 0x5da, idProduct: 0x80ac},
-    { }						/* Terminating entry */
+	{ USB_DEVICE(0x4ce, 0x0300) },
+	{ USB_DEVICE(0x5da, 0x0094) },
+	{ USB_DEVICE(0x5da, 0x0099) },
+	{ USB_DEVICE(0x5da, 0x009a) },
+	{ USB_DEVICE(0x5da, 0x00a0) },
+	{ USB_DEVICE(0x5da, 0x00a3) },
+	{ USB_DEVICE(0x5da, 0x80a3) },
+	{ USB_DEVICE(0x5da, 0x80ac) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, mts_usb_ids);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/net1080.c linux/drivers/usb/net1080.c
--- v2.4.0-prerelease/linux/drivers/usb/net1080.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/net1080.c	Thu Jan  4 13:15:32 2001
@@ -57,11 +57,9 @@
 
 
 static const struct usb_device_id	products [] = {
-	{		// reference design
-	    idProduct:		0x1080,
-	    idVendor:		0x0525,
-	    driver_info:	(unsigned long) "NetChip TurboCONNECT",
-	},
+	// reference design
+	{ USB_DEVICE(0x1080, 0x525), 
+	  driver_info: (unsigned long) "NetChip TurboCONNECT" },
 	// Belkin, ...
 	{ },		// END
 };
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/ov511.c linux/drivers/usb/ov511.c
--- v2.4.0-prerelease/linux/drivers/usb/ov511.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/ov511.c	Thu Jan  4 13:15:32 2001
@@ -175,9 +175,9 @@
 };
 
 static __devinitdata struct usb_device_id device_table [] = {
-	{ idVendor: 0x05a9, idProduct: 0x0511 },  /* OV511 */
-	{ idVendor: 0x05a9, idProduct: 0xA511 },  /* OV511+ */
-	{ idVendor: 0x0813, idProduct: 0x0002 },  /* Intel Play Me2Cam OV511+ */
+	{ USB_DEVICE(0x05a9, 0x0511) },  /* OV511 */
+	{ USB_DEVICE(0x05a9, 0xA511) },  /* OV511+ */
+	{ USB_DEVICE(0x0813, 0x0002) },  /* Intel Play Me2Cam OV511+ */
 	{ }  /* Terminating entry */
 };
 
@@ -3228,7 +3228,7 @@
  *
  ***************************************************************************/
 
-static void * __devinit
+static void *
 ov511_probe(struct usb_device *dev, unsigned int ifnum,
 	const struct usb_device_id *id)
 {
@@ -3340,7 +3340,7 @@
 }
 
 
-static void __devexit 
+static void
 ov511_disconnect(struct usb_device *dev, void *ptr)
 {
 	struct usb_ov511 *ov511 = (struct usb_ov511 *) ptr;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/pegasus.c linux/drivers/usb/pegasus.c
--- v2.4.0-prerelease/linux/drivers/usb/pegasus.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/pegasus.c	Thu Jan  4 13:15:32 2001
@@ -70,7 +70,8 @@
 };
 
 static struct usb_device_id pegasus_ids[] = {
-#define	PEGASUS_DEV(pn, vid, pid, flags) {idVendor:vid, idProduct:pid},
+#define	PEGASUS_DEV(pn, vid, pid, flags) \
+	{match_flags: USB_DEVICE_ID_MATCH_DEVICE, idVendor:vid, idProduct:pid},
 #include "pegasus.h"
 #undef	PEGASUS_DEV
 	{ }
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/printer.c linux/drivers/usb/printer.c
--- v2.4.0-prerelease/linux/drivers/usb/printer.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/printer.c	Thu Jan  4 13:15:32 2001
@@ -613,10 +613,10 @@
 };
 
 static struct usb_device_id usblp_ids [] = {
-    { bInterfaceClass: 7, bInterfaceSubClass: 1, bInterfaceProtocol: 1},
-    { bInterfaceClass: 7, bInterfaceSubClass: 1, bInterfaceProtocol: 2},
-    { bInterfaceClass: 7, bInterfaceSubClass: 1, bInterfaceProtocol: 3},
-    { }						/* Terminating entry */
+	{ USB_INTERFACE_INFO(7, 1, 1) },
+	{ USB_INTERFACE_INFO(7, 1, 2) },
+	{ USB_INTERFACE_INFO(7, 1, 3) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, usblp_ids);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/rio500.c linux/drivers/usb/rio500.c
--- v2.4.0-prerelease/linux/drivers/usb/rio500.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/rio500.c	Thu Jan  4 13:15:32 2001
@@ -463,8 +463,8 @@
 };
 
 static struct usb_device_id rio_table [] = {
-    { idVendor: 0x0841, idProduct: 1 },		/* Rio 500 */
-    { }						/* Terminating entry */
+	{ USB_DEVICE(0x0841, 1) }, 		/* Rio 500 */
+	{ }					/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, rio_table);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/scanner.c linux/drivers/usb/scanner.c
--- v2.4.0-prerelease/linux/drivers/usb/scanner.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/scanner.c	Thu Jan  4 13:15:32 2001
@@ -247,75 +247,74 @@
 /* Table of scanners that may work with this driver */
 static struct usb_device_id scanner_device_ids [] = {
 	/* Acer */
-    { idVendor: 0x04a5, idProduct: 0x2060 },/* Prisa Acerscan 620U & 640U (!)*/
-    { idVendor: 0x04a5, idProduct: 0x2040 },/* Prisa AcerScan 620U (!) */
-    { idVendor: 0x04a5, idProduct: 0x2022 },/* Vuego Scan Brisa 340U */
+	{ USB_DEVICE(0x04a5, 0x2060) },	/* Prisa Acerscan 620U & 640U (!)*/
+	{ USB_DEVICE(0x04a5, 0x2040) },	/* Prisa AcerScan 620U (!) */
+	{ USB_DEVICE(0x04a5, 0x2022) },	/* Vuego Scan Brisa 340U */
 	/* Agfa */
-    { idVendor: 0x06bd, idProduct: 0x0001 },	/* SnapScan 1212U */
-    { idVendor: 0x06bd, idProduct: 0x0002 },	/* SnapScan 1236U */
-    { idVendor: 0x06bd, idProduct: 0x2061 },	/* Another SnapScan 1212U (?)*/
-    { idVendor: 0x06bd, idProduct: 0x0100 },	/* SnapScan Touch */
+	{ USB_DEVICE(0x06bd, 0x0001) },	/* SnapScan 1212U */
+	{ USB_DEVICE(0x06bd, 0x0002) },	/* SnapScan 1236U */
+	{ USB_DEVICE(0x06bd, 0x2061) },	/* Another SnapScan 1212U (?)*/
+	{ USB_DEVICE(0x06bd, 0x0100) },	/* SnapScan Touch */
 	/* Colorado -- See Primax/Colorado below */
 	/* Epson -- See Seiko/Epson below */
 	/* Genius */
-    { idVendor: 0x0458, idProduct: 0x2001 },	/* ColorPage-Vivid Pro */
+	{ USB_DEVICE(0x0458, 0x2001) },	/* ColorPage-Vivid Pro */
 	/* Hewlett Packard */
-    { idVendor: 0x03f0, idProduct: 0x0205 },	/* 3300C */
-    { idVendor: 0x03f0, idProduct: 0x0101 },	/* 4100C */
-    { idVendor: 0x03f0, idProduct: 0x0105 },	/* 4200C */
-    { idVendor: 0x03f0, idProduct: 0x0102 },	/* PhotoSmart S20 */
-    { idVendor: 0x03f0, idProduct: 0x0401 },	/* 5200C */
-    { idVendor: 0x03f0, idProduct: 0x0701 },	/* 5300C */
-    { idVendor: 0x03f0, idProduct: 0x0201 },	/* 6200C */
-    { idVendor: 0x03f0, idProduct: 0x0601 },	/* 6300C */
+	{ USB_DEVICE(0x03f0, 0x0205) },	/* 3300C */
+	{ USB_DEVICE(0x03f0, 0x0101) },	/* 4100C */
+	{ USB_DEVICE(0x03f0, 0x0105) },	/* 4200C */
+	{ USB_DEVICE(0x03f0, 0x0102) },	/* PhotoSmart S20 */
+	{ USB_DEVICE(0x03f0, 0x0401) },	/* 5200C */
+	{ USB_DEVICE(0x03f0, 0x0701) },	/* 5300C */
+	{ USB_DEVICE(0x03f0, 0x0201) },	/* 6200C */
+	{ USB_DEVICE(0x03f0, 0x0601) },	/* 6300C */
 	/* iVina */
-    { idVendor: 0x0638, idProduct: 0x0268 },     /* 1200U */
+	{ USB_DEVICE(0x0638, 0x0268) },     /* 1200U */
 	/* Microtek */
-    { idVendor: 0x05da, idProduct: 0x0099 },	/* ScanMaker X6 - X6U */
-    { idVendor: 0x05da, idProduct: 0x0094 },	/* Phantom 336CX - C3 */
-    { idVendor: 0x05da, idProduct: 0x00a0 },	/* Phantom 336CX - C3 #2 */
-    { idVendor: 0x05da, idProduct: 0x009a },	/* Phantom C6 */
-    { idVendor: 0x05da, idProduct: 0x00a3 },	/* ScanMaker V6USL */
-    { idVendor: 0x05da, idProduct: 0x80a3 },	/* ScanMaker V6USL #2 */
-    { idVendor: 0x05da, idProduct: 0x80ac },	/* ScanMaker V6UL - SpicyU */
+	{ USB_DEVICE(0x05da, 0x0099) },	/* ScanMaker X6 - X6U */
+	{ USB_DEVICE(0x05da, 0x0094) },	/* Phantom 336CX - C3 */
+	{ USB_DEVICE(0x05da, 0x00a0) },	/* Phantom 336CX - C3 #2 */
+	{ USB_DEVICE(0x05da, 0x009a) },	/* Phantom C6 */
+	{ USB_DEVICE(0x05da, 0x00a3) },	/* ScanMaker V6USL */
+	{ USB_DEVICE(0x05da, 0x80a3) },	/* ScanMaker V6USL #2 */
+	{ USB_DEVICE(0x05da, 0x80ac) },	/* ScanMaker V6UL - SpicyU */
 	/* Mustek */
-    { idVendor: 0x055f, idProduct: 0x0001 },	/* 1200 CU */
-    { idVendor: 0x0400, idProduct: 0x1000 },	/* BearPaw 1200 */
-    { idVendor: 0x055f, idProduct: 0x0002 },	/* 600 CU */
-    { idVendor: 0x055f, idProduct: 0x0003 },	/* 1200 USB */
-    { idVendor: 0x055f, idProduct: 0x0006 },	/* 1200 UB */
+	{ USB_DEVICE(0x055f, 0x0001) },	/* 1200 CU */
+	{ USB_DEVICE(0x0400, 0x1000) },	/* BearPaw 1200 */
+	{ USB_DEVICE(0x055f, 0x0002) },	/* 600 CU */
+	{ USB_DEVICE(0x055f, 0x0003) },	/* 1200 USB */
+	{ USB_DEVICE(0x055f, 0x0006) },	/* 1200 UB */
 	/* Primax/Colorado */
-    { idVendor: 0x0461, idProduct: 0x0300 },	/* G2-300 #1 */
-    { idVendor: 0x0461, idProduct: 0x0380 },	/* G2-600 #1 */
-    { idVendor: 0x0461, idProduct: 0x0301 },	/* G2E-300 #1 */
-    { idVendor: 0x0461, idProduct: 0x0381 },	/* ReadyScan 636i */
-    { idVendor: 0x0461, idProduct: 0x0302 },	/* G2-300 #2 */
-    { idVendor: 0x0461, idProduct: 0x0382 },	/* G2-600 #2 */
-    { idVendor: 0x0461, idProduct: 0x0303 },	/* G2E-300 #2 */
-    { idVendor: 0x0461, idProduct: 0x0383 },	/* G2E-600 */
-    { idVendor: 0x0461, idProduct: 0x0340 },	/* Colorado USB 9600 */
-    { idVendor: 0x0461, idProduct: 0x0360 },	/* Colorado USB 19200 */
-    { idVendor: 0x0461, idProduct: 0x0341 },	/* Colorado 600u */
-    { idVendor: 0x0461, idProduct: 0x0361 },	/* Colorado 1200u */
+	{ USB_DEVICE(0x0461, 0x0300) },	/* G2-300 #1 */
+	{ USB_DEVICE(0x0461, 0x0380) },	/* G2-600 #1 */
+	{ USB_DEVICE(0x0461, 0x0301) },	/* G2E-300 #1 */
+	{ USB_DEVICE(0x0461, 0x0381) },	/* ReadyScan 636i */
+	{ USB_DEVICE(0x0461, 0x0302) },	/* G2-300 #2 */
+	{ USB_DEVICE(0x0461, 0x0382) },	/* G2-600 #2 */
+	{ USB_DEVICE(0x0461, 0x0303) },	/* G2E-300 #2 */
+	{ USB_DEVICE(0x0461, 0x0383) },	/* G2E-600 */
+	{ USB_DEVICE(0x0461, 0x0340) },	/* Colorado USB 9600 */
+	{ USB_DEVICE(0x0461, 0x0360) },	/* Colorado USB 19200 */
+	{ USB_DEVICE(0x0461, 0x0341) },	/* Colorado 600u */
+	{ USB_DEVICE(0x0461, 0x0361) },	/* Colorado 1200u */
 	/* Seiko/Epson Corp. */
-    { idVendor: 0x04b8, idProduct: 0x0101 },/* Perfection 636U and 636Photo */
-    { idVendor: 0x04b8, idProduct: 0x0103 },/* Perfection 610 */
-    { idVendor: 0x04b8, idProduct: 0x0104 },/* Perfection 1200U and 1200Photo*/
-    { idVendor: 0x04b8, idProduct: 0x0106 },/* Stylus Scan 2500 */
-    { idVendor: 0x04b8, idProduct: 0x0107 },/* Expression 1600 */
+	{ USB_DEVICE(0x04b8, 0x0101) },	/* Perfection 636U and 636Photo */
+	{ USB_DEVICE(0x04b8, 0x0103) },	/* Perfection 610 */
+	{ USB_DEVICE(0x04b8, 0x0104) },	/* Perfection 1200U and 1200Photo*/
+	{ USB_DEVICE(0x04b8, 0x0106) },	/* Stylus Scan 2500 */
+	{ USB_DEVICE(0x04b8, 0x0107) },	/* Expression 1600 */
 	/* Umax */
-    { idVendor: 0x1606, idProduct: 0x0010 },	/* Astra 1220U */
-    { idVendor: 0x1606, idProduct: 0x0030 },	/* Astra 2000U */
-    { idVendor: 0x1606, idProduct: 0x0230 },	/* Astra 2200U */
+	{ USB_DEVICE(0x1606, 0x0010) },	/* Astra 1220U */
+	{ USB_DEVICE(0x1606, 0x0030) },	/* Astra 2000U */
+	{ USB_DEVICE(0x1606, 0x0230) },	/* Astra 2200U */
 	/* Visioneer */
-    { idVendor: 0x04a7, idProduct: 0x0221 },	/* OneTouch 5300 USB */
-    { idVendor: 0x04a7, idProduct: 0x0211 },	/* OneTouch 7600 USB */
-    { idVendor: 0x04a7, idProduct: 0x0231 },	/* 6100 USB */
-    { idVendor: 0x04a7, idProduct: 0x0311 },	/* 6200 EPP/USB */
-    { idVendor: 0x04a7, idProduct: 0x0321 },	/* OneTouch 8100 EPP/USB */
-    { idVendor: 0x04a7, idProduct: 0x0331 }, 	/* OneTouch 8600 EPP/USB */
-
-    { }						/* Terminating entry */
+	{ USB_DEVICE(0x04a7, 0x0221) },	/* OneTouch 5300 USB */
+	{ USB_DEVICE(0x04a7, 0x0211) },	/* OneTouch 7600 USB */
+	{ USB_DEVICE(0x04a7, 0x0231) },	/* 6100 USB */
+	{ USB_DEVICE(0x04a7, 0x0311) },	/* 6200 EPP/USB */
+	{ USB_DEVICE(0x04a7, 0x0321) },	/* OneTouch 8100 EPP/USB */
+	{ USB_DEVICE(0x04a7, 0x0331) }, /* OneTouch 8600 EPP/USB */
+	{ }				/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, scanner_device_ids);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/belkin_sa.c linux/drivers/usb/serial/belkin_sa.c
--- v2.4.0-prerelease/linux/drivers/usb/serial/belkin_sa.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/serial/belkin_sa.c	Thu Jan  4 13:15:32 2001
@@ -87,24 +87,24 @@
 
 
 static __devinitdata struct usb_device_id id_table_combined [] = {
-	{ idVendor: BELKIN_SA_VID,	idProduct: BELKIN_SA_PID },
-	{ idVendor: BELKIN_OLD_VID,	idProduct: BELKIN_OLD_PID },
-	{ idVendor: PERACOM_VID,	idProduct: PERACOM_PID },
+	{ USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
+	{ USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
+	{ USB_DEVICE(PERACOM_VID, PERACOM_PID) },
 	{ }							/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id belkin_sa_table [] = {
-	{ idVendor: BELKIN_SA_VID,	idProduct: BELKIN_SA_PID },
+	{ USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
 	{ }							/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id belkin_old_table [] = {
-	{ idVendor: BELKIN_OLD_VID,	idProduct: BELKIN_OLD_PID },
+	{ USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
 	{ }							/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id peracom_table [] = {
-	{ idVendor: PERACOM_VID,	idProduct: PERACOM_PID },
+	{ USB_DEVICE(PERACOM_VID, PERACOM_PID) },
 	{ }							/* Terminating entry */
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/digi_acceleport.c linux/drivers/usb/serial/digi_acceleport.c
--- v2.4.0-prerelease/linux/drivers/usb/serial/digi_acceleport.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/serial/digi_acceleport.c	Thu Jan  4 13:15:32 2001
@@ -473,19 +473,19 @@
 /* Statics */
 
 static __devinitdata struct usb_device_id id_table_combined [] = {
-    { idVendor: DIGI_VENDOR_ID, idProduct: DIGI_2_ID },
-    { idVendor: DIGI_VENDOR_ID, idProduct: DIGI_4_ID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
+	{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
+	{ }						/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id id_table_2 [] = {
-    { idVendor: DIGI_VENDOR_ID, idProduct: DIGI_2_ID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
+	{ }						/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id id_table_4 [] = {
-    { idVendor: DIGI_VENDOR_ID, idProduct: DIGI_4_ID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, id_table_combined);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/empeg.c linux/drivers/usb/serial/empeg.c
--- v2.4.0-prerelease/linux/drivers/usb/serial/empeg.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/serial/empeg.c	Thu Jan  4 13:15:32 2001
@@ -76,7 +76,7 @@
 static void empeg_read_bulk_callback	(struct urb *urb);
 
 static __devinitdata struct usb_device_id id_table [] = {
-        { idVendor: EMPEG_VENDOR_ID, idProduct: EMPEG_PRODUCT_ID },
+	{ USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
         { }                                     /* Terminating entry */
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/ftdi_sio.c linux/drivers/usb/serial/ftdi_sio.c
--- v2.4.0-prerelease/linux/drivers/usb/serial/ftdi_sio.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/serial/ftdi_sio.c	Thu Jan  4 13:15:32 2001
@@ -92,8 +92,8 @@
 
 
 static __devinitdata struct usb_device_id id_table_sio [] = {
-    { idVendor: FTDI_VID, idProduct: FTDI_SIO_PID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(FTDI_VID, FTDI_SIO_PID) },
+	{ }						/* Terminating entry */
 };
 
 /* THe 8U232AM has the same API as the sio - but it can support MUCH 
@@ -102,15 +102,15 @@
 
    
 static __devinitdata struct usb_device_id id_table_8U232AM [] = {
-    { idVendor: FTDI_VID, idProduct: FTDI_8U232AM_PID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
+	{ }						/* Terminating entry */
 };
 
 
 static __devinitdata struct usb_device_id id_table_combined [] = {
-    { idVendor: FTDI_VID, idProduct: FTDI_SIO_PID },
-    { idVendor: FTDI_VID, idProduct: FTDI_8U232AM_PID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(FTDI_VID, FTDI_SIO_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, id_table_combined);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/keyspan.h linux/drivers/usb/serial/keyspan.h
--- v2.4.0-prerelease/linux/drivers/usb/serial/keyspan.h	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/serial/keyspan.h	Thu Jan  4 13:15:32 2001
@@ -303,19 +303,19 @@
 #define	keyspan_usa49w_product_id	0x010a
 
 static __devinitdata struct usb_device_id keyspan_ids_combined[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa18x_pre_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa19_pre_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa19w_pre_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa28_pre_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa28x_pre_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa49w_pre_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa18x_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa19_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa19w_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa28_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa28x_product_id},
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa49w_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_pre_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_pre_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_pre_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id)},
+	{ } /* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE(usb, keyspan_ids_combined);
@@ -325,63 +325,63 @@
    behavior for each match. */
 
 static __devinitdata struct usb_device_id keyspan_usa18x_pre_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa18x_pre_product_id},
-    { }	/* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
+	{ }	/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa19_pre_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa19_pre_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa19w_pre_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa19w_pre_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa28_pre_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa28_pre_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_pre_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa28x_pre_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa28x_pre_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_pre_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa49w_pre_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa49w_pre_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_pre_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa18x_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa18x_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa19_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa19_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa19w_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa19w_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa28_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa28_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa28x_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa28x_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
+	{ } /* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id keyspan_usa49w_ids[] = {
-    {idVendor: KEYSPAN_VENDOR_ID, idProduct: keyspan_usa49w_product_id},
-    { } /* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) },
+	{ } /* Terminating entry */
 };
 
     /* Structs for the devices, pre and post renumeration.
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/keyspan_pda.c linux/drivers/usb/serial/keyspan_pda.c
--- v2.4.0-prerelease/linux/drivers/usb/serial/keyspan_pda.c	Mon Dec 11 17:59:44 2000
+++ linux/drivers/usb/serial/keyspan_pda.c	Thu Jan  4 13:15:32 2001
@@ -97,21 +97,21 @@
 #define KEYSPAN_PDA_ID			0x0104 /* no clue */
 
 static __devinitdata struct usb_device_id id_table_combined [] = {
-    { idVendor: KEYSPAN_VENDOR_ID, idProduct: KEYSPAN_PDA_FAKE_ID },
-    { idVendor: KEYSPAN_VENDOR_ID, idProduct: KEYSPAN_PDA_ID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, id_table_combined);
 
 static __devinitdata struct usb_device_id id_table_std [] = {
-    { idVendor: KEYSPAN_VENDOR_ID, idProduct: KEYSPAN_PDA_ID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
+	{ }						/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id id_table_fake [] = {
-    { idVendor: KEYSPAN_VENDOR_ID, idProduct: KEYSPAN_PDA_FAKE_ID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
+	{ }						/* Terminating entry */
 };
 
 static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/omninet.c linux/drivers/usb/serial/omninet.c
--- v2.4.0-prerelease/linux/drivers/usb/serial/omninet.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/serial/omninet.c	Thu Jan  4 13:15:32 2001
@@ -70,8 +70,8 @@
 static void omninet_shutdown		(struct usb_serial *serial);
 
 static __devinitdata struct usb_device_id id_table [] = {
-    { idVendor: ZYXEL_VENDOR_ID, idProduct: ZYXEL_OMNINET_ID },
-    { }						/* Terminating entry */
+	{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, id_table);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/visor.c linux/drivers/usb/serial/visor.c
--- v2.4.0-prerelease/linux/drivers/usb/serial/visor.c	Mon Dec 11 17:59:45 2000
+++ linux/drivers/usb/serial/visor.c	Thu Jan  4 13:15:32 2001
@@ -110,7 +110,7 @@
 
 
 static __devinitdata struct usb_device_id id_table [] = {
-	{ idVendor: HANDSPRING_VENDOR_ID, idProduct: HANDSPRING_VISOR_ID },
+	{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) },
 	{ }					/* Terminating entry */
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/serial/whiteheat.c linux/drivers/usb/serial/whiteheat.c
--- v2.4.0-prerelease/linux/drivers/usb/serial/whiteheat.c	Sun Nov 19 18:44:16 2000
+++ linux/drivers/usb/serial/whiteheat.c	Thu Jan  4 13:15:32 2001
@@ -88,19 +88,19 @@
    just for the purpose of exporting the autoloading information.
 */
 static __devinitdata struct usb_device_id id_table_std [] = {
-    {idVendor: CONNECT_TECH_VENDOR_ID, idProduct: CONNECT_TECH_WHITE_HEAT_ID},
-    { }						/* Terminating entry */
+	{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
+	{ }						/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id id_table_prerenumeration [] = {
-    {idVendor: CONNECT_TECH_VENDOR_ID, idProduct: CONNECT_TECH_WHITE_HEAT_ID},
-    { }						/* Terminating entry */
+	{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
+	{ }						/* Terminating entry */
 };
 
 static __devinitdata struct usb_device_id id_table_combined [] = {
-    {idVendor: CONNECT_TECH_VENDOR_ID, idProduct: CONNECT_TECH_WHITE_HEAT_ID},
-    {idVendor: CONNECT_TECH_VENDOR_ID, idProduct: CONNECT_TECH_FAKE_WHITE_HEAT_ID},
-    { }						/* Terminating entry */
+	{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
+	{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, id_table_combined);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/storage/debug.h linux/drivers/usb/storage/debug.h
--- v2.4.0-prerelease/linux/drivers/usb/storage/debug.h	Sun Oct  8 10:50:30 2000
+++ linux/drivers/usb/storage/debug.h	Thu Jan  4 13:56:48 2001
@@ -57,8 +57,8 @@
 void usb_stor_print_Scsi_Cmnd( Scsi_Cmnd* cmd );
 void usb_stor_show_sense( unsigned char key,
 		unsigned char asc, unsigned char ascq );
-#define US_DEBUGP(x...) printk( KERN_DEBUG USB_STORAGE ## x )
-#define US_DEBUGPX(x...) printk( ## x )
+#define US_DEBUGP(x...) printk( KERN_DEBUG USB_STORAGE x )
+#define US_DEBUGPX(x...) printk( x )
 #define US_DEBUG(x) x 
 #else
 #define US_DEBUGP(x...)
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/usb.c linux/drivers/usb/usb.c
--- v2.4.0-prerelease/linux/drivers/usb/usb.c	Mon Jan  1 09:38:36 2001
+++ linux/drivers/usb/usb.c	Thu Jan  4 13:15:32 2001
@@ -570,47 +570,48 @@
 	for (; id->idVendor || id->bDeviceClass || id->bInterfaceClass ||
 	       id->driver_info; id++) {
 
-		if (id->idVendor &&
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) &&
 		    id->idVendor != dev->descriptor.idVendor)
 			continue;
 
-		if (id->idProduct &&
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_PRODUCT) &&
 		    id->idProduct != dev->descriptor.idProduct)
 			continue;
 
 		/* No need to test id->bcdDevice_lo != 0, since 0 is never
 		   greater than any unsigned number. */
-		if (id->bcdDevice_lo > dev->descriptor.bcdDevice)
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO) &&
+		    (id->bcdDevice_lo > dev->descriptor.bcdDevice))
 			continue;
 
-		if (id->bcdDevice_hi &&
-		    id->bcdDevice_hi < dev->descriptor.bcdDevice)
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI) &&
+		    (id->bcdDevice_hi < dev->descriptor.bcdDevice))
 			continue;
 
-		if (id->bDeviceClass &&
-		    id->bDeviceClass != dev->descriptor.bDeviceClass)
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) &&
+		    (id->bDeviceClass != dev->descriptor.bDeviceClass))
 			continue;
 
-		if (id->bDeviceSubClass &&
-		    id->bDeviceSubClass!= dev->descriptor.bDeviceSubClass)
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) &&
+		    (id->bDeviceSubClass!= dev->descriptor.bDeviceSubClass))
 			continue;
 
-		if (id->bDeviceProtocol &&
-		    id->bDeviceProtocol != dev->descriptor.bDeviceProtocol)
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) &&
+		    (id->bDeviceProtocol != dev->descriptor.bDeviceProtocol))
 			continue;
 
 		intf = &interface->altsetting [interface->act_altsetting];
 
-		if (id->bInterfaceClass
-		    && id->bInterfaceClass != intf->bInterfaceClass)
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_CLASS) &&
+		    (id->bInterfaceClass != intf->bInterfaceClass))
 			continue;
 
-		if (id->bInterfaceSubClass &&
-		    id->bInterfaceSubClass != intf->bInterfaceSubClass)
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_SUBCLASS) &&
+		    (id->bInterfaceSubClass != intf->bInterfaceSubClass))
 		    continue;
 
-		if (id->bInterfaceProtocol
-		    && id->bInterfaceProtocol != intf->bInterfaceProtocol)
+		if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_PROTOCOL) &&
+		    (id->bInterfaceProtocol != intf->bInterfaceProtocol))
 		    continue;
 
 		return id;
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/usbkbd.c linux/drivers/usb/usbkbd.c
--- v2.4.0-prerelease/linux/drivers/usb/usbkbd.c	Sun Nov 19 18:44:17 2000
+++ linux/drivers/usb/usbkbd.c	Thu Jan  4 13:15:32 2001
@@ -257,8 +257,8 @@
 }
 
 static struct usb_device_id usb_kbd_id_table [] = {
-    { bInterfaceClass: 3, bInterfaceSubClass: 1, bInterfaceProtocol: 1},
-    { }						/* Terminating entry */
+	{ USB_INTERFACE_INFO(3, 1, 1) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, usb_kbd_id_table);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/usbmouse.c linux/drivers/usb/usbmouse.c
--- v2.4.0-prerelease/linux/drivers/usb/usbmouse.c	Sun Nov 19 18:44:17 2000
+++ linux/drivers/usb/usbmouse.c	Thu Jan  4 13:15:32 2001
@@ -171,7 +171,7 @@
 }
 
 static struct usb_device_id usb_mouse_id_table [] = {
-    { bInterfaceClass: 3, bInterfaceSubClass: 1, bInterfaceProtocol: 2},
+	{ USB_INTERFACE_INFO(3, 1, 2) },
     { }						/* Terminating entry */
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/uss720.c linux/drivers/usb/uss720.c
--- v2.4.0-prerelease/linux/drivers/usb/uss720.c	Sun Nov 19 18:44:17 2000
+++ linux/drivers/usb/uss720.c	Thu Jan  4 13:15:32 2001
@@ -625,10 +625,10 @@
 
 /* table of cables that work through this driver */
 static struct usb_device_id uss720_table [] = {
-    { idVendor: 0x047e, idProduct: 0x1001},
-    { idVendor: 0x0557, idProduct: 0x2001},
-    { idVendor: 0x0729, idProduct: 0x1284},
-    { }						/* Terminating entry */
+	{ USB_DEVICE(0x047e, 0x1001) },
+	{ USB_DEVICE(0x0557, 0x2001) },
+	{ USB_DEVICE(0x0729, 0x1284) },
+	{ }						/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE (usb, uss720_table);
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/usb/wacom.c linux/drivers/usb/wacom.c
--- v2.4.0-prerelease/linux/drivers/usb/wacom.c	Mon Dec 11 17:59:45 2000
+++ linux/drivers/usb/wacom.c	Thu Jan  4 13:15:32 2001
@@ -300,12 +300,12 @@
 };
 
 struct usb_device_id wacom_ids[] = {
-	{ idVendor: USB_VENDOR_ID_WACOM, idProduct: 0x10, driver_info: 0 },
-	{ idVendor: USB_VENDOR_ID_WACOM, idProduct: 0x20, driver_info: 1 },
-	{ idVendor: USB_VENDOR_ID_WACOM, idProduct: 0x21, driver_info: 2 },
-	{ idVendor: USB_VENDOR_ID_WACOM, idProduct: 0x22, driver_info: 3 },
-	{ idVendor: USB_VENDOR_ID_WACOM, idProduct: 0x23, driver_info: 4 },
-	{ idVendor: USB_VENDOR_ID_WACOM, idProduct: 0x24, driver_info: 5 },
+	{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x10), driver_info: 0 },
+	{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x20), driver_info: 1 },
+	{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x21), driver_info: 2 },
+	{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x22), driver_info: 3 },
+	{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x23), driver_info: 4 },
+	{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x24), driver_info: 5 },
 	{ }
 };
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/video/Config.in linux/drivers/video/Config.in
--- v2.4.0-prerelease/linux/drivers/video/Config.in	Mon Jan  1 09:38:36 2001
+++ linux/drivers/video/Config.in	Thu Jan  4 13:00:55 2001
@@ -16,7 +16,7 @@
       if [ "$CONFIG_AMIGA" = "y" -o "$CONFIG_PCI" = "y" ]; then
 	 tristate '  Cirrus Logic support (EXPERIMENTAL)' CONFIG_FB_CLGEN
 	 tristate '  Permedia2 support (EXPERIMENTAL)' CONFIG_FB_PM2
-	 if [ "$CONFIG_FB_PM2" = "y" ]; then
+	 if [ "$CONFIG_FB_PM2" = "y" -o "$CONFIG_FB_PM2" = "m" ]; then
 	    if [ "$CONFIG_PCI" = "y" ]; then
 	       bool '    enable FIFO disconnect feature' CONFIG_FB_PM2_FIFO_DISCONNECT
 	       bool '    generic Permedia2 PCI board support' CONFIG_FB_PM2_PCI
@@ -92,10 +92,10 @@
       define_bool CONFIG_BUS_I2C y
    fi
    if [ "$CONFIG_SUN3" = "y" -o "$CONFIG_SUN3X" = "y" ]; then
-      bool 'Sun3 framebuffer support' CONFIG_FB_SUN3
+      bool '  Sun3 framebuffer support' CONFIG_FB_SUN3
       if [ "$CONFIG_FB_SUN3" != "n" ]; then
-         bool '  BWtwo support' CONFIG_FB_BWTWO
-         bool '  CGsix (GX,TurboGX) support' CONFIG_FB_CGSIX
+         bool '    BWtwo support' CONFIG_FB_BWTWO
+         bool '    CGsix (GX,TurboGX) support' CONFIG_FB_CGSIX
       fi
    fi
    if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/video/atyfb.c linux/drivers/video/atyfb.c
--- v2.4.0-prerelease/linux/drivers/video/atyfb.c	Mon Dec 11 17:59:45 2000
+++ linux/drivers/video/atyfb.c	Thu Jan  4 12:38:41 2001
@@ -1722,7 +1722,7 @@
     aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24),
 	     info);
 
-    udelay(15000); /* delay for 50 (15) ms */
+    mdelay(15); /* delay for 50 (15) ms */
 
     program_bits = pll->program_bits;
     locationAddr = pll->locationAddr;
@@ -1754,7 +1754,7 @@
     aty_st_8(CLOCK_CNTL + info->clk_wr_offset, old_clock_cntl | CLOCK_STROBE,
 	     info);
 
-    udelay(50000); /* delay for 50 (15) ms */
+    mdelay(50); /* delay for 50 (15) ms */
     aty_st_8(CLOCK_CNTL + info->clk_wr_offset,
 	     ((pll->locationAddr & 0x0F) | CLOCK_STROBE), info);
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/video/clgenfb.c linux/drivers/video/clgenfb.c
--- v2.4.0-prerelease/linux/drivers/video/clgenfb.c	Sun Nov 19 18:44:17 2000
+++ linux/drivers/video/clgenfb.c	Thu Jan  4 12:38:41 2001
@@ -1899,7 +1899,7 @@
 		break;
 	case BT_PICASSO4:
 		vga_wcrt (fb_info->regs, CL_CRT51, 0x00);	/* disable flickerfixer */
-		udelay (100000);
+		mdelay (100);
 		vga_wgfx (fb_info->regs, CL_GR2F, 0x00);	/* from Klaus' NetBSD driver: */
 		vga_wgfx (fb_info->regs, CL_GR33, 0x00);	/* put blitter into 542x compat */
 		vga_wgfx (fb_info->regs, CL_GR31, 0x00);	/* mode */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/video/fbmem.c linux/drivers/video/fbmem.c
--- v2.4.0-prerelease/linux/drivers/video/fbmem.c	Mon Dec 11 17:59:45 2000
+++ linux/drivers/video/fbmem.c	Thu Jan  4 13:00:55 2001
@@ -200,9 +200,6 @@
 	 * management!
 	 */
 
-#ifdef CONFIG_FB_STI
-	{ "stifb", stifb_init, stifb_setup },
-#endif
 #ifdef CONFIG_FB_OF
 	{ "offb", offb_init, NULL },
 #endif
@@ -267,6 +264,9 @@
 #ifdef CONFIG_FB_VGA16
 	{ "vga16", vga16fb_init, vga16fb_setup },
 #endif 
+#ifdef CONFIG_FB_STI
+	{ "stifb", stifb_init, stifb_setup },
+#endif
 
 #ifdef CONFIG_GSP_RESOLVER
 	/* Not a real frame buffer device... */
diff -u --recursive --new-file v2.4.0-prerelease/linux/drivers/video/retz3fb.c linux/drivers/video/retz3fb.c
--- v2.4.0-prerelease/linux/drivers/video/retz3fb.c	Sun Nov 19 18:44:17 2000
+++ linux/drivers/video/retz3fb.c	Thu Jan  4 13:00:55 2001
@@ -1450,7 +1450,7 @@
 		       "video memory\n", GET_FB_IDX(fb_info->node),
 		       fb_info->modename, zinfo->fbsize>>10);
 
-		/* TODO: This driver cannot be unloaded yet */
+		/* FIXME: This driver cannot be unloaded yet */
 		MOD_INC_USE_COUNT;
 
 		res = 0;
@@ -1544,9 +1544,9 @@
 	/*
 	 * Not reached because the usecount will never
 	 * be decremented to zero
+	 *
+	 * FIXME: clean up ... *
 	 */
-	unregister_framebuffer(&fb_info);
-	/* TODO: clean up ... */
 }
 #endif
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/fs/buffer.c linux/fs/buffer.c
--- v2.4.0-prerelease/linux/fs/buffer.c	Mon Jan  1 09:38:36 2001
+++ linux/fs/buffer.c	Wed Jan  3 20:45:26 2001
@@ -118,8 +118,7 @@
 				wake-cycle */
 		int nrefill; /* Number of clean buffers to try to obtain
 				each time we call refill */
-		int nref_dirt; /* Dirty buffer threshold for activating bdflush
-				  when trying to refill buffers. */
+		int dummy1;   /* unused */
 		int interval; /* jiffies delay between kupdate flushes */
 		int age_buffer;  /* Time for normal buffer to age before we flush it */
 		int nfract_sync; /* Percentage of buffer cache dirty to 
@@ -128,7 +127,7 @@
 		int dummy3;    /* unused */
 	} b_un;
 	unsigned int data[N_PARAM];
-} bdf_prm = {{40, 500, 64, 256, 5*HZ, 30*HZ, 80, 0, 0}};
+} bdf_prm = {{30, 64, 64, 256, 5*HZ, 30*HZ, 60, 0, 0}};
 
 /* These are the min and max parameter values that we will allow to be assigned */
 int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   1*HZ,   0, 0, 0};
@@ -755,11 +754,15 @@
 
 /*
  * We used to try various strange things. Let's not.
+ * We'll just try to balance dirty buffers, and possibly
+ * launder some pages.
  */
 static void refill_freelist(int size)
 {
-	if (!grow_buffers(size)) 
-		wakeup_bdflush(1);  /* Sets task->state to TASK_RUNNING */
+	balance_dirty(NODEV);
+	if (free_shortage())
+		page_launder(GFP_BUFFER, 0);
+	grow_buffers(size);
 }
 
 void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
@@ -1090,8 +1093,10 @@
 
 void mark_buffer_dirty(struct buffer_head *bh)
 {
-	__mark_buffer_dirty(bh);
-	balance_dirty(bh->b_dev);
+	if (!atomic_set_buffer_dirty(bh)) {
+		__mark_dirty(bh);
+		balance_dirty(bh->b_dev);
+	}
 }
 
 /*
@@ -2528,34 +2533,6 @@
  * response to dirty buffers.  Once this process is activated, we write back
  * a limited number of buffers to the disks and then go back to sleep again.
  */
-static DECLARE_WAIT_QUEUE_HEAD(bdflush_done);
-struct task_struct *bdflush_tsk = 0;
-
-void wakeup_bdflush(int block)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	if (current == bdflush_tsk)
-		return;
-
-	if (!block) {
-		wake_up_process(bdflush_tsk);
-		return;
-	}
-
-	/* bdflush can wakeup us before we have a chance to
-	   go to sleep so we must be smart in handling
-	   this wakeup event from bdflush to avoid deadlocking in SMP
-	   (we are not holding any lock anymore in these two paths). */
-	__set_current_state(TASK_UNINTERRUPTIBLE);
-	add_wait_queue(&bdflush_done, &wait);
-
-	wake_up_process(bdflush_tsk);
-	schedule();
-
-	remove_wait_queue(&bdflush_done, &wait);
-	__set_current_state(TASK_RUNNING);
-}
 
 /* This is the _only_ function that deals with flushing async writes
    to disk.
@@ -2611,6 +2588,18 @@
 	return flushed;
 }
 
+struct task_struct *bdflush_tsk = 0;
+
+void wakeup_bdflush(int block)
+{
+	if (current != bdflush_tsk) {
+		wake_up_process(bdflush_tsk);
+
+		if (block)
+			flush_dirty_buffers(0);
+	}
+}
+
 /* 
  * Here we attempt to write back old buffers.  We also try to flush inodes 
  * and supers as well, since this function is essentially "update", and 
@@ -2725,23 +2714,14 @@
 
 		flushed = flush_dirty_buffers(0);
 		if (free_shortage())
-			flushed += page_launder(GFP_BUFFER, 0);
+			flushed += page_launder(GFP_KERNEL, 0);
 
-		/* If wakeup_bdflush will wakeup us
-		   after our bdflush_done wakeup, then
-		   we must make sure to not sleep
-		   in schedule_timeout otherwise
-		   wakeup_bdflush may wait for our
-		   bdflush_done wakeup that would never arrive
-		   (as we would be sleeping) and so it would
-		   deadlock in SMP. */
-		__set_current_state(TASK_INTERRUPTIBLE);
-		wake_up_all(&bdflush_done);
 		/*
 		 * If there are still a lot of dirty buffers around,
 		 * skip the sleep and flush some more. Otherwise, we
 		 * go to sleep waiting a wakeup.
 		 */
+		set_current_state(TASK_INTERRUPTIBLE);
 		if (!flushed || balance_dirty_state(NODEV) < 0) {
 			run_task_queue(&tq_disk);
 			schedule();
diff -u --recursive --new-file v2.4.0-prerelease/linux/fs/dcache.c linux/fs/dcache.c
--- v2.4.0-prerelease/linux/fs/dcache.c	Sun Oct  8 10:50:32 2000
+++ linux/fs/dcache.c	Wed Jan  3 11:03:35 2001
@@ -339,10 +339,18 @@
 
 		if (tmp == &dentry_unused)
 			break;
-		dentry_stat.nr_unused--;
 		list_del_init(tmp);
 		dentry = list_entry(tmp, struct dentry, d_lru);
 
+		/* If the dentry was recently referenced, don't free it. */
+		if (dentry->d_flags & DCACHE_REFERENCED) {
+			dentry->d_flags &= ~DCACHE_REFERENCED;
+			list_add(&dentry->d_lru, &dentry_unused);
+			count--;
+			continue;
+		}
+		dentry_stat.nr_unused--;
+
 		/* Unused dentry with a count? */
 		if (atomic_read(&dentry->d_count))
 			BUG();
@@ -732,6 +740,7 @@
 				continue;
 		}
 		__dget_locked(dentry);
+		dentry->d_flags |= DCACHE_REFERENCED;
 		spin_unlock(&dcache_lock);
 		return dentry;
 	}
diff -u --recursive --new-file v2.4.0-prerelease/linux/fs/exec.c linux/fs/exec.c
--- v2.4.0-prerelease/linux/fs/exec.c	Mon Jan  1 09:38:36 2001
+++ linux/fs/exec.c	Wed Jan  3 20:45:26 2001
@@ -403,6 +403,12 @@
 			mmdrop(mm);
 			return -ENOMEM;
 		}
+
+		/* Add it to the list of mm's */
+		spin_lock(&mmlist_lock);
+		list_add(&mm->mmlist, &init_mm.mmlist);
+		spin_unlock(&mmlist_lock);
+
 		task_lock(current);
 		current->mm = mm;
 		current->active_mm = mm;
diff -u --recursive --new-file v2.4.0-prerelease/linux/fs/isofs/inode.c linux/fs/isofs/inode.c
--- v2.4.0-prerelease/linux/fs/isofs/inode.c	Mon Dec 11 17:59:45 2000
+++ linux/fs/isofs/inode.c	Mon Jan  1 10:23:21 2001
@@ -1264,7 +1264,7 @@
 	    (volume_seq_no != 0) && (volume_seq_no != 1)) {
 		printk(KERN_WARNING "Multi-volume CD somehow got mounted.\n");
 	} else
-#endif IGNORE_WRONG_MULTI_VOLUME_SPECS
+#endif /*IGNORE_WRONG_MULTI_VOLUME_SPECS */
 	{
 		if (S_ISREG(inode->i_mode)) {
 			inode->i_fop = &generic_ro_fops;
diff -u --recursive --new-file v2.4.0-prerelease/linux/fs/nfsd/nfsfh.c linux/fs/nfsd/nfsfh.c
--- v2.4.0-prerelease/linux/fs/nfsd/nfsfh.c	Tue Oct 31 12:42:27 2000
+++ linux/fs/nfsd/nfsfh.c	Thu Jan  4 12:50:17 2001
@@ -346,7 +346,7 @@
 	struct dentry *dentry, *result = NULL;
 	struct dentry *tmp;
 	int  found =0;
-	int err;
+	int err = -ESTALE;
 	/* the sb->s_nfsd_free_path_sem semaphore is needed to make sure that only one unconnected (free)
 	 * dcache path ever exists, as otherwise two partial paths might get
 	 * joined together, which would be very confusing.
@@ -360,19 +360,18 @@
 	 * Attempt to find the inode.
 	 */
  retry:
+	down(&sb->s_nfsd_free_path_sem);
 	result = nfsd_iget(sb, ino, generation);
-	err = PTR_ERR(result);
-	if (IS_ERR(result))
-		goto err_out;
-	err = -ESTALE;
-	if (! (result->d_flags & DCACHE_NFSD_DISCONNECTED))
-		return result;
-
-	/* result is now an anonymous dentry, which may be adequate as it stands, or else
-	 * will get spliced into the dcache tree */
-
-	if (!S_ISDIR(result->d_inode->i_mode) && ! needpath) {
-		nfsdstats.fh_anon++;
+	if (IS_ERR(result)
+	    || !(result->d_flags & DCACHE_NFSD_DISCONNECTED)
+	    || (!S_ISDIR(result->d_inode->i_mode) && ! needpath)) {
+		up(&sb->s_nfsd_free_path_sem);
+	    
+		err = PTR_ERR(result);
+		if (IS_ERR(result))
+			goto err_out;
+		if ((result->d_flags & DCACHE_NFSD_DISCONNECTED))
+			nfsdstats.fh_anon++;
 		return result;
 	}
 
@@ -380,14 +379,6 @@
 	 * location in the tree.
 	 */
 	dprintk("nfs_fh: need to look harder for %d/%ld\n",sb->s_dev,ino);
-	down(&sb->s_nfsd_free_path_sem);
-
-	/* claiming the semaphore might have allowed things to get fixed up */
-	if (! (result->d_flags & DCACHE_NFSD_DISCONNECTED)) {
-		up(&sb->s_nfsd_free_path_sem);
-		return result;
-	}
-
 
 	found = 0;
 	if (!S_ISDIR(result->d_inode->i_mode)) {
diff -u --recursive --new-file v2.4.0-prerelease/linux/fs/smbfs/smb_debug.h linux/fs/smbfs/smb_debug.h
--- v2.4.0-prerelease/linux/fs/smbfs/smb_debug.h	Tue Jul 18 22:30:34 2000
+++ linux/fs/smbfs/smb_debug.h	Mon Jan  1 09:57:08 2001
@@ -11,14 +11,14 @@
  * these are normally enabled.
  */
 #ifdef SMBFS_PARANOIA
-#define PARANOIA(x...) printk(KERN_NOTICE __FUNCTION__ ": " ## x)
+#define PARANOIA(x...) printk(KERN_NOTICE __FUNCTION__ ": " x)
 #else
 #define PARANOIA(x...) do { ; } while(0)
 #endif
 
 /* lots of debug messages */
 #ifdef SMBFS_DEBUG_VERBOSE
-#define VERBOSE(x...) printk(KERN_DEBUG __FUNCTION__ ": " ## x)
+#define VERBOSE(x...) printk(KERN_DEBUG __FUNCTION__ ": " x)
 #else
 #define VERBOSE(x...) do { ; } while(0)
 #endif
@@ -28,7 +28,7 @@
  * too common name.
  */
 #ifdef SMBFS_DEBUG
-#define DEBUG1(x...) printk(KERN_DEBUG __FUNCTION__ ": " ## x)
+#define DEBUG1(x...) printk(KERN_DEBUG __FUNCTION__ ": " x)
 #else
 #define DEBUG1(x...) do { ; } while(0)
 #endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/fs/umsdos/mangle.c linux/fs/umsdos/mangle.c
--- v2.4.0-prerelease/linux/fs/umsdos/mangle.c	Thu Dec 17 09:05:42 1998
+++ linux/fs/umsdos/mangle.c	Mon Jan  1 10:25:22 2001
@@ -435,7 +435,7 @@
 	"HELLO", 1, "hello",
 	"Hello.1", 1, "hello.1",
 	"Hello.c", 1, "hello.c",
-#elseif
+#else
 /*
  * I find the three examples below very unfortunate.  I propose to
  * convert them to lower case in a quick preliminary pass, then test
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-alpha/delay.h linux/include/asm-alpha/delay.h
--- v2.4.0-prerelease/linux/include/asm-alpha/delay.h	Mon May  8 22:00:01 2000
+++ linux/include/asm-alpha/delay.h	Tue Jan  2 16:45:37 2001
@@ -2,12 +2,13 @@
 #define __ALPHA_DELAY_H
 
 #include <linux/config.h>
+#include <asm/param.h>
 #include <asm/smp.h>
 
 /*
  * Copyright (C) 1993, 2000 Linus Torvalds
  *
- * Delay routines, using a pre-computed "loops_per_second" value.
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
  */
 
 /*
@@ -32,16 +33,16 @@
 }
 
 extern __inline__ void
-__udelay(unsigned long usecs, unsigned long lps)
+__udelay(unsigned long usecs, unsigned long lpj)
 {
-	usecs *= ((1UL << 32) / 1000000) * lps;
+	usecs *= (((unsigned long)HZ << 32) / 1000000) * lpj;
 	__delay((long)usecs >> 32);
 }
 
 #ifdef CONFIG_SMP
-#define udelay(u)  __udelay((u), cpu_data[smp_processor_id()].loops_per_sec)
+#define udelay(u)  __udelay((u), cpu_data[smp_processor_id()].loops_per_jiffy)
 #else
-#define udelay(u)  __udelay((u), loops_per_sec)
+#define udelay(u)  __udelay((u), loops_per_jiffy)
 #endif
 
 #endif /* defined(__ALPHA_DELAY_H) */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-alpha/smp.h linux/include/asm-alpha/smp.h
--- v2.4.0-prerelease/linux/include/asm-alpha/smp.h	Mon Jan  1 09:38:36 2001
+++ linux/include/asm-alpha/smp.h	Tue Jan  2 16:45:37 2001
@@ -24,7 +24,7 @@
 #include <asm/irq.h>
 
 struct cpuinfo_alpha {
-	unsigned long loops_per_sec;
+	unsigned long loops_per_jiffy;
 	unsigned long last_asn;
 	int need_new_asn;
 	int asn_lock;
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-i386/floppy.h linux/include/asm-i386/floppy.h
--- v2.4.0-prerelease/linux/include/asm-i386/floppy.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-i386/floppy.h	Thu Jan  4 13:55:20 2001
@@ -285,8 +285,28 @@
 static int FDC1 = 0x3f0;
 static int FDC2 = -1;
 
-#define FLOPPY0_TYPE	((CMOS_READ(0x10) >> 4) & 15)
-#define FLOPPY1_TYPE	(CMOS_READ(0x10) & 15)
+/*
+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
+ * coincides with another rtc CMOS user.		Paul G.
+ */
+#define FLOPPY0_TYPE	({				\
+	unsigned long flags;				\
+	unsigned char val;				\
+	spin_lock_irqsave(&rtc_lock, flags);		\
+	val = (CMOS_READ(0x10) >> 4) & 15;		\
+	spin_unlock_irqrestore(&rtc_lock, flags);	\
+	val;						\
+})
+
+#define FLOPPY1_TYPE	({				\
+	unsigned long flags;				\
+	unsigned char val;				\
+	spin_lock_irqsave(&rtc_lock, flags);		\
+	val = CMOS_READ(0x10) & 15;			\
+	spin_unlock_irqrestore(&rtc_lock, flags);	\
+	val;						\
+})
 
 #define N_FDC 2
 #define N_DRIVE 8
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-i386/semaphore.h linux/include/asm-i386/semaphore.h
--- v2.4.0-prerelease/linux/include/asm-i386/semaphore.h	Sun Oct  8 10:50:35 2000
+++ linux/include/asm-i386/semaphore.h	Thu Jan  4 13:55:19 2001
@@ -23,6 +23,12 @@
  *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
  *                     do this). Changed calling sequences from push/jmp to
  *                     traditional call/ret.
+ * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
+ *		       Some hacks to ensure compatibility with recent
+ *		       GCC snapshots, to avoid stack corruption when compiling
+ *		       with -fomit-frame-pointer. It is unclear whether this
+ *		       will be fixed in GCC, as our previous implementation was a
+ *		       bit dubious.
  *
  * If you would like to see an analysis of this implementation, please
  * ftp to gcom.com and download the file
@@ -113,14 +119,14 @@
 
 	__asm__ __volatile__(
 		"# atomic down operation\n\t"
-		LOCK "decl (%0)\n\t"     /* --sem->count */
+		LOCK "decl %0\n\t"     /* --sem->count */
 		"js 2f\n"
 		"1:\n"
 		".section .text.lock,\"ax\"\n"
 		"2:\tcall __down_failed\n\t"
 		"jmp 1b\n"
 		".previous"
-		:/* no outputs */
+		:"=m" (sem->count)
 		:"c" (sem)
 		:"memory");
 }
@@ -135,7 +141,7 @@
 
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
-		LOCK "decl (%1)\n\t"     /* --sem->count */
+		LOCK "decl %1\n\t"     /* --sem->count */
 		"js 2f\n\t"
 		"xorl %0,%0\n"
 		"1:\n"
@@ -143,7 +149,7 @@
 		"2:\tcall __down_failed_interruptible\n\t"
 		"jmp 1b\n"
 		".previous"
-		:"=a" (result)
+		:"=a" (result), "=m" (sem->count)
 		:"c" (sem)
 		:"memory");
 	return result;
@@ -159,7 +165,7 @@
 
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
-		LOCK "decl (%1)\n\t"     /* --sem->count */
+		LOCK "decl %1\n\t"     /* --sem->count */
 		"js 2f\n\t"
 		"xorl %0,%0\n"
 		"1:\n"
@@ -167,7 +173,7 @@
 		"2:\tcall __down_failed_trylock\n\t"
 		"jmp 1b\n"
 		".previous"
-		:"=a" (result)
+		:"=a" (result), "=m" (sem->count)
 		:"c" (sem)
 		:"memory");
 	return result;
@@ -186,14 +192,14 @@
 #endif
 	__asm__ __volatile__(
 		"# atomic up operation\n\t"
-		LOCK "incl (%0)\n\t"     /* ++sem->count */
+		LOCK "incl %0\n\t"     /* ++sem->count */
 		"jle 2f\n"
 		"1:\n"
 		".section .text.lock,\"ax\"\n"
 		"2:\tcall __up_wakeup\n\t"
 		"jmp 1b\n"
 		".previous"
-		:/* no outputs */
+		:"=m" (sem->count)
 		:"c" (sem)
 		:"memory");
 }
@@ -315,14 +321,15 @@
 {
 	__asm__ __volatile__(
 		"# up_read\n\t"
-		LOCK "incl (%%eax)\n\t"
+		LOCK "incl %0\n\t"
 		"jz 2f\n"			/* only do the wake if result == 0 (ie, a writer) */
 		"1:\n\t"
 		".section .text.lock,\"ax\"\n"
 		"2:\tcall __rwsem_wake\n\t"
 		"jmp 1b\n"
 		".previous"
-		::"a" (sem)
+		:"=m" (sem->count)
+		:"a" (sem)
 		:"memory"
 		);
 }
@@ -334,14 +341,15 @@
 {
 	__asm__ __volatile__(
 		"# up_write\n\t"
-		LOCK "addl $" RW_LOCK_BIAS_STR ",(%%eax)\n"
+		LOCK "addl $" RW_LOCK_BIAS_STR ",%0\n"
 		"jc 2f\n"			/* only do the wake if the result was -'ve to 0/+'ve */
 		"1:\n\t"
 		".section .text.lock,\"ax\"\n"
 		"2:\tcall __rwsem_wake\n\t"
 		"jmp 1b\n"
 		".previous"
-		::"a" (sem)
+		:"=m" (sem->count)
+		:"a" (sem)
 		:"memory"
 		);
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/a.out.h linux/include/asm-ia64/a.out.h
--- v2.4.0-prerelease/linux/include/asm-ia64/a.out.h	Sun Feb  6 18:42:40 2000
+++ linux/include/asm-ia64/a.out.h	Thu Jan  4 12:50:17 2001
@@ -7,14 +7,13 @@
  * probably would be better to clean up binfmt_elf.c so it does not
  * necessarily depend on there being a.out support.
  *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 #include <linux/types.h>
 
-struct exec
-{
+struct exec {
 	unsigned long a_info;
 	unsigned long a_text;
 	unsigned long a_data;
@@ -31,7 +30,7 @@
 #define N_TXTOFF(x)	0
 
 #ifdef __KERNEL__
-# define STACK_TOP	0xa000000000000000UL
+# define STACK_TOP	(0x8000000000000000UL + (1UL << (4*PAGE_SHIFT - 12)))
 # define IA64_RBS_BOT	(STACK_TOP - 0x80000000L)	/* bottom of register backing store */
 #endif
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/acpi-ext.h linux/include/asm-ia64/acpi-ext.h
--- v2.4.0-prerelease/linux/include/asm-ia64/acpi-ext.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/acpi-ext.h	Thu Jan  4 12:50:17 2001
@@ -8,19 +8,27 @@
  * 
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 2000 Intel Corp.
+ * Copyright (C) 2000 J.I. Lee <jung-ik.lee@intel.com>
+ *	ACPI 2.0 specification 
  */
 
 #include <linux/types.h>
 
+#pragma	pack(1)
 #define ACPI_RSDP_SIG "RSD PTR " /* Trailing space required */
 #define ACPI_RSDP_SIG_LEN 8
 typedef struct {
 	char signature[8];
 	u8 checksum;
 	char oem_id[6];
-	char reserved;		/* Must be 0 */
-	struct acpi_rsdt *rsdt;
-} acpi_rsdp_t;
+	u8 revision;
+	u32 rsdt;
+	u32 lenth;
+	struct acpi_xsdt *xsdt;
+	u8 ext_checksum;
+	u8 reserved[3];
+} acpi20_rsdp_t;
 
 typedef struct {
 	char signature[4];
@@ -32,20 +40,73 @@
 	u32 oem_revision;
 	u32 creator_id;
 	u32 creator_revision;
-	char reserved[4];
 } acpi_desc_table_hdr_t;
 
 #define ACPI_RSDT_SIG "RSDT"
 #define ACPI_RSDT_SIG_LEN 4
-typedef struct acpi_rsdt {
+typedef struct {
+	acpi_desc_table_hdr_t header;
+	u8 reserved[4];
+	u32 entry_ptrs[1];	/* Not really . . . */
+} acpi20_rsdt_t;
+
+#define ACPI_XSDT_SIG "XSDT"
+#define ACPI_XSDT_SIG_LEN 4
+typedef struct acpi_xsdt {
 	acpi_desc_table_hdr_t header;
 	unsigned long entry_ptrs[1];	/* Not really . . . */
+} acpi_xsdt_t;
+
+/* Common structures for ACPI 2.0 and 0.71 */
+
+typedef struct acpi_entry_iosapic {
+	u8 type;
+	u8 length;
+	u8 id;
+	u8 reserved;
+	u32 irq_base;	/* start of IRQ's this IOSAPIC is responsible for. */
+	unsigned long address;	/* Address of this IOSAPIC */
+} acpi_entry_iosapic_t;
+
+/* Local SAPIC flags */
+#define LSAPIC_ENABLED                (1<<0)
+#define LSAPIC_PERFORMANCE_RESTRICTED (1<<1)
+#define LSAPIC_PRESENT                (1<<2)
+
+/* Defines legacy IRQ->pin mapping */
+typedef struct {
+	u8 type;
+	u8 length;
+	u8 bus;		/* Constant 0 == ISA */
+	u8 isa_irq;	/* ISA IRQ # */
+	u32 pin;		/* called vector in spec; really IOSAPIC pin number */
+	u16 flags;	/* Edge/Level trigger & High/Low active */
+} acpi_entry_int_override_t;
+
+#define INT_OVERRIDE_ACTIVE_LOW    0x03
+#define INT_OVERRIDE_LEVEL_TRIGGER 0x0d
+
+/* IA64 ext 0.71 */
+
+typedef struct {
+	char signature[8];
+	u8 checksum;
+	char oem_id[6];
+	char reserved;		/* Must be 0 */
+	struct acpi_rsdt *rsdt;
+} acpi_rsdp_t;
+
+typedef struct {
+	acpi_desc_table_hdr_t header;
+	u8 reserved[4];
+	unsigned long entry_ptrs[1];	/* Not really . . . */
 } acpi_rsdt_t;
 
 #define ACPI_SAPIC_SIG "SPIC"
 #define ACPI_SAPIC_SIG_LEN 4
 typedef struct {
 	acpi_desc_table_hdr_t header;
+	u8 reserved[4];
 	unsigned long interrupt_block;
 } acpi_sapic_t;
 
@@ -55,11 +116,6 @@
 #define ACPI_ENTRY_INT_SRC_OVERRIDE    2
 #define ACPI_ENTRY_PLATFORM_INT_SOURCE 3	/* Unimplemented */
 
-/* Local SAPIC flags */
-#define LSAPIC_ENABLED                (1<<0)
-#define LSAPIC_PERFORMANCE_RESTRICTED (1<<1)
-#define LSAPIC_PRESENT                (1<<2)
-
 typedef struct acpi_entry_lsapic {
 	u8 type;
 	u8 length;
@@ -69,42 +125,71 @@
 	u8 eid;
 } acpi_entry_lsapic_t;
 
-typedef struct acpi_entry_iosapic {
+typedef struct {
 	u8 type;
 	u8 length;
-	u16 reserved;
-	u32 irq_base;	/* start of IRQ's this IOSAPIC is responsible for. */
-	unsigned long address;	/* Address of this IOSAPIC */
-} acpi_entry_iosapic_t;
+	u16 flags;
+	u8 int_type;
+	u8 id;
+	u8 eid;
+	u8 iosapic_vector;
+	u8 reserved[4];
+	u32 global_vector;
+} acpi_entry_platform_src_t;
 
-/* Defines legacy IRQ->pin mapping */
+/* ACPI 2.0 with 1.3 errata specific structures */
+
+#define ACPI_MADT_SIG "APIC"
+#define ACPI_MADT_SIG_LEN 4
 typedef struct {
+	acpi_desc_table_hdr_t header;
+	u32 lapic_address;
+	u32 flags;
+} acpi_madt_t;
+
+/* acpi 2.0 MADT structure types */
+#define ACPI20_ENTRY_LOCAL_APIC                 0
+#define ACPI20_ENTRY_IO_APIC                    1
+#define ACPI20_ENTRY_INT_SRC_OVERRIDE           2
+#define ACPI20_ENTRY_NMI_SOURCE                 3
+#define ACPI20_ENTRY_LOCAL_APIC_NMI             4
+#define ACPI20_ENTRY_LOCAL_APIC_ADDR_OVERRIDE   5
+#define ACPI20_ENTRY_IO_SAPIC                   6
+#define ACPI20_ENTRY_LOCAL_SAPIC                7
+#define ACPI20_ENTRY_PLATFORM_INT_SOURCE        8
+
+typedef struct acpi20_entry_lsapic {
 	u8 type;
 	u8 length;
-	u8 bus;	/* Constant 0 == ISA */
-	u8 isa_irq;	/* ISA IRQ # */
-	u8 pin;	/* called vector in spec; really IOSAPIC pin number */
-	u32 flags;	/* Edge/Level trigger & High/Low active */
-	u8 reserved[6];
-} acpi_entry_int_override_t;
-#define INT_OVERRIDE_ACTIVE_LOW    0x03
-#define INT_OVERRIDE_LEVEL_TRIGGER 0x0d
+	u8 acpi_processor_id;
+	u8 id;
+	u8 eid;
+	u8 reserved[3];
+	u32 flags;
+} acpi20_entry_lsapic_t;
+
+typedef struct acpi20_entry_lapic_addr_override {
+	u8 type;
+	u8 length;
+	u8 reserved[2];
+	unsigned long lapic_address;
+} acpi20_entry_lapic_addr_override_t;
 
 typedef struct {
 	u8 type;
 	u8 length;
-	u32 flags;
+	u16 flags;
 	u8 int_type;
 	u8 id;
 	u8 eid;
 	u8 iosapic_vector;
-	unsigned long reserved;
-	unsigned long global_vector;
-} acpi_entry_platform_src_t;
+	u32 global_vector;
+} acpi20_entry_platform_src_t;
 
+extern int acpi20_parse(acpi20_rsdp_t *);
 extern int acpi_parse(acpi_rsdp_t *);
 extern const char *acpi_get_sysname (void);
 
 extern void (*acpi_idle) (void);	/* power-management idle function, if any */
-
+#pragma	pack()
 #endif /* _ASM_IA64_ACPI_EXT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/acpikcfg.h linux/include/asm-ia64/acpikcfg.h
--- v2.4.0-prerelease/linux/include/asm-ia64/acpikcfg.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/acpikcfg.h	Thu Jan  4 12:50:17 2001
@@ -1,3 +1,4 @@
+#ifdef	CONFIG_ACPI_KERNEL_CONFIG
 /*
  *  acpikcfg.h - ACPI based Kernel Configuration Manager External Interfaces
  *
@@ -5,9 +6,6 @@
  *  Copyright (C) 2000 J.I. Lee  <jung-ik.lee@intel.com>
  */
 
-#include <linux/config.h>
-
-#ifdef	CONFIG_ACPI_KERNEL_CONFIG
 
 u32	__init acpi_cf_init (void * rsdp);
 u32	__init acpi_cf_terminate (void );
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/cache.h linux/include/asm-ia64/cache.h
--- v2.4.0-prerelease/linux/include/asm-ia64/cache.h	Fri Apr 21 15:21:24 2000
+++ linux/include/asm-ia64/cache.h	Thu Jan  4 12:50:17 2001
@@ -9,11 +9,11 @@
  */
 
 /* Bytes per L1 (data) cache line.  */
-#define LOG_L1_CACHE_BYTES	6
-#define L1_CACHE_BYTES		(1 << LOG_L1_CACHE_BYTES)
+#define L1_CACHE_SHIFT		6
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 #ifdef CONFIG_SMP
-# define SMP_LOG_CACHE_BYTES	LOG_L1_CACHE_BYTES
+# define SMP_CACHE_SHIFT	L1_CACHE_SHIFT
 # define SMP_CACHE_BYTES	L1_CACHE_BYTES
 #else
   /*
@@ -21,7 +21,7 @@
    * safe and provides an easy way to avoid wasting space on a
    * uni-processor:
    */
-# define SMP_LOG_CACHE_BYTES	3
+# define SMP_CACHE_SHIFT	3
 # define SMP_CACHE_BYTES	(1 << 3)
 #endif
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/delay.h linux/include/asm-ia64/delay.h
--- v2.4.0-prerelease/linux/include/asm-ia64/delay.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/delay.h	Thu Jan  4 12:50:17 2001
@@ -55,6 +55,10 @@
 	unsigned long result;
 
 	__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
+#ifdef CONFIG_ITANIUM
+	while (__builtin_expect ((__s32) result == -1, 0))
+		__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
+#endif
 	return result;
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/efi.h linux/include/asm-ia64/efi.h
--- v2.4.0-prerelease/linux/include/asm-ia64/efi.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/efi.h	Thu Jan  4 12:50:17 2001
@@ -168,6 +168,9 @@
 #define ACPI_TABLE_GUID    \
     ((efi_guid_t) { 0xeb9d2d30, 0x2d88, 0x11d3, { 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }})
 
+#define ACPI_20_TABLE_GUID    \
+    ((efi_guid_t) { 0x8868e871, 0xe4f1, 0x11d3, { 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 }})
+
 #define SMBIOS_TABLE_GUID    \
     ((efi_guid_t) { 0xeb9d2d31, 0x2d88, 0x11d3, { 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }})
 	
@@ -204,7 +207,8 @@
 extern struct efi {
 	efi_system_table_t *systab;	/* EFI system table */
 	void *mps;			/* MPS table */
-	void *acpi;			/* ACPI table */
+	void *acpi;			/* ACPI table  (IA64 ext 0.71) */
+	void *acpi20;			/* ACPI table  (ACPI 2.0) */
 	void *smbios;			/* SM BIOS table */
 	void *sal_systab;		/* SAL system table */
 	void *boot_info;		/* boot info table */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/hw_irq.h linux/include/asm-ia64/hw_irq.h
--- v2.4.0-prerelease/linux/include/asm-ia64/hw_irq.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/hw_irq.h	Thu Jan  4 12:50:17 2001
@@ -6,8 +6,12 @@
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
+#include <linux/config.h>
+
+#include <linux/sched.h>
 #include <linux/types.h>
 
+#include <asm/machvec.h>
 #include <asm/ptrace.h>
 #include <asm/smp.h>
 
@@ -29,13 +33,22 @@
 
 #define IA64_SPURIOUS_INT	0x0f
 
-#define IA64_MIN_VECTORED_IRQ	 16
-#define IA64_MAX_VECTORED_IRQ	255
+/*
+ * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
+ */
+#define PCE_IRQ			0x1e	/* platform corrected error interrupt vector */
+#define CMC_IRQ			0x1f	/* correctable machine-check interrupt vector */
+/*
+ * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
+ */
+#define FIRST_DEVICE_IRQ	0x30
+#define LAST_DEVICE_IRQ		0xe7
 
-#define PERFMON_IRQ		0x28	/* performanc monitor interrupt vector */
+#define MCA_RENDEZ_IRQ		0xe8	/* MCA rendez interrupt */
+#define PERFMON_IRQ		0xee	/* performance monitor interrupt vector */
 #define TIMER_IRQ		0xef	/* use highest-prio group 15 interrupt for timer */
+#define	MCA_WAKEUP_IRQ		0xf0	/* MCA wakeup interrupt (must be higher than MCA_RENDEZ_IRQ) */
 #define IPI_IRQ			0xfe	/* inter-processor interrupt vector */
-#define CMC_IRQ			0xff	/* correctable machine-check interrupt vector */
 
 /* IA64 inter-cpu interrupt related definitions */
 
@@ -60,12 +73,13 @@
 
 extern struct hw_interrupt_type irq_type_ia64_sapic;	/* CPU-internal interrupt controller */
 
-extern void ipi_send (int cpu, int vector, int delivery_mode, int redirect);
+extern int ia64_alloc_irq (void);	/* allocate a free irq */
+extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 
 static inline void
 hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
 {
-	ipi_send(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
+	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
 }
 
 #endif /* _ASM_IA64_HW_IRQ_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/ia32.h linux/include/asm-ia64/ia32.h
--- v2.4.0-prerelease/linux/include/asm-ia64/ia32.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/ia32.h	Thu Jan  4 12:50:17 2001
@@ -5,6 +5,8 @@
 
 #ifdef CONFIG_IA32_SUPPORT
 
+#include <linux/param.h>
+
 /*
  * 32 bit structures for IA32 support.
  */
@@ -32,6 +34,8 @@
 
 #define IA32_PAGE_SHIFT		12	/* 4KB pages */
 #define IA32_PAGE_SIZE		(1ULL << IA32_PAGE_SHIFT)
+#define IA32_CLOCKS_PER_SEC	100	/* Cast in stone for IA32 Linux */
+#define IA32_TICK(tick)		((unsigned long long)(tick) * IA32_CLOCKS_PER_SEC / CLOCKS_PER_SEC)
 
 /* fcntl.h */
 struct flock32 {
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/io.h linux/include/asm-ia64/io.h
--- v2.4.0-prerelease/linux/include/asm-ia64/io.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/io.h	Thu Jan  4 12:50:17 2001
@@ -29,6 +29,7 @@
 
 # ifdef __KERNEL__
 
+#include <asm/machvec.h>
 #include <asm/page.h>
 #include <asm/system.h>
 
@@ -54,8 +55,7 @@
 #define bus_to_virt	phys_to_virt
 #define virt_to_bus	virt_to_phys
 
-# else /* !KERNEL */
-# endif /* !KERNEL */
+# endif /* KERNEL */
 
 /*
  * Memory fence w/accept.  This should never be used in code that is
@@ -100,7 +100,7 @@
  */
 
 static inline unsigned int
-__inb (unsigned long port)
+__ia64_inb (unsigned long port)
 {
 	volatile unsigned char *addr = __ia64_mk_io_addr(port);
 	unsigned char ret;
@@ -111,7 +111,7 @@
 }
 
 static inline unsigned int
-__inw (unsigned long port)
+__ia64_inw (unsigned long port)
 {
 	volatile unsigned short *addr = __ia64_mk_io_addr(port);
 	unsigned short ret;
@@ -122,7 +122,7 @@
 }
 
 static inline unsigned int
-__inl (unsigned long port)
+__ia64_inl (unsigned long port)
 {
 	volatile unsigned int *addr = __ia64_mk_io_addr(port);
 	unsigned int ret;
@@ -133,112 +133,148 @@
 }
 
 static inline void
-__insb (unsigned long port, void *dst, unsigned long count)
+__ia64_outb (unsigned char val, unsigned long port)
 {
 	volatile unsigned char *addr = __ia64_mk_io_addr(port);
-	unsigned char *dp = dst;
 
+	*addr = val;
 	__ia64_mf_a();
-	while (count--) {
-		*dp++ = *addr;
-	}
-	__ia64_mf_a();
-	return;
 }
 
 static inline void
-__insw (unsigned long port, void *dst, unsigned long count)
+__ia64_outw (unsigned short val, unsigned long port)
 {
 	volatile unsigned short *addr = __ia64_mk_io_addr(port);
-	unsigned short *dp = dst;
 
+	*addr = val;
 	__ia64_mf_a();
-	while (count--) {
-		*dp++ = *addr;
-	}
-	__ia64_mf_a();
-	return;
 }
 
 static inline void
-__insl (unsigned long port, void *dst, unsigned long count)
+__ia64_outl (unsigned int val, unsigned long port)
 {
 	volatile unsigned int *addr = __ia64_mk_io_addr(port);
-	unsigned int *dp = dst;
 
+	*addr = val;
 	__ia64_mf_a();
-	while (count--) {
-		*dp++ = *addr;
-	}
-	__ia64_mf_a();
-	return;
 }
 
 static inline void
-__outb (unsigned char val, unsigned long port)
+__insb (unsigned long port, void *dst, unsigned long count)
 {
-	volatile unsigned char *addr = __ia64_mk_io_addr(port);
+	unsigned char *dp = dst;
 
-	*addr = val;
-	__ia64_mf_a();
+	if (platform_inb == __ia64_inb) {
+		volatile unsigned char *addr = __ia64_mk_io_addr(port);
+
+		__ia64_mf_a();
+		while (count--)
+			*dp++ = *addr;
+		__ia64_mf_a();
+	} else
+		while (count--)
+			*dp++ = platform_inb(port);
+	return;
 }
 
 static inline void
-__outw (unsigned short val, unsigned long port)
+__insw (unsigned long port, void *dst, unsigned long count)
 {
-	volatile unsigned short *addr = __ia64_mk_io_addr(port);
+	unsigned short *dp = dst;
 
-	*addr = val;
-	__ia64_mf_a();
+	if (platform_inw == __ia64_inw) {
+		volatile unsigned short *addr = __ia64_mk_io_addr(port);
+
+		__ia64_mf_a();
+		while (count--)
+			*dp++ = *addr;
+		__ia64_mf_a();
+	} else
+		while (count--)
+			*dp++ = platform_inw(port);
+	return;
 }
 
 static inline void
-__outl (unsigned int val, unsigned long port)
+__insl (unsigned long port, void *dst, unsigned long count)
 {
-	volatile unsigned int *addr = __ia64_mk_io_addr(port);
+	unsigned int *dp = dst;
 
-	*addr = val;
-	__ia64_mf_a();
+	if (platform_inl == __ia64_inl) {
+		volatile unsigned int *addr = __ia64_mk_io_addr(port);
+
+		__ia64_mf_a();
+		while (count--)
+			*dp++ = *addr;
+		__ia64_mf_a();
+	} else
+		while (count--)
+			*dp++ = platform_inl(port);
+	return;
 }
 
 static inline void
 __outsb (unsigned long port, const void *src, unsigned long count)
 {
-	volatile unsigned char *addr = __ia64_mk_io_addr(port);
 	const unsigned char *sp = src;
 
-	while (count--) {
-		*addr = *sp++;
-	}
-	__ia64_mf_a();
+	if (platform_outb == __ia64_outb) {
+		volatile unsigned char *addr = __ia64_mk_io_addr(port);
+
+		while (count--)
+			*addr = *sp++;
+		__ia64_mf_a();
+	} else
+		while (count--)
+			platform_outb(*sp++, port);
 	return;
 }
 
 static inline void
 __outsw (unsigned long port, const void *src, unsigned long count)
 {
-	volatile unsigned short *addr = __ia64_mk_io_addr(port);
 	const unsigned short *sp = src;
 
-	while (count--) {
-		*addr = *sp++;
-	}
-	__ia64_mf_a();
+	if (platform_outw == __ia64_outw) {
+		volatile unsigned short *addr = __ia64_mk_io_addr(port);
+
+		while (count--)
+			*addr = *sp++;
+		__ia64_mf_a();
+	} else
+		while (count--)
+			platform_outw(*sp++, port);
 	return;
 }
 
 static inline void
 __outsl (unsigned long port, void *src, unsigned long count)
 {
-	volatile unsigned int *addr = __ia64_mk_io_addr(port);
 	const unsigned int *sp = src;
 
-	while (count--) {
-		*addr = *sp++;
-	}
-	__ia64_mf_a();
+	if (platform_outl == __ia64_outl) {
+		volatile unsigned int *addr = __ia64_mk_io_addr(port);
+
+		while (count--)
+			*addr = *sp++;
+		__ia64_mf_a();
+	} else
+		while (count--)
+			platform_outl(*sp++, port);
 	return;
 }
+
+/*
+ * Unfortunately, some platforms are broken and do not follow the
+ * IA-64 architecture specification regarding legacy I/O support.
+ * Thus, we have to make these operations platform dependent...
+ */
+#define __inb		platform_inb
+#define __inw		platform_inw
+#define __inl		platform_inl
+#define __outb		platform_outb
+#define __outw		platform_outw
+#define __outl		platform_outl
 
 #define inb		__inb
 #define inw		__inw
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/iosapic.h linux/include/asm-ia64/iosapic.h
--- v2.4.0-prerelease/linux/include/asm-ia64/iosapic.h	Thu Jun 22 07:09:45 2000
+++ linux/include/asm-ia64/iosapic.h	Thu Jan  4 12:50:17 2001
@@ -3,121 +3,60 @@
 
 #include <linux/config.h>
 
-#define	IO_SAPIC_DEFAULT_ADDR	0xFEC00000
+#define	IOSAPIC_DEFAULT_ADDR	0xFEC00000
 
-#define	IO_SAPIC_REG_SELECT	0x0
-#define	IO_SAPIC_WINDOW		0x10
-#define	IO_SAPIC_EOI		0x40
+#define	IOSAPIC_REG_SELECT	0x0
+#define	IOSAPIC_WINDOW		0x10
+#define	IOSAPIC_EOI		0x40
 
-#define	IO_SAPIC_VERSION	0x1
+#define	IOSAPIC_VERSION	0x1
 
 /*
  * Redirection table entry
  */
+#define	IOSAPIC_RTE_LOW(i)	(0x10+i*2)
+#define	IOSAPIC_RTE_HIGH(i)	(0x11+i*2)
 
-#define	IO_SAPIC_RTE_LOW(i)	(0x10+i*2)
-#define	IO_SAPIC_RTE_HIGH(i)	(0x11+i*2)
-
-
-#define	IO_SAPIC_DEST_SHIFT		16
+#define	IOSAPIC_DEST_SHIFT		16
 
 /*
  * Delivery mode
  */
-
-#define	IO_SAPIC_DELIVERY_SHIFT		8
-#define	IO_SAPIC_FIXED			0x0
-#define	IO_SAPIC_LOWEST_PRIORITY	0x1
-#define	IO_SAPIC_PMI			0x2
-#define	IO_SAPIC_NMI			0x4
-#define	IO_SAPIC_INIT			0x5
-#define	IO_SAPIC_EXTINT			0x7
+#define	IOSAPIC_DELIVERY_SHIFT		8
+#define	IOSAPIC_FIXED			0x0
+#define	IOSAPIC_LOWEST_PRIORITY	0x1
+#define	IOSAPIC_PMI			0x2
+#define	IOSAPIC_NMI			0x4
+#define	IOSAPIC_INIT			0x5
+#define	IOSAPIC_EXTINT			0x7
 
 /*
  * Interrupt polarity
  */
-
-#define	IO_SAPIC_POLARITY_SHIFT		13
-#define	IO_SAPIC_POL_HIGH		0
-#define	IO_SAPIC_POL_LOW		1
+#define	IOSAPIC_POLARITY_SHIFT		13
+#define	IOSAPIC_POL_HIGH		0
+#define	IOSAPIC_POL_LOW		1
 
 /*
  * Trigger mode
  */
-
-#define	IO_SAPIC_TRIGGER_SHIFT		15
-#define	IO_SAPIC_EDGE			0
-#define	IO_SAPIC_LEVEL			1
+#define	IOSAPIC_TRIGGER_SHIFT		15
+#define	IOSAPIC_EDGE			0
+#define	IOSAPIC_LEVEL			1
 
 /*
  * Mask bit
  */
-
-#define	IO_SAPIC_MASK_SHIFT		16
-#define	IO_SAPIC_UNMASK			0
-#define	IO_SAPIC_MSAK			1
-
-/*
- * Bus types
- */
-#define  BUS_ISA         0               /* ISA Bus */
-#define  BUS_PCI         1               /* PCI Bus */
-
-#ifndef CONFIG_IA64_PCI_FIRMWARE_IRQ
-struct intr_routing_entry {
-      unsigned char srcbus;
-      unsigned char srcbusno;
-      unsigned char srcbusirq;
-      unsigned char iosapic_pin;
-      unsigned char dstiosapic;
-      unsigned char mode;
-      unsigned char trigger;
-      unsigned char polarity;
-};
-
-extern        struct  intr_routing_entry      intr_routing[];
-#endif
+#define	IOSAPIC_MASK_SHIFT		16
+#define	IOSAPIC_UNMASK			0
+#define	IOSAPIC_MSAK			1
 
 #ifndef __ASSEMBLY__
 
-#include <asm/irq.h>
-
-/*
- * IOSAPIC Version Register return 32 bit structure like:
- * {
- *	unsigned int version   : 8;
- *	unsigned int reserved1 : 8;
- *	unsigned int pins      : 8;
- *	unsigned int reserved2 : 8;
- * }
- */
-extern unsigned int iosapic_version(unsigned long);
-extern void iosapic_init(unsigned long, int);
-
-struct iosapic_vector {
-	unsigned long iosapic_base; /* IOSAPIC Base address */
-        char pin;		    /* IOSAPIC pin (-1 == No data) */
-	unsigned char bus;	    /* Bus number */
-	unsigned char baseirq;	    /* Base IRQ handled by this IOSAPIC */
-	unsigned char bustype;	    /* Bus type (ISA, PCI, etc) */
-	unsigned int busdata;	    /* Bus specific ID */
-        /* These bitfields use the values defined above */
-	unsigned char dmode        : 3;
-	unsigned char polarity     : 1;
-	unsigned char trigger      : 1; 
-	unsigned char UNUSED       : 3;
-};
-extern struct iosapic_vector iosapic_vector[NR_IRQS];
-
-#define iosapic_addr(v)     iosapic_vector[v].iosapic_base
-#define iosapic_pin(v)      iosapic_vector[v].pin
-#define iosapic_bus(v)      iosapic_vector[v].bus
-#define iosapic_baseirq(v)  iosapic_vector[v].baseirq
-#define iosapic_bustype(v)  iosapic_vector[v].bustype
-#define iosapic_busdata(v)  iosapic_vector[v].busdata
-#define iosapic_dmode(v)    iosapic_vector[v].dmode
-#define iosapic_trigger(v)  iosapic_vector[v].trigger
-#define iosapic_polarity(v) iosapic_vector[v].polarity
+extern void __init iosapic_init (unsigned long address, unsigned int base_irq);
+extern void iosapic_register_legacy_irq (unsigned long irq, unsigned long pin,
+					 unsigned long polarity, unsigned long trigger);
+extern void iosapic_pci_fixup (int);
 
 # endif /* !__ASSEMBLY__ */
 #endif /* __ASM_IA64_IOSAPIC_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/machvec.h linux/include/asm-ia64/machvec.h
--- v2.4.0-prerelease/linux/include/asm-ia64/machvec.h	Fri Aug 11 19:09:06 2000
+++ linux/include/asm-ia64/machvec.h	Thu Jan  4 12:50:17 2001
@@ -14,24 +14,46 @@
 #include <linux/types.h>
 
 /* forward declarations: */
-struct hw_interrupt_type;
-struct irq_desc;
-struct mm_struct;
+struct pci_dev;
 struct pt_regs;
-struct task_struct;
-struct timeval;
-struct vm_area_struct;
-struct acpi_entry_iosapic;
+struct scatterlist;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_irq_init_t (void);
-typedef void ia64_mv_pci_fixup_t (void);
+typedef void ia64_mv_pci_fixup_t (int);
 typedef unsigned long ia64_mv_map_nr_t (unsigned long);
 typedef void ia64_mv_mca_init_t (void);
 typedef void ia64_mv_mca_handler_t (void);
 typedef void ia64_mv_cmci_handler_t (int, void *, struct pt_regs *);
 typedef void ia64_mv_log_print_t (void);
-typedef void ia64_mv_register_iosapic_t (struct acpi_entry_iosapic *);
+typedef void ia64_mv_send_ipi_t (int, int, int, int);
+
+/* PCI-DMA interface: */
+typedef void ia64_mv_pci_dma_init (void);
+typedef void *ia64_mv_pci_alloc_consistent (struct pci_dev *, size_t, dma_addr_t *);
+typedef void ia64_mv_pci_free_consistent (struct pci_dev *, size_t, void *, dma_addr_t);
+typedef dma_addr_t ia64_mv_pci_map_single (struct pci_dev *, void *, size_t, int);
+typedef void ia64_mv_pci_unmap_single (struct pci_dev *, dma_addr_t, size_t, int);
+typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int);
+typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int);
+typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int);
+typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int);
+typedef unsigned long ia64_mv_pci_dma_address (struct scatterlist *);
+/*
+ * WARNING: The legacy I/O space is _architected_.  Platforms are
+ * expected to follow this architected model (see Section 10.7 in the
+ * IA-64 Architecture Software Developer's Manual).  Unfortunately,
+ * some broken machines do not follow that model, which is why we have
+ * to make the inX/outX operations part of the machine vector.
+ * Platform designers should follow the architected model whenever
+ * possible.
+ */
+typedef unsigned int ia64_mv_inb_t (unsigned long);
+typedef unsigned int ia64_mv_inw_t (unsigned long);
+typedef unsigned int ia64_mv_inl_t (unsigned long);
+typedef void ia64_mv_outb_t (unsigned char, unsigned long);
+typedef void ia64_mv_outw_t (unsigned short, unsigned long);
+typedef void ia64_mv_outl_t (unsigned int, unsigned long);
 
 extern void machvec_noop (void);
 
@@ -39,7 +61,7 @@
 #  include <asm/machvec_hpsim.h>
 # elif defined (CONFIG_IA64_DIG)
 #  include <asm/machvec_dig.h>
-# elif defined (CONFIG_IA64_SGI_SN1_SIM)
+# elif defined (CONFIG_IA64_SGI_SN1)
 #  include <asm/machvec_sn1.h>
 # elif defined (CONFIG_IA64_GENERIC)
 
@@ -55,7 +77,23 @@
 #  define platform_cmci_handler	ia64_mv.cmci_handler
 #  define platform_log_print	ia64_mv.log_print
 #  define platform_pci_fixup	ia64_mv.pci_fixup
-#  define platform_register_iosapic	ia64_mv.register_iosapic
+#  define platform_send_ipi	ia64_mv.send_ipi
+#  define platform_pci_dma_init		ia64_mv.dma_init
+#  define platform_pci_alloc_consistent	ia64_mv.alloc_consistent
+#  define platform_pci_free_consistent	ia64_mv.free_consistent
+#  define platform_pci_map_single	ia64_mv.map_single
+#  define platform_pci_unmap_single	ia64_mv.unmap_single
+#  define platform_pci_map_sg		ia64_mv.map_sg
+#  define platform_pci_unmap_sg		ia64_mv.unmap_sg
+#  define platform_pci_dma_sync_single	ia64_mv.sync_single
+#  define platform_pci_dma_sync_sg	ia64_mv.sync_sg
+#  define platform_pci_dma_address	ia64_mv.dma_address
+#  define platform_inb		ia64_mv.inb
+#  define platform_inw		ia64_mv.inw
+#  define platform_inl		ia64_mv.inl
+#  define platform_outb		ia64_mv.outb
+#  define platform_outw		ia64_mv.outw
+#  define platform_outl		ia64_mv.outl
 # endif
 
 struct ia64_machine_vector {
@@ -68,7 +106,23 @@
 	ia64_mv_mca_handler_t *mca_handler;
 	ia64_mv_cmci_handler_t *cmci_handler;
 	ia64_mv_log_print_t *log_print;
-	ia64_mv_register_iosapic_t *register_iosapic;
+	ia64_mv_send_ipi_t *send_ipi;
+	ia64_mv_pci_dma_init *dma_init;
+	ia64_mv_pci_alloc_consistent *alloc_consistent;
+	ia64_mv_pci_free_consistent *free_consistent;
+	ia64_mv_pci_map_single *map_single;
+	ia64_mv_pci_unmap_single *unmap_single;
+	ia64_mv_pci_map_sg *map_sg;
+	ia64_mv_pci_unmap_sg *unmap_sg;
+	ia64_mv_pci_dma_sync_single *sync_single;
+	ia64_mv_pci_dma_sync_sg *sync_sg;
+	ia64_mv_pci_dma_address *dma_address;
+	ia64_mv_inb_t *inb;
+	ia64_mv_inw_t *inw;
+	ia64_mv_inl_t *inl;
+	ia64_mv_outb_t *outb;
+	ia64_mv_outw_t *outw;
+	ia64_mv_outl_t *outl;
 };
 
 #define MACHVEC_INIT(name)			\
@@ -82,7 +136,23 @@
 	platform_mca_handler,			\
 	platform_cmci_handler,			\
 	platform_log_print,			\
-	platform_register_iosapic			\
+	platform_send_ipi,			\
+	platform_pci_dma_init,			\
+	platform_pci_alloc_consistent,		\
+	platform_pci_free_consistent,		\
+	platform_pci_map_single,		\
+	platform_pci_unmap_single,		\
+	platform_pci_map_sg,			\
+	platform_pci_unmap_sg,			\
+	platform_pci_dma_sync_single,		\
+	platform_pci_dma_sync_sg,		\
+	platform_pci_dma_address,		\
+	platform_inb,				\
+	platform_inw,				\
+	platform_inl,				\
+	platform_outb,				\
+	platform_outw,				\
+	platform_outl				\
 }
 
 extern struct ia64_machine_vector ia64_mv;
@@ -93,6 +163,20 @@
 # endif /* CONFIG_IA64_GENERIC */
 
 /*
+ * Declare default routines which aren't declared anywhere else:
+ */
+extern ia64_mv_pci_dma_init swiotlb_init;
+extern ia64_mv_pci_alloc_consistent swiotlb_alloc_consistent;
+extern ia64_mv_pci_free_consistent swiotlb_free_consistent;
+extern ia64_mv_pci_map_single swiotlb_map_single;
+extern ia64_mv_pci_unmap_single swiotlb_unmap_single;
+extern ia64_mv_pci_map_sg swiotlb_map_sg;
+extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
+extern ia64_mv_pci_dma_sync_single swiotlb_sync_single;
+extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg;
+extern ia64_mv_pci_dma_address swiotlb_dma_address;
+
+/*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
  */
@@ -117,8 +201,56 @@
 #ifndef platform_pci_fixup
 # define platform_pci_fixup	((ia64_mv_pci_fixup_t *) machvec_noop)
 #endif
-#ifndef platform_register_iosapic
-# define platform_register_iosapic	((ia64_mv_register_iosapic_t *) machvec_noop)
+#ifndef platform_send_ipi
+# define platform_send_ipi	ia64_send_ipi	/* default to architected version */
+#endif
+#ifndef platform_pci_dma_init
+# define platform_pci_dma_init		swiotlb_init
+#endif
+#ifndef platform_pci_alloc_consistent
+# define platform_pci_alloc_consistent	swiotlb_alloc_consistent
+#endif
+#ifndef platform_pci_free_consistent
+# define platform_pci_free_consistent	swiotlb_free_consistent
+#endif
+#ifndef platform_pci_map_single
+# define platform_pci_map_single	swiotlb_map_single
+#endif
+#ifndef platform_pci_unmap_single
+# define platform_pci_unmap_single	swiotlb_unmap_single
+#endif
+#ifndef platform_pci_map_sg
+# define platform_pci_map_sg		swiotlb_map_sg
+#endif
+#ifndef platform_pci_unmap_sg
+# define platform_pci_unmap_sg		swiotlb_unmap_sg
+#endif
+#ifndef platform_pci_dma_sync_single
+# define platform_pci_dma_sync_single	swiotlb_sync_single
+#endif
+#ifndef platform_pci_dma_sync_sg
+# define platform_pci_dma_sync_sg	swiotlb_sync_sg
+#endif
+#ifndef platform_pci_dma_address
+# define  platform_pci_dma_address	swiotlb_dma_address
+#endif
+#ifndef platform_inb
+# define platform_inb		__ia64_inb
+#endif
+#ifndef platform_inw
+# define platform_inw		__ia64_inw
+#endif
+#ifndef platform_inl
+# define platform_inl		__ia64_inl
+#endif
+#ifndef platform_outb
+# define platform_outb		__ia64_outb
+#endif
+#ifndef platform_outw
+# define platform_outw		__ia64_outw
+#endif
+#ifndef platform_outl
+# define platform_outl		__ia64_outl
 #endif
 
 #endif /* _ASM_IA64_MACHVEC_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/machvec_dig.h linux/include/asm-ia64/machvec_dig.h
--- v2.4.0-prerelease/linux/include/asm-ia64/machvec_dig.h	Fri Aug 11 19:09:06 2000
+++ linux/include/asm-ia64/machvec_dig.h	Thu Jan  4 12:50:17 2001
@@ -3,9 +3,8 @@
 
 extern ia64_mv_setup_t dig_setup;
 extern ia64_mv_irq_init_t dig_irq_init;
-extern ia64_mv_pci_fixup_t dig_pci_fixup;
+extern ia64_mv_pci_fixup_t iosapic_pci_fixup;
 extern ia64_mv_map_nr_t map_nr_dense;
-extern ia64_mv_register_iosapic_t dig_register_iosapic;
 
 /*
  * This stuff has dual use!
@@ -17,8 +16,7 @@
 #define platform_name		"dig"
 #define platform_setup		dig_setup
 #define platform_irq_init	dig_irq_init
-#define platform_pci_fixup	dig_pci_fixup
+#define platform_pci_fixup	iosapic_pci_fixup
 #define platform_map_nr		map_nr_dense
-#define platform_register_iosapic dig_register_iosapic
 
 #endif /* _ASM_IA64_MACHVEC_DIG_h */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/machvec_hpsim.h linux/include/asm-ia64/machvec_hpsim.h
--- v2.4.0-prerelease/linux/include/asm-ia64/machvec_hpsim.h	Fri Jul 14 16:08:12 2000
+++ linux/include/asm-ia64/machvec_hpsim.h	Thu Jan  4 12:50:17 2001
@@ -15,7 +15,6 @@
 #define platform_name		"hpsim"
 #define platform_setup		hpsim_setup
 #define platform_irq_init	hpsim_irq_init
-#define platform_pci_fixup	hpsim_pci_fixup
 #define platform_map_nr		map_nr_dense
 
 #endif /* _ASM_IA64_MACHVEC_HPSIM_h */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/machvec_init.h linux/include/asm-ia64/machvec_init.h
--- v2.4.0-prerelease/linux/include/asm-ia64/machvec_init.h	Fri Aug 11 19:09:06 2000
+++ linux/include/asm-ia64/machvec_init.h	Thu Jan  4 12:50:17 2001
@@ -4,6 +4,14 @@
 
 #include <asm/machvec.h>
 
+extern ia64_mv_send_ipi_t ia64_send_ipi;
+extern ia64_mv_inb_t __ia64_inb;
+extern ia64_mv_inw_t __ia64_inw;
+extern ia64_mv_inl_t __ia64_inl;
+extern ia64_mv_outb_t __ia64_outb;
+extern ia64_mv_outw_t __ia64_outw;
+extern ia64_mv_outl_t __ia64_outl;
+
 #define MACHVEC_HELPER(name)									\
  struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec")))	\
 	= MACHVEC_INIT(name);
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/machvec_sn1.h linux/include/asm-ia64/machvec_sn1.h
--- v2.4.0-prerelease/linux/include/asm-ia64/machvec_sn1.h	Sun Feb  6 18:42:40 2000
+++ linux/include/asm-ia64/machvec_sn1.h	Thu Jan  4 12:50:17 2001
@@ -4,6 +4,23 @@
 extern ia64_mv_setup_t sn1_setup;
 extern ia64_mv_irq_init_t sn1_irq_init;
 extern ia64_mv_map_nr_t sn1_map_nr;
+extern ia64_mv_send_ipi_t sn1_send_IPI;
+extern ia64_mv_pci_fixup_t sn1_pci_fixup;
+extern ia64_mv_inb_t sn1_inb;
+extern ia64_mv_inw_t sn1_inw;
+extern ia64_mv_inl_t sn1_inl;
+extern ia64_mv_outb_t sn1_outb;
+extern ia64_mv_outw_t sn1_outw;
+extern ia64_mv_outl_t sn1_outl;
+extern ia64_mv_pci_alloc_consistent	sn1_pci_alloc_consistent;
+extern ia64_mv_pci_free_consistent	sn1_pci_free_consistent;
+extern ia64_mv_pci_map_single		sn1_pci_map_single;
+extern ia64_mv_pci_unmap_single		sn1_pci_unmap_single;
+extern ia64_mv_pci_map_sg		sn1_pci_map_sg;
+extern ia64_mv_pci_unmap_sg		sn1_pci_unmap_sg;
+extern ia64_mv_pci_dma_sync_single	sn1_pci_dma_sync_single;
+extern ia64_mv_pci_dma_sync_sg		sn1_pci_dma_sync_sg;
+extern ia64_mv_pci_dma_address		sn1_dma_address;
 
 /*
  * This stuff has dual use!
@@ -16,5 +33,22 @@
 #define platform_setup		sn1_setup
 #define platform_irq_init	sn1_irq_init
 #define platform_map_nr		sn1_map_nr
+#define platform_send_ipi	sn1_send_IPI
+#define platform_pci_fixup	sn1_pci_fixup
+#define platform_inb		sn1_inb
+#define platform_inw		sn1_inw
+#define platform_inl		sn1_inl
+#define platform_outb		sn1_outb
+#define platform_outw		sn1_outw
+#define platform_outl		sn1_outl
+#define platform_pci_alloc_consistent	sn1_pci_alloc_consistent
+#define platform_pci_free_consistent	sn1_pci_free_consistent
+#define platform_pci_map_single		sn1_pci_map_single
+#define platform_pci_unmap_single	sn1_pci_unmap_single
+#define platform_pci_map_sg		sn1_pci_map_sg
+#define platform_pci_unmap_sg		sn1_pci_unmap_sg
+#define platform_pci_dma_sync_single	sn1_pci_dma_sync_single
+#define platform_pci_dma_sync_sg	sn1_pci_dma_sync_sg
+#define platform_pci_dma_address	sn1_dma_address
 
 #endif /* _ASM_IA64_MACHVEC_SN1_h */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/mca.h linux/include/asm-ia64/mca.h
--- v2.4.0-prerelease/linux/include/asm-ia64/mca.h	Fri Apr 21 15:21:24 2000
+++ linux/include/asm-ia64/mca.h	Thu Jan  4 12:50:17 2001
@@ -18,6 +18,7 @@
 #include <asm/param.h>
 #include <asm/sal.h>
 #include <asm/processor.h>
+#include <asm/hw_irq.h>
 
 /* These are the return codes from all the IA64_MCA specific interfaces */
 typedef	int ia64_mca_return_code_t;
@@ -30,9 +31,9 @@
 #define IA64_MCA_RENDEZ_TIMEOUT		(100 * HZ)	/* 1000 milliseconds */
 
 /* Interrupt vectors reserved for MC handling. */
-#define IA64_MCA_RENDEZ_INT_VECTOR	0xF3	/* Rendez interrupt */
-#define IA64_MCA_WAKEUP_INT_VECTOR	0x12	/* Wakeup interrupt */
-#define IA64_MCA_CMC_INT_VECTOR		0xF2	/* Correctable machine check interrupt */
+#define IA64_MCA_RENDEZ_INT_VECTOR	MCA_RENDEZ_IRQ	/* Rendez interrupt */
+#define IA64_MCA_WAKEUP_INT_VECTOR	MCA_WAKEUP_IRQ	/* Wakeup interrupt */
+#define IA64_MCA_CMC_INT_VECTOR		CMC_IRQ	/* Correctable machine check interrupt */
 
 #define IA64_CMC_INT_DISABLE		0
 #define IA64_CMC_INT_ENABLE		1
@@ -45,11 +46,11 @@
 	u64	cmcv_regval;
 	struct	{
 		u64  	cmcr_vector		: 8;
-		u64	cmcr_ignored1		: 47;
+		u64	cmcr_reserved1		: 4;
+		u64	cmcr_ignored1		: 1;
+		u64	cmcr_reserved2		: 3;
 		u64	cmcr_mask		: 1;
-		u64	cmcr_reserved1		: 3;
-		u64	cmcr_ignored2		: 1;
-		u64	cmcr_reserved2		: 4;
+		u64	cmcr_ignored2		: 47;
 	} cmcv_reg_s;
 
 } cmcv_reg_t;
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/mman.h linux/include/asm-ia64/mman.h
--- v2.4.0-prerelease/linux/include/asm-ia64/mman.h	Fri Apr 21 15:21:24 2000
+++ linux/include/asm-ia64/mman.h	Thu Jan  4 12:50:17 2001
@@ -23,6 +23,8 @@
 #define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
 #define MAP_LOCKED	0x2000		/* pages are locked */
 #define MAP_NORESERVE	0x4000		/* don't check for reservations */
+#define MAP_WRITECOMBINED 0x10000	/* write-combine the area */
+#define MAP_NONCACHED	0x20000		/* don't cache the memory */
 
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/mmu_context.h linux/include/asm-ia64/mmu_context.h
--- v2.4.0-prerelease/linux/include/asm-ia64/mmu_context.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/mmu_context.h	Thu Jan  4 12:50:17 2001
@@ -32,20 +32,11 @@
 
 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
 
-#define IA64_REGION_ID_BITS	18
-
-#ifdef CONFIG_IA64_TLB_CHECKS_REGION_NUMBER
-# define IA64_HW_CONTEXT_BITS	IA64_REGION_ID_BITS
-#else
-# define IA64_HW_CONTEXT_BITS	(IA64_REGION_ID_BITS - 3)
-#endif
-
-#define IA64_HW_CONTEXT_MASK	((1UL << IA64_HW_CONTEXT_BITS) - 1)
-
 struct ia64_ctx {
 	spinlock_t lock;
 	unsigned int next;	/* next context number to use */
 	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
+	unsigned int max_ctx;	/* max. context value supported by all CPUs */
 };
 
 extern struct ia64_ctx ia64_ctx;
@@ -60,11 +51,7 @@
 static inline unsigned long
 ia64_rid (unsigned long context, unsigned long region_addr)
 {
-# ifdef CONFIG_IA64_TLB_CHECKS_REGION_NUMBER
-	return context;
-# else
 	return context << 3 | (region_addr >> 61);
-# endif
 }
 
 static inline void
@@ -108,12 +95,8 @@
 	unsigned long rid_incr = 0;
 	unsigned long rr0, rr1, rr2, rr3, rr4;
 
-	rid = mm->context;
-
-#ifndef CONFIG_IA64_TLB_CHECKS_REGION_NUMBER
-	rid <<= 3;	/* make space for encoding the region number */
+	rid = mm->context << 3;	/* make space for encoding the region number */
 	rid_incr = 1 << 8;
-#endif
 
 	/* encode the region id, preferred page size, and VHPT enable bit: */
 	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
@@ -132,11 +115,10 @@
 }
 
 /*
- * Switch from address space PREV to address space NEXT.  Note that
- * TSK may be NULL.
+ * Switch from address space PREV to address space NEXT.
  */
 static inline void
-switch_mm (struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
 	/*
 	 * We may get interrupts here, but that's OK because interrupt
@@ -147,7 +129,6 @@
 	reload_context(next);
 }
 
-#define activate_mm(prev,next)					\
-	switch_mm((prev), (next), NULL, smp_processor_id())
+#define switch_mm(prev_mm,next_mm,next_task,cpu)	activate_mm(prev_mm, next_mm)
 
 #endif /* _ASM_IA64_MMU_CONTEXT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/module.h linux/include/asm-ia64/module.h
--- v2.4.0-prerelease/linux/include/asm-ia64/module.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/module.h	Thu Jan  4 12:50:17 2001
@@ -75,10 +75,10 @@
 	/*
 	 * Pointers are reasonable, add the module unwind table
 	 */
-	archdata->unw_table = unw_add_unwind_table(mod->name, archdata->segment_base,
+	archdata->unw_table = unw_add_unwind_table(mod->name,
+						   (unsigned long) archdata->segment_base,
 						   (unsigned long) archdata->gp,
-						   (unsigned long) archdata->unw_start,
-						   (unsigned long) archdata->unw_end);
+						   archdata->unw_start, archdata->unw_end);
 #endif /* CONFIG_IA64_NEW_UNWIND */
 	return 0;
 }
@@ -98,7 +98,7 @@
 		archdata = (struct archdata *)(mod->archdata_start);
 
 		if (archdata->unw_table != NULL)
-			unw_remove_unwind_table(archdata->unw_table);
+			unw_remove_unwind_table((void *) archdata->unw_table);
 	}
 #endif /* CONFIG_IA64_NEW_UNWIND */
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/offsets.h linux/include/asm-ia64/offsets.h
--- v2.4.0-prerelease/linux/include/asm-ia64/offsets.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/offsets.h	Thu Jan  4 12:50:18 2001
@@ -11,7 +11,7 @@
 #define PT_PTRACED_BIT			0
 #define PT_TRACESYS_BIT			1
 
-#define IA64_TASK_SIZE			3328	/* 0xd00 */
+#define IA64_TASK_SIZE			3376	/* 0xd30 */
 #define IA64_PT_REGS_SIZE		400	/* 0x190 */
 #define IA64_SWITCH_STACK_SIZE		560	/* 0x230 */
 #define IA64_SIGINFO_SIZE		128	/* 0x80 */
@@ -21,10 +21,10 @@
 #define IA64_TASK_SIGPENDING_OFFSET	16	/* 0x10 */
 #define IA64_TASK_NEED_RESCHED_OFFSET	40	/* 0x28 */
 #define IA64_TASK_PROCESSOR_OFFSET	100	/* 0x64 */
-#define IA64_TASK_THREAD_OFFSET		1424	/* 0x590 */
-#define IA64_TASK_THREAD_KSP_OFFSET	1424	/* 0x590 */
-#define IA64_TASK_THREAD_SIGMASK_OFFSET	3184	/* 0xc70 */
-#define IA64_TASK_PID_OFFSET		188	/* 0xbc */
+#define IA64_TASK_THREAD_OFFSET		1456	/* 0x5b0 */
+#define IA64_TASK_THREAD_KSP_OFFSET	1456	/* 0x5b0 */
+#define IA64_TASK_THREAD_SIGMASK_OFFSET	3224	/* 0xc98 */
+#define IA64_TASK_PID_OFFSET		196	/* 0xc4 */
 #define IA64_TASK_MM_OFFSET		88	/* 0x58 */
 #define IA64_PT_REGS_CR_IPSR_OFFSET	0	/* 0x0 */
 #define IA64_PT_REGS_CR_IIP_OFFSET	8	/* 0x8 */
@@ -115,7 +115,7 @@
 #define IA64_SWITCH_STACK_AR_UNAT_OFFSET	528	/* 0x210 */
 #define IA64_SWITCH_STACK_AR_RNAT_OFFSET	536	/* 0x218 */
 #define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET 544	/* 0x220 */
-#define IA64_SWITCH_STACK_PR_OFFSET	464	/* 0x1d0 */
+#define IA64_SWITCH_STACK_PR_OFFSET	552	/* 0x228 */
 #define IA64_SIGCONTEXT_AR_BSP_OFFSET	72	/* 0x48 */
 #define IA64_SIGCONTEXT_AR_RNAT_OFFSET	80	/* 0x50 */
 #define IA64_SIGCONTEXT_FLAGS_OFFSET	0	/* 0x0 */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/page.h linux/include/asm-ia64/page.h
--- v2.4.0-prerelease/linux/include/asm-ia64/page.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/page.h	Thu Jan  4 12:50:18 2001
@@ -40,9 +40,6 @@
 extern void clear_page (void *page);
 extern void copy_page (void *to, void *from);
 
-#define clear_user_page(page, vaddr)	clear_page(page)
-#define copy_user_page(to, from, vaddr)	copy_page(to, from)
-
 #  ifdef STRICT_MM_TYPECHECKS
 /*
  * These are used to make use of C type-checking..
@@ -58,7 +55,6 @@
 #define pgprot_val(x)	((x).pgprot)
 
 #define __pte(x)	((pte_t) { (x) } )
-#define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
 #  else /* !STRICT_MM_TYPECHECKS */
@@ -93,21 +89,17 @@
  */
 #define MAP_NR_DENSE(addr)	(((unsigned long) (addr) - PAGE_OFFSET) >> PAGE_SHIFT)
 
-/*
- * This variant works well for the SGI SN1 architecture (which does have huge
- * holes in the memory address space).
- */
-#define MAP_NR_SN1(addr)	(((unsigned long) (addr) - PAGE_OFFSET) >> PAGE_SHIFT)
-
 #ifdef CONFIG_IA64_GENERIC
 # include <asm/machvec.h>
-# define virt_to_page(kaddr)   (mem_map + platform_map_nr(kaddr))
-#elif defined (CONFIG_IA64_SN_SN1)
-# define virt_to_page(kaddr)   (mem_map + MAP_NR_SN1(kaddr))
+# define virt_to_page(kaddr)	(mem_map + platform_map_nr(kaddr))
+#elif defined (CONFIG_IA64_SGI_SN1)
+# ifndef CONFIG_DISCONTIGMEM
+#  define virt_to_page(kaddr)	(mem_map + MAP_NR_DENSE(kaddr))
+# endif
 #else
-# define virt_to_page(kaddr)   (mem_map + MAP_NR_DENSE(kaddr))
+# define virt_to_page(kaddr)	(mem_map + MAP_NR_DENSE(kaddr))
 #endif
-#define VALID_PAGE(page)       ((page - mem_map) < max_mapnr)
+#define VALID_PAGE(page)	((page - mem_map) < max_mapnr)
 
 typedef union ia64_va {
 	struct {
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/pci.h linux/include/asm-ia64/pci.h
--- v2.4.0-prerelease/linux/include/asm-ia64/pci.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/pci.h	Thu Jan  4 12:50:18 2001
@@ -1,6 +1,7 @@
 #ifndef _ASM_IA64_PCI_H
 #define _ASM_IA64_PCI_H
 
+#include <linux/config.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -21,125 +22,42 @@
 
 struct pci_dev;
 
-static inline void pcibios_set_master(struct pci_dev *dev)
+static inline void
+pcibios_set_master (struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
 }
 
-static inline void pcibios_penalize_isa_irq(int irq)
+static inline void
+pcibios_penalize_isa_irq (int irq)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
 
 /*
- * Dynamic DMA mapping API.
+ * Dynamic DMA mapping API.  See Documentation/DMA-mapping.txt for details.
  */
+#define pci_alloc_consistent		platform_pci_alloc_consistent
+#define pci_free_consistent		platform_pci_free_consistent
+#define pci_map_single			platform_pci_map_single
+#define pci_unmap_single		platform_pci_unmap_single
+#define pci_map_sg			platform_pci_map_sg
+#define pci_unmap_sg			platform_pci_unmap_sg
+#define pci_dma_sync_single		platform_pci_dma_sync_single
+#define pci_dma_sync_sg			platform_pci_dma_sync_sg
+#define sg_dma_address			platform_pci_dma_address
 
 /*
- * Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices,
- * NULL for PCI-like buses (ISA, EISA).
- * Returns non-NULL cpu-view pointer to the buffer if successful and
- * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
- * is undefined.
- */
-extern void *pci_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
-
-/*
- * Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent (struct pci_dev *hwdev, size_t size,
-				 void *vaddr, dma_addr_t dma_handle);
-
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single is performed.
- */
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
-
-/*
- * Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guarenteed to see
- * whatever the device wrote there.
- */
-extern void pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
-
-/*
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
-
-/*
- * Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
-
-/*
- * Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, the
- * device again owns the buffer.
- */
-extern void pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA
- * translations after a transfer.
- *
- * The same as pci_dma_sync_single but for a scatter-gather list,
- * same rules and usage.
- */
-extern void pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
+ * Return whether the given PCI device DMA address mask can be supported properly.  For
+ * example, if your device can only drive the low 24-bits during PCI bus mastering, then
  * you would pass 0x00ffffff as the mask to this function.
  */
 static inline int
-pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
+pci_dma_supported (struct pci_dev *hwdev, dma_addr_t mask)
 {
 	return 1;
 }
 
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)	(virt_to_bus((sg)->address))
 #define sg_dma_len(sg)		((sg)->length)
 
 #endif /* _ASM_IA64_PCI_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/pgalloc.h linux/include/asm-ia64/pgalloc.h
--- v2.4.0-prerelease/linux/include/asm-ia64/pgalloc.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/pgalloc.h	Thu Jan  4 12:50:18 2001
@@ -15,6 +15,7 @@
 
 #include <linux/config.h>
 
+#include <linux/mm.h>
 #include <linux/threads.h>
 
 #include <asm/mmu_context.h>
@@ -175,11 +176,8 @@
 		if (!pmd_page)
 			pmd_page = get_pmd_slow();
 		if (pmd_page) {
-			if (pgd_none(*pgd)) {
-				pgd_set(pgd, pmd_page);
-				return pmd_page + offset;
-			} else
-				free_pmd_fast(pmd_page);
+			pgd_set(pgd, pmd_page);
+			return pmd_page + offset;
 		} else
 			return NULL;
 	}
@@ -196,13 +194,6 @@
 extern int do_check_pgt_cache (int, int);
 
 /*
- * This establishes kernel virtual mappings (e.g., as a result of a
- * vmalloc call).  Since ia-64 uses a separate kernel page table,
- * there is nothing to do here... :)
- */
-#define set_pgdir(vmaddr, entry)	do { } while(0)
-
-/*
  * Now for some TLB flushing routines.  This is the kind of stuff that
  * can be very expensive, so try to avoid them whenever possible.
  */
@@ -249,7 +240,12 @@
 static __inline__ void
 flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 {
-	flush_tlb_range(vma->vm_mm, addr, addr + PAGE_SIZE);
+#ifdef CONFIG_SMP
+	flush_tlb_range(vma->vm_mm, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
+#else
+	if (vma->vm_mm == current->active_mm)
+		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
+#endif
 }
 
 /*
@@ -259,14 +255,66 @@
 static inline void
 flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
 {
-	/*
-	 * XXX fix mmap(), munmap() et al to guarantee that there are no mappings
-	 * across region boundaries. --davidm 00/02/23
-	 */
-	if (rgn_index(start) != rgn_index(end)) {
+	if (rgn_index(start) != rgn_index(end))
 		printk("flush_tlb_pgtables: can't flush across regions!!\n");
-	}
 	flush_tlb_range(mm, ia64_thash(start), ia64_thash(end));
+}
+
+/*
+ * Now for some cache flushing routines.  This is the kind of stuff
+ * that can be very expensive, so try to avoid them whenever possible.
+ */
+
+/* Caches aren't brain-dead on the IA-64. */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(mm, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_page_to_ram(page)			do { } while (0)
+
+extern void flush_icache_range (unsigned long start, unsigned long end);
+
+static inline void
+flush_dcache_page (struct page *page)
+{
+	clear_bit(PG_arch_1, &page->flags);
+}
+
+static inline void
+clear_user_page (void *addr, unsigned long vaddr, struct page *page)
+{
+	clear_page(addr);
+	flush_dcache_page(page);
+}
+
+static inline void
+copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
+{
+	copy_page(to, from);
+	flush_dcache_page(page);
+}
+
+/*
+ * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
+ * information.  However, we use this macro to take care of any (delayed) i-cache flushing
+ * that may be necessary.
+ */
+static inline void
+update_mmu_cache (struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	struct page *page;
+
+	if (!pte_exec(pte))
+		return;				/* not an executable page... */
+
+	page = pte_page(pte);
+	address &= PAGE_MASK;
+
+	if (test_bit(PG_arch_1, &page->flags))
+		return;				/* i-cache is already coherent with d-cache */
+
+	flush_icache_range(address, address + PAGE_SIZE);
+	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
 #endif /* _ASM_IA64_PGALLOC_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/pgtable.h linux/include/asm-ia64/pgtable.h
--- v2.4.0-prerelease/linux/include/asm-ia64/pgtable.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/pgtable.h	Thu Jan  4 12:50:18 2001
@@ -24,7 +24,11 @@
  * matches the VHPT short format, the firt doubleword of the VHPD long
  * format, and the first doubleword of the TLB insertion format.
  */
-#define _PAGE_P			(1 <<  0)	/* page present bit */
+#define _PAGE_P_BIT		0
+#define _PAGE_A_BIT		5
+#define _PAGE_D_BIT		6
+
+#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
 #define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
 #define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
 #define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
@@ -46,8 +50,8 @@
 #define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
 #define _PAGE_AR_MASK		(7 <<  9)
 #define _PAGE_AR_SHIFT		9
-#define _PAGE_A			(1 <<  5)	/* page accessed bit */
-#define _PAGE_D			(1 <<  6)	/* page dirty bit */
+#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
+#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
 #define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
 #define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
 #define _PAGE_PROTNONE		(__IA64_UL(1) << 63)
@@ -79,7 +83,7 @@
 #define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
 #define PGDIR_MASK		(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD		(__IA64_UL(1) << (PAGE_SHIFT-3))
-#define USER_PTRS_PER_PGD	PTRS_PER_PGD
+#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
 #define FIRST_USER_PGD_NR	0
 
 /*
@@ -98,24 +102,27 @@
  */
 #define PTRS_PER_PTE	(__IA64_UL(1) << (PAGE_SHIFT-3))
 
-/* Number of pointers that fit on a page:  this will go away. */
-#define PTRS_PER_PAGE	(__IA64_UL(1) << (PAGE_SHIFT-3))
-
 # ifndef __ASSEMBLY__
 
 #include <asm/bitops.h>
 #include <asm/mmu_context.h>
+#include <asm/processor.h>
 #include <asm/system.h>
 
 /*
  * All the normal masks have the "page accessed" bits on, as any time
  * they are used, the page is accessed. They are cleared only by the
- * page-out routines
+ * page-out routines.  On the other hand, we do NOT turn on the
+ * execute bit on pages that are mapped writable.  For those pages, we
+ * turn on the X bit only when the program attempts to actually
+ * execute code in such a page (it's a "lazy execute bit", if you
+ * will).  This lets reduce the amount of i-cache flushing we have to
+ * do for data pages such as stack and heap pages.
  */
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
 #define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
 #define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
 #define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
 #define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
 
@@ -132,19 +139,19 @@
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
 #define __P011	PAGE_READONLY	/* ditto */
-#define __P100	__pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __P101	__pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P110	__pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P111	__pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __P110	PAGE_COPY
+#define __P111	PAGE_COPY
 
 #define __S000	PAGE_NONE
 #define __S001	PAGE_READONLY
 #define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
 #define __S011	PAGE_SHARED
-#define __S100	__pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __S101	__pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110	__pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RWX)
-#define __S111	__pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RWX)
+#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
 
 #define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 #define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
@@ -162,30 +169,8 @@
  */
 #define page_address(page)	((page)->virtual)
 
-/*
- * Now for some cache flushing routines.  This is the kind of stuff
- * that can be very expensive, so try to avoid them whenever possible.
- */
-
-/* Caches aren't brain-dead on the ia-64. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_range(mm, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
-#define flush_page_to_ram(page)			do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-
-extern void ia64_flush_icache_page (unsigned long addr);
-
-#define flush_icache_page(vma,pg)				\
-do {								\
-	if ((vma)->vm_flags & PROT_EXEC)			\
-		ia64_flush_icache_page((unsigned long) page_address(pg));	\
-} while (0)
-
 /* Quick test to see if ADDR is a (potentially) valid physical address. */
-static __inline__ long
+static inline long
 ia64_phys_addr_valid (unsigned long addr)
 {
 	return (addr & (my_cpu_data.unimpl_pa_mask)) == 0;
@@ -213,13 +198,17 @@
 
 /*
  * On some architectures, special things need to be done when setting
- * the PTE in a page table.  Nothing special needs to be on ia-64.
+ * the PTE in a page table.  Nothing special needs to be on IA-64.
  */
 #define set_pte(ptep, pteval)	(*(ptep) = (pteval))
 
-#define VMALLOC_START		(0xa000000000000000+2*PAGE_SIZE)
+#define RGN_SIZE	(1UL << 61)
+#define RGN_MAP_LIMIT	(1UL << (4*PAGE_SHIFT - 12))	/* limit of mappable area in region */
+#define RGN_KERNEL	7
+
+#define VMALLOC_START		(0xa000000000000000 + 2*PAGE_SIZE)
 #define VMALLOC_VMADDR(x)	((unsigned long)(x))
-#define VMALLOC_END		0xbfffffffffffffff
+#define VMALLOC_END		(0xa000000000000000 + RGN_MAP_LIMIT)
 
 /*
  * BAD_PAGETABLE is used when we need a bogus page-table, while
@@ -280,19 +269,19 @@
  * The following have defined behavior only work if pte_present() is true.
  */
 #define pte_read(pte)		(((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
-#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) < 4)
-#define pte_dirty(pte)		(pte_val(pte) & _PAGE_D)
-#define pte_young(pte)		(pte_val(pte) & _PAGE_A)
+#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
+#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
+#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
+#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
 /*
- * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the
- * 2nd bit in the access rights:
+ * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
+ * access rights:
  */
 #define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
 #define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
-
+#define pte_mkexec(pte)		(__pte(pte_val(pte) | _PAGE_AR_RX))
 #define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
 #define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
-
 #define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
 #define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
 
@@ -317,7 +306,7 @@
 /*
  * Return the region index for virtual address ADDRESS.
  */
-static __inline__ unsigned long
+static inline unsigned long
 rgn_index (unsigned long address)
 {
 	ia64_va a;
@@ -329,7 +318,7 @@
 /*
  * Return the region offset for virtual address ADDRESS.
  */
-static __inline__ unsigned long
+static inline unsigned long
 rgn_offset (unsigned long address)
 {
 	ia64_va a;
@@ -338,10 +327,7 @@
 	return a.f.off;
 }
 
-#define RGN_SIZE	(1UL << 61)
-#define RGN_KERNEL	7
-
-static __inline__ unsigned long
+static inline unsigned long
 pgd_index (unsigned long address)
 {
 	unsigned long region = address >> 61;
@@ -352,7 +338,7 @@
 
 /* The offset in the 1-level directory is given by the 3 region bits
    (61..63) and the seven level-1 bits (33-39).  */
-static __inline__ pgd_t*
+static inline pgd_t*
 pgd_offset (struct mm_struct *mm, unsigned long address)
 {
 	return mm->pgd + pgd_index(address);
@@ -371,56 +357,102 @@
 #define pte_offset(dir,addr) \
 	((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
+/* atomic versions of the some PTE manipulations: */
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void paging_init (void);
+static inline int
+ptep_test_and_clear_young (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	return test_and_clear_bit(_PAGE_A_BIT, ptep);
+#else
+	pte_t pte = *ptep;
+	if (!pte_young(pte))
+		return 0;
+	set_pte(ptep, pte_mkold(pte));
+	return 1;
+#endif
+}
 
-/*
- * IA-64 doesn't have any external MMU info: the page tables contain
- * all the necessary information.  However, we can use this macro
- * to pre-install (override) a PTE that we know is needed anyhow.
- *
- * Asit says that on Itanium, it is generally faster to let the VHPT
- * walker pick up a newly installed PTE (and VHPT misses should be
- * extremely rare compared to normal misses).  Also, since
- * pre-installing the PTE has the problem that we may evict another
- * TLB entry needlessly because we don't know for sure whether we need
- * to update the iTLB or dTLB, I tend to prefer this solution, too.
- * Also, this avoids nasty issues with forward progress (what if the
- * newly installed PTE gets replaced before we return to the previous
- * execution context?).
- *
- */
-#if 1
-# define update_mmu_cache(vma,address,pte)
+static inline int
+ptep_test_and_clear_dirty (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	return test_and_clear_bit(_PAGE_D_BIT, ptep);
 #else
-# define update_mmu_cache(vma,address,pte)							\
-do {												\
-	/*											\
-	 * XXX fix me!!										\
-	 *											\
-	 * It's not clear this is a win.  We may end up pollute the				\
-	 * dtlb with itlb entries and vice versa (e.g., consider stack				\
-	 * pages that are normally marked executable).  It would be				\
-	 * better to insert the TLB entry for the TLB cache that we				\
-	 * know needs the new entry.  However, the update_mmu_cache()				\
-	 * arguments don't tell us whether we got here through a data				\
-	 * access or through an instruction fetch.  Talk to Linus to				\
-	 * fix this.										\
-	 *											\
-	 * If you re-enable this code, you must disable the ptc code in				\
-	 * Entry 20 of the ivt.									\
-	 */											\
-	unsigned long flags;									\
-												\
-	ia64_clear_ic(flags);									\
-	ia64_itc((vma->vm_flags & PROT_EXEC) ? 0x3 : 0x2, address, pte_val(pte), PAGE_SHIFT);	\
-	__restore_flags(flags);									\
-} while (0)
+	pte_t pte = *ptep;
+	if (!pte_dirty(pte))
+		return 0;
+	set_pte(ptep, pte_mkclean(pte));
+	return 1;
 #endif
+}
+
+static inline pte_t
+ptep_get_and_clear (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	return __pte(xchg((long *) ptep, 0));
+#else
+	pte_t pte = *ptep;
+	pte_clear(ptep);
+	return pte;
+#endif
+}
+
+static inline void
+ptep_set_wrprotect (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	unsigned long new, old;
+
+	do {
+		old = pte_val(*ptep);
+		new = pte_val(pte_wrprotect(__pte (old)));
+	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
+#else
+	pte_t old_pte = *ptep;
+	set_pte(ptep, pte_wrprotect(old_pte));
+#endif
+}
+
+static inline void
+ptep_mkdirty (pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	set_bit(_PAGE_D_BIT, ptep);
+#else
+	pte_t old_pte = *ptep;
+	set_pte(ptep, pte_mkdirty(old_pte));
+#endif
+}
+
+static inline int
+pte_same (pte_t a, pte_t b)
+{
+	return pte_val(a) == pte_val(b);
+}
+
+/*
+ * Macros to check the type of access that triggered a page fault.
+ */
+
+static inline int
+is_write_access (int access_type)
+{
+	return (access_type & 0x2);
+}
+
+static inline int
+is_exec_access (int access_type)
+{
+	return (access_type & 0x4);
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init (void);
 
 #define SWP_TYPE(entry)			(((entry).val >> 1) & 0xff)
-#define SWP_OFFSET(entry)		((entry).val >> 9)
+#define SWP_OFFSET(entry)		(((entry).val << 1) >> 10)
 #define SWP_ENTRY(type,offset)		((swp_entry_t) { ((type) << 1) | ((offset) << 9) })
 #define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define swp_entry_to_pte(x)		((pte_t) { (x).val })
@@ -437,7 +469,8 @@
 extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
-#include <asm-generic/pgtable.h>
+/* We provide our own get_unmapped_area to cope with VA holes for userland */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 # endif /* !__ASSEMBLY__ */
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/processor.h linux/include/asm-ia64/processor.h
--- v2.4.0-prerelease/linux/include/asm-ia64/processor.h	Mon Jan  1 09:38:36 2001
+++ linux/include/asm-ia64/processor.h	Thu Jan  4 12:50:18 2001
@@ -4,7 +4,7 @@
 /*
  * Copyright (C) 1998-2000 Hewlett-Packard Co
  * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1998, 1999 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1998-2000 Stephane Eranian <eranian@hpl.hp.com>
  * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
  * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
  *
@@ -19,15 +19,21 @@
 #include <asm/types.h>
 
 #define IA64_NUM_DBG_REGS	8
-#define IA64_NUM_PM_REGS	4
+/*
+ * Limits for PMC and PMD are set to less than maximum architected values
+ * but should be sufficient for a while
+ */
+#define IA64_NUM_PMC_REGS	32
+#define IA64_NUM_PMD_REGS	32
+#define IA64_NUM_PMD_COUNTERS	4
 
 /*
  * TASK_SIZE really is a mis-named.  It really is the maximum user
- * space address (plus one).  On ia-64, there are five regions of 2TB
+ * space address (plus one).  On IA-64, there are five regions of 2TB
  * each (assuming 8KB page size), for a total of 8TB of user virtual
  * address space.
  */
-#define TASK_SIZE		0xa000000000000000
+#define TASK_SIZE		(current->thread.task_size)
 
 /*
  * This decides where the kernel will search for a free chunk of vm
@@ -157,6 +163,7 @@
 #define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
 #define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
 #define IA64_THREAD_KRBS_SYNCED	(__IA64_UL(1) << 5)	/* krbs synced with process vm? */
+#define IA64_THREAD_MAP_SHARED	(__IA64_UL(1) << 6)	/* ugly: just a tmp flag for mmap() */
 #define IA64_KERNEL_DEATH	(__IA64_UL(1) << 63)	/* see die_if_kernel()... */
 
 #define IA64_THREAD_UAC_SHIFT	3
@@ -242,8 +249,11 @@
 	__u64 usec_per_cyc;	/* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */
 	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
 	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
+	__u64 ptce_base;
+	__u32 ptce_count[2];
+	__u32 ptce_stride[2];
 #ifdef CONFIG_SMP
-	__u64 loops_per_sec;
+	__u64 loops_per_jiffy;
 	__u64 ipi_count;
 	__u64 prof_counter;
 	__u64 prof_multiplier;
@@ -252,12 +262,6 @@
 
 #define my_cpu_data		cpu_data[smp_processor_id()]
 
-#ifdef CONFIG_SMP
-# define ia64_loops_per_sec()	my_cpu_data.loops_per_sec
-#else
-# define ia64_loops_per_sec()	loops_per_sec
-#endif
-
 extern struct cpuinfo_ia64 cpu_data[NR_CPUS];
 
 extern void identify_cpu (struct cpuinfo_ia64 *);
@@ -288,14 +292,20 @@
 	__u64 dbr[IA64_NUM_DBG_REGS];
 	__u64 ibr[IA64_NUM_DBG_REGS];
 #ifdef CONFIG_PERFMON
-	__u64 pmc[IA64_NUM_PM_REGS];
-	__u64 pmd[IA64_NUM_PM_REGS];
-	__u64 pmod[IA64_NUM_PM_REGS];
-# define INIT_THREAD_PM		{0, }, {0, }, {0, },
+	__u64 pmc[IA64_NUM_PMC_REGS];
+	__u64 pmd[IA64_NUM_PMD_REGS];
+	struct {
+		__u64		val;	/* virtual 64bit counter */
+		__u64		rval;	/* reset value on overflow */
+		int		sig;	/* signal used to notify */
+		int		pid;	/* process to notify */
+	} pmu_counters[IA64_NUM_PMD_COUNTERS];
+# define INIT_THREAD_PM		{0, }, {0, }, {{ 0, 0, 0, 0}, },
 #else
 # define INIT_THREAD_PM
 #endif
-	__u64 map_base;			/* base address for mmap() */
+	__u64 map_base;			/* base address for get_unmapped_area() */
+	__u64 task_size;		/* limit for task size */
 #ifdef CONFIG_IA32_SUPPORT
 	__u64 eflag;			/* IA32 EFLAGS reg */
 	__u64 fsr;			/* IA32 floating pt status reg */
@@ -309,7 +319,7 @@
 	union {
 		__u64 sigmask;		/* aligned mask for sigsuspend scall */
 	} un;
-# define INIT_THREAD_IA32	, 0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, 0, {0}
+# define INIT_THREAD_IA32	0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, 0, {0},
 #else
 # define INIT_THREAD_IA32
 #endif /* CONFIG_IA32_SUPPORT */
@@ -328,8 +338,9 @@
 	{0, },				/* dbr */	\
 	{0, },				/* ibr */	\
 	INIT_THREAD_PM					\
-	0x2000000000000000		/* map_base */	\
-	INIT_THREAD_IA32,				\
+	0x2000000000000000,		/* map_base */	\
+	0xa000000000000000,		/* task_size */	\
+	INIT_THREAD_IA32				\
 	0				/* siginfo */	\
 }
 
@@ -422,8 +433,8 @@
 #endif
 
 #ifdef CONFIG_PERFMON
-extern void ia64_save_pm_regs (struct thread_struct *thread);
-extern void ia64_load_pm_regs (struct thread_struct *thread);
+extern void ia64_save_pm_regs (struct task_struct *task);
+extern void ia64_load_pm_regs (struct task_struct *task);
 #endif
 
 #define ia64_fph_enable()	__asm__ __volatile__ (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/ptrace.h linux/include/asm-ia64/ptrace.h
--- v2.4.0-prerelease/linux/include/asm-ia64/ptrace.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/ptrace.h	Thu Jan  4 12:50:18 2001
@@ -2,8 +2,8 @@
 #define _ASM_IA64_PTRACE_H
 
 /*
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 1998, 1999 Stephane Eranian <eranian@hpl.hp.com>
  *
  * 12/07/98	S. Eranian	added pt_regs & switch_stack
@@ -74,6 +74,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/current.h>
+#include <asm/page.h>
+
 /*
  * This struct defines the way the registers are saved on system
  * calls.
@@ -236,7 +239,14 @@
 
   extern void ia64_increment_ip (struct pt_regs *pt);
   extern void ia64_decrement_ip (struct pt_regs *pt);
-#endif
+
+static inline void
+force_successful_syscall_return (void)
+{
+	ia64_task_regs(current)->r8 = 0;
+}
+
+#endif /* !__KERNEL__ */
 
 #endif /* !__ASSEMBLY__ */
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sal.h linux/include/asm-ia64/sal.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sal.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/sal.h	Thu Jan  4 12:50:18 2001
@@ -24,7 +24,9 @@
 
 extern spinlock_t sal_lock;
 
-#define __SAL_CALL(result,args...)	result = (*ia64_sal)(args)
+/* SAL spec _requires_ eight args for each call. */
+#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7)	\
+	result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
 
 #ifdef CONFIG_SMP
 # define SAL_CALL(result,args...) do {		\
@@ -60,10 +62,10 @@
 	 * informational value should be printed (e.g., "reboot for
 	 * change to take effect").
 	 */
-	s64 	status;
-	u64 	v0;
-	u64 	v1;
-	u64 	v2;
+	s64 status;
+	u64 v0;
+	u64 v1;
+	u64 v2;
 };
 
 typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
@@ -78,24 +80,27 @@
  * The SAL system table is followed by a variable number of variable
  * length descriptors.  The structure of these descriptors follows
  * below.
+ * The definition follows SAL specs from July 2000
  */
 struct ia64_sal_systab {
-	char signature[4];	/* should be "SST_" */
-	int size;		/* size of this table in bytes */
-	unsigned char sal_rev_minor;
-	unsigned char sal_rev_major;
-	unsigned short entry_count;	/* # of entries in variable portion */
-	unsigned char checksum;
-	char ia32_bios_present;
-	unsigned short reserved1;
-	char oem_id[32];	/* ASCII NUL terminated OEM id
-				   (terminating NUL is missing if
-				   string is exactly 32 bytes long). */
-	char product_id[32];	/* ASCII product id  */
-	char reserved2[16];
+	u8 signature[4];	/* should be "SST_" */
+	u32 size;		/* size of this table in bytes */
+	u8 sal_rev_minor;
+	u8 sal_rev_major;
+	u16 entry_count;	/* # of entries in variable portion */
+	u8 checksum;
+	u8 reserved1[7];
+	u8 sal_a_rev_minor;
+	u8 sal_a_rev_major;
+	u8 sal_b_rev_minor;
+	u8 sal_b_rev_major;
+	/* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
+	u8 oem_id[32];
+	u8 product_id[32];	/* ASCII product id  */
+	u8 reserved2[8];
 };
 
-enum SAL_Systab_Entry_Type {
+enum sal_systab_entry_type {
 	SAL_DESC_ENTRY_POINT = 0,
 	SAL_DESC_MEMORY = 1,
 	SAL_DESC_PLATFORM_FEATURE = 2,
@@ -115,75 +120,78 @@
  */
 #define SAL_DESC_SIZE(type)	"\060\040\020\040\020\020"[(unsigned) type]
 
-struct ia64_sal_desc_entry_point {
-	char type;
-	char reserved1[7];
-	s64 pal_proc;
-	s64 sal_proc;
-	s64 gp;
-	char reserved2[16];
-};
-
-struct ia64_sal_desc_memory {
-	char type;
-	char used_by_sal;	/* needs to be mapped for SAL? */
-	char mem_attr;		/* current memory attribute setting */
-	char access_rights;	/* access rights set up by SAL */
-	char mem_attr_mask;	/* mask of supported memory attributes */
-	char reserved1;
-	char mem_type;		/* memory type */
-	char mem_usage;		/* memory usage */
-	s64 addr;		/* physical address of memory */
-	unsigned int length;	/* length (multiple of 4KB pages) */
-	unsigned int reserved2;
-	char oem_reserved[8];
-};
+typedef struct ia64_sal_desc_entry_point {
+	u8 type;
+	u8 reserved1[7];
+	u64 pal_proc;
+	u64 sal_proc;
+	u64 gp;
+	u8 reserved2[16];
+}ia64_sal_desc_entry_point_t;
+
+typedef struct ia64_sal_desc_memory {
+	u8 type;
+	u8 used_by_sal;	/* needs to be mapped for SAL? */
+	u8 mem_attr;		/* current memory attribute setting */
+	u8 access_rights;	/* access rights set up by SAL */
+	u8 mem_attr_mask;	/* mask of supported memory attributes */
+	u8 reserved1;
+	u8 mem_type;		/* memory type */
+	u8 mem_usage;		/* memory usage */
+	u64 addr;		/* physical address of memory */
+	u32 length;	/* length (multiple of 4KB pages) */
+	u32 reserved2;
+	u8 oem_reserved[8];
+} ia64_sal_desc_memory_t;
 
 #define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK		(1 << 0)
 #define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT	(1 << 1)
 #define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT	(1 << 2)
 
-struct ia64_sal_desc_platform_feature {
-	char type;
-	unsigned char feature_mask;
-	char reserved1[14];
-};
-
-struct ia64_sal_desc_tr {
-	char type;
-	char tr_type;		/* 0 == instruction, 1 == data */
-	char regnum;		/* translation register number */
-	char reserved1[5];
-	s64 addr;		/* virtual address of area covered */
-	s64 page_size;		/* encoded page size */
-	char reserved2[8];
-};
+typedef struct ia64_sal_desc_platform_feature {
+	u8 type;
+	u8 feature_mask;
+	u8 reserved1[14];
+} ia64_sal_desc_platform_feature_t;
+
+typedef struct ia64_sal_desc_tr {
+	u8 type;
+	u8 tr_type;		/* 0 == instruction, 1 == data */
+	u8 regnum;		/* translation register number */
+	u8 reserved1[5];
+	u64 addr;		/* virtual address of area covered */
+	u64 page_size;		/* encoded page size */
+	u8 reserved2[8];
+} ia64_sal_desc_tr_t;
 
 typedef struct ia64_sal_desc_ptc {
-	char type;
-	char reserved1[3];
-	unsigned int num_domains;	/* # of coherence domains */
-	s64  domain_info;		/* physical address of domain info table */
+	u8 type;
+	u8 reserved1[3];
+	u32 num_domains;	/* # of coherence domains */
+	u64 domain_info;	/* physical address of domain info table */
 } ia64_sal_desc_ptc_t;
 
 typedef struct ia64_sal_ptc_domain_info {
-	unsigned long proc_count;	/* number of processors in domain */
-	long proc_list;			/* physical address of LID array */
+	u64 proc_count;		/* number of processors in domain */
+	u64 proc_list;		/* physical address of LID array */
 } ia64_sal_ptc_domain_info_t;
 
 typedef struct ia64_sal_ptc_domain_proc_entry {
-	unsigned char id;		/* id of processor */
-	unsigned char eid;		/* eid of processor */
+	u64 reserved : 16;
+	u64 eid : 8;		/* eid of processor */
+	u64 id  : 8;		/* id of processor */
+	u64 ignored : 32;
 } ia64_sal_ptc_domain_proc_entry_t;
 
+
 #define IA64_SAL_AP_EXTERNAL_INT 0
 
-struct ia64_sal_desc_ap_wakeup {
-	char type;
-	char mechanism;		/* 0 == external interrupt */
-	char reserved1[6];
-	long vector;		/* interrupt vector in range 0x10-0xff */
-};
+typedef struct ia64_sal_desc_ap_wakeup {
+	u8 type;
+	u8 mechanism;		/* 0 == external interrupt */
+	u8 reserved1[6];
+	u64 vector;		/* interrupt vector in range 0x10-0xff */
+} ia64_sal_desc_ap_wakeup_t ;
 
 extern ia64_sal_handler ia64_sal;
 extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
@@ -218,24 +226,24 @@
 
 /* Encodings for vectors which can be registered by the OS with SAL */
 enum {
-	SAL_VECTOR_OS_MCA		=	0,
-	SAL_VECTOR_OS_INIT		=	1,
-	SAL_VECTOR_OS_BOOT_RENDEZ	=	2
+	SAL_VECTOR_OS_MCA		= 0,
+	SAL_VECTOR_OS_INIT		= 1,
+	SAL_VECTOR_OS_BOOT_RENDEZ	= 2
 };
 
 /* Definition of the SAL Error Log from the SAL spec */
 
 /* Definition of timestamp according to SAL spec for logging purposes */
 
-typedef struct sal_log_timestamp_s {
-	u8	slh_century;				/* Century (19, 20, 21, ...) */
-	u8	slh_year;				/* Year (00..99) */
-	u8	slh_month;				/* Month (1..12) */
-	u8	slh_day;				/* Day (1..31) */
-	u8	slh_reserved;					
-	u8	slh_hour;				/* Hour (0..23)	*/
-	u8	slh_minute;				/* Minute (0..59) */
-	u8	slh_second;				/* Second (0..59) */
+typedef struct sal_log_timestamp {
+	u8 slh_century;		/* Century (19, 20, 21, ...) */
+	u8 slh_year;		/* Year (00..99) */
+	u8 slh_month;		/* Month (1..12) */
+	u8 slh_day;		/* Day (1..31) */
+	u8 slh_reserved;					
+	u8 slh_hour;		/* Hour (0..23)	*/
+	u8 slh_minute;		/* Minute (0..59) */
+	u8 slh_second;		/* Second (0..59) */
 } sal_log_timestamp_t;
 
 
@@ -243,126 +251,126 @@
 #define MAX_TLB_ERRORS				6
 #define MAX_BUS_ERRORS				1
 
-typedef struct sal_log_processor_info_s {
+typedef struct sal_log_processor_info {
 	struct	{
-		u64		slpi_psi	: 1,
-				slpi_cache_check: MAX_CACHE_ERRORS,
-				slpi_tlb_check	: MAX_TLB_ERRORS,
-				slpi_bus_check	: MAX_BUS_ERRORS,
-				slpi_reserved2	: (31 - (MAX_TLB_ERRORS + MAX_CACHE_ERRORS
-							 + MAX_BUS_ERRORS)),
-				slpi_minstate	: 1,
-				slpi_bank1_gr	: 1,
-				slpi_br		: 1,
-				slpi_cr		: 1,
-				slpi_ar		: 1,
-				slpi_rr		: 1,
-				slpi_fr		: 1,
-				slpi_reserved1	: 25;
+		u64 slpi_psi		: 1,
+		    slpi_cache_check: MAX_CACHE_ERRORS,
+		    slpi_tlb_check	: MAX_TLB_ERRORS,
+		    slpi_bus_check	: MAX_BUS_ERRORS,
+		    slpi_reserved2	: (31 - (MAX_TLB_ERRORS + MAX_CACHE_ERRORS
+		    			 + MAX_BUS_ERRORS)),
+		    slpi_minstate	: 1,
+		    slpi_bank1_gr	: 1,
+		    slpi_br		: 1,
+		    slpi_cr		: 1,
+		    slpi_ar		: 1,
+		    slpi_rr		: 1,
+		    slpi_fr		: 1,
+		    slpi_reserved1	: 25;
 	} slpi_valid;
 
-	pal_processor_state_info_t	slpi_processor_state_info;
+	pal_processor_state_info_t slpi_processor_state_info;
 
 	struct {
-		pal_cache_check_info_t	slpi_cache_check;
-		u64			slpi_target_address;
+		pal_cache_check_info_t slpi_cache_check;
+		u64 slpi_target_address;
 	} slpi_cache_check_info[MAX_CACHE_ERRORS];
 		
-	pal_tlb_check_info_t		slpi_tlb_check_info[MAX_TLB_ERRORS];
+	pal_tlb_check_info_t slpi_tlb_check_info[MAX_TLB_ERRORS];
 
 	struct {
-		pal_bus_check_info_t	slpi_bus_check;
-		u64			slpi_requestor_addr;	
-		u64			slpi_responder_addr;	
-		u64			slpi_target_addr;
+		pal_bus_check_info_t slpi_bus_check;
+		u64 slpi_requestor_addr;	
+		u64 slpi_responder_addr;	
+		u64 slpi_target_addr;
 	} slpi_bus_check_info[MAX_BUS_ERRORS];
 
-	pal_min_state_area_t		slpi_min_state_area;
-	u64				slpi_br[8];
-	u64				slpi_cr[128];
-	u64				slpi_ar[128];
-	u64				slpi_rr[8];
-	u64				slpi_fr[128];
+	pal_min_state_area_t slpi_min_state_area;
+	u64 slpi_br[8];
+	u64 slpi_cr[128];
+	u64 slpi_ar[128];
+	u64 slpi_rr[8];
+	u64 slpi_fr[128];
 } sal_log_processor_info_t;
 
 /* platform error log structures */
 typedef struct platerr_logheader {
-	u64	nextlog;	/* next log offset if present */
-	u64	loglength;	/* log length */
-	u64	logsubtype;	/* log subtype memory/bus/component */
-	u64	eseverity;	/* error severity */
+	u64 nextlog;		/* next log offset if present */
+	u64 loglength;		/* log length */
+	u64 logsubtype;		/* log subtype memory/bus/component */
+	u64 eseverity;		/* error severity */
 } ehdr_t;
 
 typedef struct sysmem_errlog {
-	ehdr_t	lhdr;		/* header */
-	u64	vflag;		/* valid bits for each field in the log */
-	u64	addr;		/* memory address */
-	u64	data;		/* memory data */
-	u64	cmd;		/* command bus value if any */
-	u64	ctrl;		/* control bus value if any */
-	u64	addrsyndrome;	/* memory address ecc/parity syndrome bits */
-	u64	datasyndrome;	/* data ecc/parity syndrome */
-	u64	cacheinfo;	/* platform cache info as defined in pal spec. table 7-34 */
+	ehdr_t lhdr;		/* header */
+	u64 vflag;		/* valid bits for each field in the log */
+	u64 addr;		/* memory address */
+	u64 data;		/* memory data */
+	u64 cmd;		/* command bus value if any */
+	u64 ctrl;		/* control bus value if any */
+	u64 addrsyndrome;	/* memory address ecc/parity syndrome bits */
+	u64 datasyndrome;	/* data ecc/parity syndrome */
+	u64 cacheinfo;		/* platform cache info as defined in pal spec. table 7-34 */
 } merrlog_t;
 
 typedef struct sysbus_errlog {
-	ehdr_t	lhdr;		/* linkded list header */
-	u64	vflag;		/* valid bits for each field in the log */
-	u64	busnum;		/* bus number in error */
-	u64	reqaddr;	/* requestor address */
-	u64	resaddr;	/* responder address */
-	u64	taraddr;	/* target address */
-	u64	data;		/* requester r/w data */
-	u64	cmd;		/* bus commands */
-	u64	ctrl;		/* bus controls (be# &-0) */
-	u64	addrsyndrome;	/* addr bus ecc/parity bits */
-	u64	datasyndrome;	/* data bus ecc/parity bits */
-	u64	cmdsyndrome;	/* command bus ecc/parity bits */
-	u64	ctrlsyndrome;	/* control bus ecc/parity bits */
+	ehdr_t lhdr;		/* linked list header */
+	u64 vflag;		/* valid bits for each field in the log */
+	u64 busnum;		/* bus number in error */
+	u64 reqaddr;		/* requestor address */
+	u64 resaddr;		/* responder address */
+	u64 taraddr;		/* target address */
+	u64 data;		/* requester r/w data */
+	u64 cmd;		/* bus commands */
+	u64 ctrl;		/* bus controls (be# &-0) */
+	u64 addrsyndrome;	/* addr bus ecc/parity bits */
+	u64 datasyndrome;	/* data bus ecc/parity bits */
+	u64 cmdsyndrome;	/* command bus ecc/parity bits */
+	u64 ctrlsyndrome;	/* control bus ecc/parity bits */
 } berrlog_t;
 
 /* platform error log structures */
 typedef struct syserr_chdr {	/* one header per component */
-	u64	busnum;		/* bus number on which the component resides */
-	u64	devnum;		/* same as device select */
-	u64	funcid;		/* function id of the device */
-	u64	devid;		/* pci device id */
-	u64	classcode;	/* pci class code for the device */
-	u64	cmdreg;		/* pci command reg value */
-	u64	statreg;	/* pci status reg value */
+	u64 busnum;		/* bus number on which the component resides */
+	u64 devnum;		/* same as device select */
+	u64 funcid;		/* function id of the device */
+	u64 devid;		/* pci device id */
+	u64 classcode;		/* pci class code for the device */
+	u64 cmdreg;		/* pci command reg value */
+	u64 statreg;		/* pci status reg value */
 } chdr_t;
 
 typedef struct cfginfo {
-	u64	cfgaddr;
-	u64	cfgval;
+	u64 cfgaddr;
+	u64 cfgval;
 } cfginfo_t;
 
 typedef struct sys_comperr {	/* per component */
-	ehdr_t	lhdr;		/* linked list header */
-	u64	vflag;		/* valid bits for each field in the log */
-	chdr_t	scomphdr;	
-	u64	numregpair;	/* number of reg addr/value pairs */
+	ehdr_t lhdr;		/* linked list header */
+	u64 vflag;		/* valid bits for each field in the log */
+	chdr_t scomphdr;	
+	u64 numregpair;		/* number of reg addr/value pairs */
 	cfginfo_t cfginfo;
 } cerrlog_t;
 
 typedef struct sel_records {
-	ehdr_t	lhdr;
-	u64	seldata;
+	ehdr_t lhdr;
+	u64 seldata;
 } isel_t;
 
 typedef struct plat_errlog {
-	u64	      mbcsvalid;	/* valid bits for each type of log */
-	merrlog_t     smemerrlog;	/* platform memory error logs */
-	berrlog_t     sbuserrlog;	/* platform bus error logs */
-	cerrlog_t     scomperrlog;	/* platform chipset error logs */
-	isel_t	      selrecord;	/* ipmi sel record */
+	u64 mbcsvalid;		/* valid bits for each type of log */
+	merrlog_t smemerrlog;	/* platform memory error logs */
+	berrlog_t sbuserrlog;	/* platform bus error logs */
+	cerrlog_t scomperrlog;	/* platform chipset error logs */
+	isel_t selrecord;	/* ipmi sel record */
 } platforminfo_t;
 
 /* over all log structure (processor+platform) */
 
 typedef union udev_specific_log {
-	sal_log_processor_info_t  proclog;
-	platforminfo_t		  platlog;
+	sal_log_processor_info_t proclog;
+	platforminfo_t platlog;
 } devicelog_t;
 
 
@@ -378,21 +386,18 @@
 #define sal_log_processor_info_rr_valid			slpi_valid.slpi_rr
 #define sal_log_processor_info_fr_valid			slpi_valid.slpi_fr
 
-typedef struct sal_log_header_s {
-	u64			slh_next_log;		/* Offset of the next log from the 
-							 * beginning of  this structure.
-							 */
-	uint			slh_log_len;		/* Length of this error log in bytes */
-	ushort			slh_log_type;		/* Type of log (0 - cpu ,1 - platform) */
-	ushort			slh_log_sub_type;	/* SGI specific sub type */
-	sal_log_timestamp_t	slh_log_timestamp;	/* Timestamp */
+typedef struct sal_log_header {
+	u64 slh_next_log;	/* Offset of the next log from the beginning of this structure */
+	u32 slh_log_len;	/* Length of this error log in bytes */
+	u16 slh_log_type;	/* Type of log (0 - cpu ,1 - platform) */
+	u16 slh_log_sub_type;	/* SGI specific sub type */
+	sal_log_timestamp_t slh_log_timestamp;	/* Timestamp */
 } sal_log_header_t;
 
 /* SAL PSI log structure */
-typedef struct psilog
-{
-	sal_log_header_t   sal_elog_header;
-	devicelog_t        devlog;
+typedef struct psilog {
+	sal_log_header_t sal_elog_header;
+	devicelog_t devlog;
 } ia64_psilog_t;
 
 /*
@@ -405,7 +410,7 @@
 {
 	struct ia64_sal_retval isrv;
 
-	SAL_CALL(isrv, SAL_FREQ_BASE, which);
+	SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
 	*ticks_per_second = isrv.v0;
 	*drift_info = isrv.v1;
 	return isrv.status;
@@ -416,7 +421,7 @@
 ia64_sal_cache_flush (u64 cache_type)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type);
+	SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
 	return isrv.status;
 }
 
@@ -427,7 +432,7 @@
 ia64_sal_cache_init (void)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_CACHE_INIT);
+	SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
 	return isrv.status;
 }
 
@@ -438,7 +443,8 @@
 ia64_sal_clear_state_info (u64 sal_info_type, u64 sal_info_sub_type)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, sal_info_sub_type);
+	SAL_CALL(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, sal_info_sub_type,
+	         0, 0, 0, 0, 0);
 	return isrv.status;
 }
 
@@ -450,7 +456,8 @@
 ia64_sal_get_state_info (u64 sal_info_type, u64 sal_info_sub_type, u64 *sal_info)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_GET_STATE_INFO, sal_info_type, sal_info_sub_type, sal_info);
+	SAL_CALL(isrv, SAL_GET_STATE_INFO, sal_info_type, sal_info_sub_type,
+	         sal_info, 0, 0, 0, 0);
 	if (isrv.status)
 		return 0;
 	return isrv.v0;
@@ -462,7 +469,8 @@
 ia64_sal_get_state_info_size (u64 sal_info_type, u64 sal_info_sub_type)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, sal_info_sub_type);
+	SAL_CALL(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, sal_info_sub_type,
+	         0, 0, 0, 0, 0);
 	if (isrv.status)
 		return 0;
 	return isrv.v0;
@@ -475,7 +483,7 @@
 ia64_sal_mc_rendez (void)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_MC_RENDEZ);
+	SAL_CALL(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
 	return isrv.status;
 }
 
@@ -487,7 +495,8 @@
 ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val, timeout);
+	SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val, timeout,
+	         0, 0, 0);
 	return isrv.status;
 }
 
@@ -496,19 +505,7 @@
 ia64_sal_pci_config_read (u64 pci_config_addr, u64 size, u64 *value)
 {
 	struct ia64_sal_retval isrv;
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	extern spinlock_t ivr_read_lock;
-	unsigned long flags;
-
-	/*
-	 * Avoid PCI configuration read/write overwrite -- A0 Interrupt loss workaround
-	 */
-	spin_lock_irqsave(&ivr_read_lock, flags);
-#endif
-	SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size);
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	spin_unlock_irqrestore(&ivr_read_lock, flags);
-#endif
+	SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, 0, 0, 0, 0, 0);
 	if (value)
 		*value = isrv.v0;
 	return isrv.status;
@@ -519,19 +516,8 @@
 ia64_sal_pci_config_write (u64 pci_config_addr, u64 size, u64 value)
 {
 	struct ia64_sal_retval isrv;
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	extern spinlock_t ivr_read_lock;
-	unsigned long flags;
-
-	/*
-	 * Avoid PCI configuration read/write overwrite -- A0 Interrupt loss workaround
-	 */
-	spin_lock_irqsave(&ivr_read_lock, flags);
-#endif
-	SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value);
-#ifdef CONFIG_ITANIUM_A1_SPECIFIC
-	spin_unlock_irqrestore(&ivr_read_lock, flags);
-#endif
+	SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
+	         0, 0, 0, 0);
 	return isrv.status;
 }
 
@@ -543,7 +529,8 @@
 ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr);
+	SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
+	         0, 0, 0, 0, 0);
 	return isrv.status;
 }
 
@@ -569,7 +556,8 @@
 		     u64 *error_code, u64 *scratch_buf_size_needed)
 {
 	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size);
+	SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
+	         0, 0, 0, 0);
 	if (error_code)
 		*error_code = isrv.v0;
 	if (scratch_buf_size_needed)
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/shmparam.h linux/include/asm-ia64/shmparam.h
--- v2.4.0-prerelease/linux/include/asm-ia64/shmparam.h	Sun Feb  6 18:42:40 2000
+++ linux/include/asm-ia64/shmparam.h	Thu Jan  4 12:50:18 2001
@@ -1,6 +1,12 @@
 #ifndef _ASM_IA64_SHMPARAM_H
 #define _ASM_IA64_SHMPARAM_H
 
-#define	SHMLBA	PAGE_SIZE		/* attach addr a multiple of this */
+/*
+ * SHMLBA controls minimum alignment at which shared memory segments
+ * get attached.  The IA-64 architecture says that there may be a
+ * performance degradation when there are virtual aliases within 1MB.
+ * To reduce the chance of this, we set SHMLBA to 1MB. --davidm 00/12/20
+ */
+#define	SHMLBA	(1024*1024)
 
 #endif /* _ASM_IA64_SHMPARAM_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/addrs.h linux/include/asm-ia64/sn/addrs.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/addrs.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/addrs.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,545 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SN_ADDRS_H
+#define _ASM_SN_ADDRS_H
+
+#if _LANGUAGE_C
+#include <linux/types.h>
+#endif /* _LANGUAGE_C */
+
+#if !defined(CONFIG_IA64_SGI_SN1) && !defined(CONFIG_IA64_GENERIC)
+#include <asm/addrspace.h>
+#include <asm/reg.h>
+#include <asm/sn/kldir.h>
+#endif	/* CONFIG_IA64_SGI_SN1 */
+
+#if defined(CONFIG_IA64_SGI_IO)
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/addrs.h>
+#endif
+#endif	/* CONFIG_IA64_SGI_IO */
+
+
+#if _LANGUAGE_C
+
+#if defined(CONFIG_IA64_SGI_IO)	/* FIXME */
+#define PS_UINT_CAST		(__psunsigned_t)
+#define UINT64_CAST		(uint64_t)
+#else	/* CONFIG_IA64_SGI_IO */
+#define PS_UINT_CAST		(unsigned long)
+#define UINT64_CAST		(unsigned long)
+#endif	/* CONFIG_IA64_SGI_IO */
+
+#define HUBREG_CAST		(volatile hubreg_t *)
+
+#elif _LANGUAGE_ASSEMBLY
+
+#define PS_UINT_CAST
+#define UINT64_CAST
+#define HUBREG_CAST
+
+#endif
+
+
+#define NASID_GET_META(_n)	((_n) >> NASID_LOCAL_BITS)
+#if defined CONFIG_SGI_IP35 || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define NASID_GET_LOCAL(_n)	((_n) & 0x7f)
+#endif
+#define NASID_MAKE(_m, _l)	(((_m) << NASID_LOCAL_BITS) | (_l))
+
+#define NODE_ADDRSPACE_MASK	(NODE_ADDRSPACE_SIZE - 1)
+#define TO_NODE_ADDRSPACE(_pa)	(UINT64_CAST (_pa) & NODE_ADDRSPACE_MASK)
+
+#define CHANGE_ADDR_NASID(_pa, _nasid)	\
+		((UINT64_CAST (_pa) & ~NASID_MASK) | \
+		 (UINT64_CAST(_nasid) <<  NASID_SHFT))
+
+
+/*
+ * The following macros are used to index to the beginning of a specific
+ * node's address space.
+ */
+
+#define NODE_OFFSET(_n)		(UINT64_CAST (_n) << NODE_SIZE_BITS)
+
+#define NODE_CAC_BASE(_n)	(CAC_BASE   + NODE_OFFSET(_n))
+#define NODE_HSPEC_BASE(_n)	(HSPEC_BASE + NODE_OFFSET(_n))
+#define NODE_IO_BASE(_n)	(IO_BASE    + NODE_OFFSET(_n))
+#define NODE_MSPEC_BASE(_n)	(MSPEC_BASE + NODE_OFFSET(_n))
+#define NODE_UNCAC_BASE(_n)	(UNCAC_BASE + NODE_OFFSET(_n))
+
+#define TO_NODE(_n, _x)		(NODE_OFFSET(_n)     | ((_x)		   ))
+#define TO_NODE_CAC(_n, _x)	(NODE_CAC_BASE(_n)   | ((_x) & TO_PHYS_MASK))
+#define TO_NODE_UNCAC(_n, _x)	(NODE_UNCAC_BASE(_n) | ((_x) & TO_PHYS_MASK))
+#define TO_NODE_MSPEC(_n, _x)	(NODE_MSPEC_BASE(_n) | ((_x) & TO_PHYS_MASK))
+#define TO_NODE_HSPEC(_n, _x)	(NODE_HSPEC_BASE(_n) | ((_x) & TO_PHYS_MASK))
+
+
+#define RAW_NODE_SWIN_BASE(nasid, widget)				\
+	(NODE_IO_BASE(nasid) + (UINT64_CAST (widget) << SWIN_SIZE_BITS))
+
+#define WIDGETID_GET(addr)	((unsigned char)((addr >> SWIN_SIZE_BITS) & 0xff))
+
+/*
+ * The following definitions pertain to the IO special address
+ * space.  They define the location of the big and little windows
+ * of any given node.
+ */
+
+#define SWIN_SIZE_BITS		24
+#define SWIN_SIZE		(UINT64_CAST 1 << 24)
+#define	SWIN_SIZEMASK		(SWIN_SIZE - 1)
+#define	SWIN_WIDGET_MASK	0xF
+
+/*
+ * Convert smallwindow address to xtalk address.
+ *
+ * 'addr' can be physical or virtual address, but will be converted
+ * to Xtalk address in the range 0 -> SWIN_SIZEMASK
+ */
+#define	SWIN_WIDGETADDR(addr)	((addr) & SWIN_SIZEMASK)
+#define	SWIN_WIDGETNUM(addr)	(((addr)  >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+/*
+ * Verify if addr belongs to small window address on node with "nasid"
+ *
+ *
+ * NOTE: "addr" is expected to be XKPHYS address, and NOT physical
+ * address
+ *
+ *
+ */
+#define	NODE_SWIN_ADDR(nasid, addr)	\
+		(((addr) >= NODE_SWIN_BASE(nasid, 0))  && \
+		 ((addr) <  (NODE_SWIN_BASE(nasid, HUB_NUM_WIDGET) + SWIN_SIZE)\
+		 ))
+
+/*
+ * The following define the major position-independent aliases used
+ * in SN.
+ *	UALIAS -- 256MB in size, reads in the UALIAS result in
+ *			uncached references to the memory of the reader's node.
+ *	CPU_UALIAS -- 128kb in size, the bottom part of UALIAS is flipped
+ *			depending on which CPU does the access to provide
+ *			all CPUs with unique uncached memory at low addresses.
+ *	LBOOT  -- 256MB in size, reads in the LBOOT area result in
+ *			uncached references to the local hub's boot prom and
+ *			other directory-bus connected devices.
+ *	IALIAS -- 8MB in size, reads in the IALIAS result in uncached
+ *			references to the local hub's registers.
+ */
+
+#define UALIAS_BASE		HSPEC_BASE
+#define UALIAS_SIZE		0x10000000	/* 256 Megabytes */
+#define UALIAS_LIMIT		(UALIAS_BASE + UALIAS_SIZE)
+
+/*
+ * The bottom of ualias space is flipped depending on whether you're
+ * processor 0 or 1 within a node.
+ */
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define LREG_BASE		(HSPEC_BASE + 0x10000000)
+#define LREG_SIZE		0x8000000  /* 128 MB */
+#define LREG_LIMIT		(LREG_BASE + LREG_SIZE)
+#define LBOOT_BASE		(LREG_LIMIT)
+#define LBOOT_SIZE		0x8000000   /* 128 MB */
+#define LBOOT_LIMIT		(LBOOT_BASE + LBOOT_SIZE)
+#define LBOOT_STRIDE		0x2000000    /* two PROMs, on 32M boundaries */
+#endif
+
+#define	HUB_REGISTER_WIDGET	1
+#define IALIAS_BASE		NODE_SWIN_BASE(0, HUB_REGISTER_WIDGET)
+#define IALIAS_SIZE		0x800000	/* 8 Megabytes */
+#define IS_IALIAS(_a)		(((_a) >= IALIAS_BASE) &&		\
+				 ((_a) < (IALIAS_BASE + IALIAS_SIZE)))
+
+/*
+ * Macro for referring to Hub's RBOOT space
+ */
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+
+#define NODE_LREG_BASE(_n)	(NODE_HSPEC_BASE(_n) + 0x30000000)
+#define NODE_LREG_LIMIT(_n)	(NODE_LREG_BASE(_n) + LREG_SIZE)
+#define RREG_BASE(_n)		(NODE_LREG_BASE(_n))
+#define RREG_LIMIT(_n)		(NODE_LREG_LIMIT(_n))
+#define RBOOT_SIZE		0x8000000	/* 128 Megabytes */
+#define NODE_RBOOT_BASE(_n)	(NODE_HSPEC_BASE(_n) + 0x38000000)
+#define NODE_RBOOT_LIMIT(_n)	(NODE_RBOOT_BASE(_n) + RBOOT_SIZE)
+
+#endif
+
+/*
+ * Macros for referring the Hub's back door space
+ *
+ *   These macros correctly process addresses in any node's space.
+ *   WARNING: They won't work in assembler.
+ *
+ *   BDDIR_ENTRY_LO returns the address of the low double-word of the dir
+ *                  entry corresponding to a physical (Cac or Uncac) address.
+ *   BDDIR_ENTRY_HI returns the address of the high double-word of the entry.
+ *   BDPRT_ENTRY    returns the address of the double-word protection entry
+ *                  corresponding to the page containing the physical address.
+ *   BDPRT_ENTRY_S  Stores the value into the protection entry.
+ *   BDPRT_ENTRY_L  Load the value from the protection entry.
+ *   BDECC_ENTRY    returns the address of the ECC byte corresponding to a
+ *                  double-word at a specified physical address.
+ *   BDECC_ENTRY_H  returns the address of the two ECC bytes corresponding to a
+ *                  quad-word at a specified physical address.
+ */
+#define NODE_BDOOR_BASE(_n)	(NODE_HSPEC_BASE(_n) + (NODE_ADDRSPACE_SIZE/2))
+
+#define NODE_BDECC_BASE(_n)	(NODE_BDOOR_BASE(_n))
+#define NODE_BDDIR_BASE(_n)	(NODE_BDOOR_BASE(_n) + (NODE_ADDRSPACE_SIZE/4))
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+/*
+ * Bedrock's directory entries are a single word:  no low/high
+ */
+
+#define BDDIR_ENTRY(_pa)	(HSPEC_BASE +				      \
+				  NODE_ADDRSPACE_SIZE * 7 / 8  		    | \
+				 UINT64_CAST (_pa)	& NASID_MASK	    | \
+				 UINT64_CAST (_pa) >> 3 & BDDIR_UPPER_MASK)
+
+#ifdef BRINGUP
+        /* minimize source changes by mapping *_LO() & *_HI()   */
+#define BDDIR_ENTRY_LO(_pa)     BDDIR_ENTRY(_pa)
+#define BDDIR_ENTRY_HI(_pa)     BDDIR_ENTRY(_pa)
+#endif /* BRINGUP */
+
+#define BDDIR_PAGE_MASK		(BDDIR_UPPER_MASK & 0x7ffff << 11)
+#define BDDIR_PAGE_BASE_MASK	(UINT64_CAST 0xfffffffffffff800)
+
+#ifdef _LANGUAGE_C
+
+#define BDPRT_ENTRY_ADDR(_pa, _rgn)      ((uint64_t *) ( (HSPEC_BASE +       \
+                                 NODE_ADDRSPACE_SIZE * 7 / 8 + 0x408)       | \
+                                (UINT64_CAST (_pa)      & NASID_MASK)        | \
+                                (UINT64_CAST (_pa) >> 3 & BDDIR_PAGE_MASK)   | \
+                                (UINT64_CAST (_pa) >> 3 & 0x3 << 4)          | \
+                                ((_rgn) & 0x1e) << 5))
+
+static __inline uint64_t BDPRT_ENTRY_L(paddr_t pa,uint32_t rgn) {
+	uint64_t word=*BDPRT_ENTRY_ADDR(pa,rgn);
+
+	if(rgn&0x20)			/*If the region is > 32, move it down*/
+		word = word >> 32;
+	if(rgn&0x1)			/*If the region is odd, get that part */
+		word = word >> 16;
+	word = word & 0xffff;		/*Get the 16 bits we are interested in*/
+
+	return word;
+}
+
+static __inline void BDPRT_ENTRY_S(paddr_t pa,uint32_t rgn,uint64_t val) {
+        uint64_t *addr=(uint64_t *)BDPRT_ENTRY_ADDR(pa,rgn);
+        uint64_t word,mask;
+
+        word=*addr;
+	mask=0;
+	if(rgn&0x1) {
+		mask|=0x0000ffff0000ffff;
+		val=val<<16;
+	}
+	else
+		mask|=0xffff0000ffff0000;
+	if(rgn&0x20) {
+		mask|=0x00000000ffffffff;
+		val=val<<32;
+	}
+	else
+		mask|=0xffffffff00000000;
+	word &= mask;
+	word |= val;
+
+	*(addr++)=word;
+	addr++;
+        *(addr++)=word;
+        addr++;
+        *(addr++)=word;
+        addr++;
+        *addr=word;
+}
+#endif	/*_LANGUAGE_C*/
+
+#define BDCNT_ENTRY(_pa)	(HSPEC_BASE +				      \
+				  NODE_ADDRSPACE_SIZE * 7 / 8 + 0x8    	    | \
+				 UINT64_CAST (_pa)	& NASID_MASK	    | \
+				 UINT64_CAST (_pa) >> 3 & BDDIR_PAGE_MASK   | \
+				 UINT64_CAST (_pa) >> 3 & 0x3 << 4)
+
+
+#ifdef    BRINGUP
+  /* little endian packing of ecc bytes requires a swizzle */ 
+  /* this is problematic for memory_init_ecc               */
+#endif /* BRINGUP */
+#define BDECC_ENTRY(_pa)	(HSPEC_BASE +				      \
+				  NODE_ADDRSPACE_SIZE * 5 / 8 		    | \
+				 UINT64_CAST (_pa)	& NASID_MASK	    | \
+				 UINT64_CAST (_pa) >> 3 & BDECC_UPPER_MASK    \
+				   		        ^ 0x7ULL)
+
+#define BDECC_SCRUB(_pa)	(HSPEC_BASE +				      \
+				  NODE_ADDRSPACE_SIZE / 2 		    | \
+				 UINT64_CAST (_pa)	& NASID_MASK	    | \
+				 UINT64_CAST (_pa) >> 3 & BDECC_UPPER_MASK    \
+				   		        ^ 0x7ULL)
+
+  /* address for Halfword backdoor ecc access. Note that   */
+  /* ecc bytes are packed in little endian order           */
+#define BDECC_ENTRY_H(_pa)	(HSPEC_BASE +                                 \
+				  NODE_ADDRSPACE_SIZE * 5 / 8		    | \
+				 UINT64_CAST (_pa)	 & NASID_MASK	    | \
+				 UINT64_CAST (_pa) >> 3 & BDECC_UPPER_MASK    \
+				   		        ^ 0x6ULL)
+
+/*
+ * Macro to convert a back door directory, protection, page counter, or ecc
+ * address into the raw physical address of the associated cache line
+ * or protection page.
+ */
+
+#define BDDIR_TO_MEM(_ba)	(UINT64_CAST  (_ba) & NASID_MASK            | \
+				 (UINT64_CAST (_ba) & BDDIR_UPPER_MASK) << 3)
+
+#ifdef BRINGUP
+/*
+ * This can't be done since there are 4 entries per address so you'd end up
+ * mapping back to 4 different physical addrs.
+ */
+  
+#define BDPRT_TO_MEM(_ba) 	(UINT64_CAST  (_ba) & NASID_MASK	    | \
+				 (UINT64_CAST (_ba) & BDDIR_PAGE_MASK) << 3 | \
+				 (UINT64_CAST (_ba) & 0x3 << 4) << 3)
+#endif
+
+#define BDCNT_TO_MEM(_ba) 	(UINT64_CAST  (_ba) & NASID_MASK	    | \
+				 (UINT64_CAST (_ba) & BDDIR_PAGE_MASK) << 3 | \
+				 (UINT64_CAST (_ba) & 0x3 << 4) << 3)
+
+#define BDECC_TO_MEM(_ba)	(UINT64_CAST  (_ba) & NASID_MASK	    | \
+				 ((UINT64_CAST (_ba) ^ 0x7ULL)                \
+				                    & BDECC_UPPER_MASK) << 3 )
+
+#define BDECC_H_TO_MEM(_ba)	(UINT64_CAST  (_ba) & NASID_MASK	    | \
+				 ((UINT64_CAST (_ba) ^ 0x6ULL)                \
+				                    & BDECC_UPPER_MASK) << 3 )
+
+#define BDADDR_IS_DIR(_ba)	((UINT64_CAST  (_ba) & 0x8) == 0)
+#define BDADDR_IS_PRT(_ba)	((UINT64_CAST  (_ba) & 0x408) == 0x408)
+#define BDADDR_IS_CNT(_ba)	((UINT64_CAST  (_ba) & 0x8) == 0x8)
+
+#endif /* CONFIG_SGI_IP35 */
+
+
+/*
+ * The following macros produce the correct base virtual address for
+ * the hub registers.  The LOCAL_HUB_* macros produce the appropriate
+ * address for the local registers.  The REMOTE_HUB_* macro produce
+ * the address for the specified hub's registers.  The intent is
+ * that the appropriate PI, MD, NI, or II register would be substituted
+ * for _x.
+ */
+
+/*
+ * WARNING:
+ *	When certain Hub chip workaround are defined, it's not sufficient
+ *	to dereference the *_HUB_ADDR() macros.  You should instead use
+ *	HUB_L() and HUB_S() if you must deal with pointers to hub registers.
+ *	Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
+ *	They're always safe.
+ */
+#define LOCAL_HUB_ADDR(_x)	(HUBREG_CAST (IALIAS_BASE + (_x)))
+#define REMOTE_HUB_ADDR(_n, _x)	(HUBREG_CAST (NODE_SWIN_BASE(_n, 1) +	\
+					      0x800000 + (_x)))
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define REMOTE_HUB_PI_ADDR(_n, _sn, _x)	(HUBREG_CAST (NODE_SWIN_BASE(_n, 1) +	\
+					      0x800000 + PIREG(_x, _sn)))
+#define LOCAL_HSPEC_ADDR(_x)		(HUBREG_CAST (LREG_BASE + (_x)))
+#define REMOTE_HSPEC_ADDR(_n, _x)	(HUBREG_CAST (RREG_BASE(_n) + (_x)))
+#endif /* CONFIG_SGI_IP35 */
+
+#if _LANGUAGE_C
+
+#define HUB_L(_a)			*(_a)
+#define	HUB_S(_a, _d)			*(_a) = (_d)
+
+#define LOCAL_HUB_L(_r)			HUB_L(LOCAL_HUB_ADDR(_r))
+#define LOCAL_HUB_S(_r, _d)		HUB_S(LOCAL_HUB_ADDR(_r), (_d))
+#define REMOTE_HUB_L(_n, _r)		HUB_L(REMOTE_HUB_ADDR((_n), (_r)))
+#define REMOTE_HUB_S(_n, _r, _d)	HUB_S(REMOTE_HUB_ADDR((_n), (_r)), (_d))
+#define REMOTE_HUB_PI_L(_n, _sn, _r)	HUB_L(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)))
+#define REMOTE_HUB_PI_S(_n, _sn, _r, _d) HUB_S(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)), (_d))
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define LOCAL_HSPEC_L(_r)	     HUB_L(LOCAL_HSPEC_ADDR(_r))
+#define LOCAL_HSPEC_S(_r, _d)	     HUB_S(LOCAL_HSPEC_ADDR(_r), (_d))
+#define REMOTE_HSPEC_L(_n, _r)	     HUB_L(REMOTE_HSPEC_ADDR((_n), (_r)))
+#define REMOTE_HSPEC_S(_n, _r, _d)   HUB_S(REMOTE_HSPEC_ADDR((_n), (_r)), (_d))
+#endif /* CONFIG_SGI_IP35 */
+
+#endif /* _LANGUAGE_C */
+
+/*
+ * The following macros are used to get to a hub/bridge register, given
+ * the base of the register space.
+ */
+#define HUB_REG_PTR(_base, _off)	\
+	(HUBREG_CAST ((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
+
+#define HUB_REG_PTR_L(_base, _off)	\
+	HUB_L(HUB_REG_PTR((_base), (_off)))
+
+#define HUB_REG_PTR_S(_base, _off, _data)	\
+	HUB_S(HUB_REG_PTR((_base), (_off)), (_data))
+
+/*
+ * Software structure locations -- permanently fixed
+ *    See diagram in kldir.h
+ */
+
+#define PHYS_RAMBASE		0x0
+#define K0_RAMBASE		PHYS_TO_K0(PHYS_RAMBASE)
+
+#define EX_HANDLER_OFFSET(slice) ((slice) << 16)
+#define EX_HANDLER_ADDR(nasid, slice)					\
+	PHYS_TO_K0(NODE_OFFSET(nasid) | EX_HANDLER_OFFSET(slice))
+#define EX_HANDLER_SIZE		0x0400
+
+#define EX_FRAME_OFFSET(slice)	((slice) << 16 | 0x400)
+#define EX_FRAME_ADDR(nasid, slice)					\
+	PHYS_TO_K0(NODE_OFFSET(nasid) | EX_FRAME_OFFSET(slice))
+#define EX_FRAME_SIZE		0x0c00
+
+#define ARCS_SPB_OFFSET		0x1000
+#define ARCS_SPB_ADDR(nasid)						\
+	PHYS_TO_K0(NODE_OFFSET(nasid) | ARCS_SPB_OFFSET)
+#define ARCS_SPB_SIZE		0x0400
+
+#define KLDIR_OFFSET		0x2000
+#define KLDIR_ADDR(nasid)						\
+	TO_NODE_UNCAC((nasid), KLDIR_OFFSET)
+#define KLDIR_SIZE		0x0400
+
+
+/*
+ * Software structure locations -- indirected through KLDIR
+ *    See diagram in kldir.h
+ *
+ * Important:	All low memory structures must only be accessed
+ *		uncached, except for the symmon stacks.
+ */
+
+#define KLI_LAUNCH		0		/* Dir. entries */
+#define KLI_KLCONFIG		1
+#define	KLI_NMI			2
+#define KLI_GDA			3
+#define KLI_FREEMEM		4
+#define	KLI_SYMMON_STK		5
+#define KLI_PI_ERROR		6
+#define KLI_KERN_VARS		7
+#define	KLI_KERN_XP		8
+#define	KLI_KERN_PARTID		9
+
+#if _LANGUAGE_C
+
+#define KLD_BASE(nasid)		((kldir_ent_t *) KLDIR_ADDR(nasid))
+#define KLD_LAUNCH(nasid)	(KLD_BASE(nasid) + KLI_LAUNCH)
+#define KLD_NMI(nasid)		(KLD_BASE(nasid) + KLI_NMI)
+#define KLD_KLCONFIG(nasid)	(KLD_BASE(nasid) + KLI_KLCONFIG)
+#define KLD_PI_ERROR(nasid)	(KLD_BASE(nasid) + KLI_PI_ERROR)
+#define KLD_GDA(nasid)		(KLD_BASE(nasid) + KLI_GDA)
+#define KLD_SYMMON_STK(nasid)	(KLD_BASE(nasid) + KLI_SYMMON_STK)
+#define KLD_FREEMEM(nasid)	(KLD_BASE(nasid) + KLI_FREEMEM)
+#define KLD_KERN_VARS(nasid)	(KLD_BASE(nasid) + KLI_KERN_VARS)
+#define	KLD_KERN_XP(nasid)	(KLD_BASE(nasid) + KLI_KERN_XP)
+#define	KLD_KERN_PARTID(nasid)	(KLD_BASE(nasid) + KLI_KERN_PARTID)
+
+#define LAUNCH_OFFSET(nasid, slice)					\
+	(KLD_LAUNCH(nasid)->offset +					\
+	 KLD_LAUNCH(nasid)->stride * (slice))
+#define LAUNCH_ADDR(nasid, slice)					\
+	TO_NODE_UNCAC((nasid), LAUNCH_OFFSET(nasid, slice))
+#define LAUNCH_SIZE(nasid)	KLD_LAUNCH(nasid)->size
+
+#define NMI_OFFSET(nasid, slice)					\
+	(KLD_NMI(nasid)->offset +					\
+	 KLD_NMI(nasid)->stride * (slice))
+#define NMI_ADDR(nasid, slice)						\
+	TO_NODE_UNCAC((nasid), NMI_OFFSET(nasid, slice))
+#define NMI_SIZE(nasid)	KLD_NMI(nasid)->size
+
+#define KLCONFIG_OFFSET(nasid)	KLD_KLCONFIG(nasid)->offset
+#define KLCONFIG_ADDR(nasid)						\
+	TO_NODE_UNCAC((nasid), KLCONFIG_OFFSET(nasid))
+#define KLCONFIG_SIZE(nasid)	KLD_KLCONFIG(nasid)->size
+
+#define GDA_ADDR(nasid)		KLD_GDA(nasid)->pointer
+#define GDA_SIZE(nasid)		KLD_GDA(nasid)->size
+
+#define SYMMON_STK_OFFSET(nasid, slice)					\
+	(KLD_SYMMON_STK(nasid)->offset +				\
+	 KLD_SYMMON_STK(nasid)->stride * (slice))
+#define SYMMON_STK_STRIDE(nasid)	KLD_SYMMON_STK(nasid)->stride
+
+#define SYMMON_STK_ADDR(nasid, slice)					\
+	TO_NODE_CAC((nasid), SYMMON_STK_OFFSET(nasid, slice))
+
+#define SYMMON_STK_SIZE(nasid)	KLD_SYMMON_STK(nasid)->stride
+
+#define SYMMON_STK_END(nasid)	(SYMMON_STK_ADDR(nasid, 0) + KLD_SYMMON_STK(nasid)->size)
+
+/* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a
+ * relocatable program
+ */
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+/* update master.d/sn1_elspec.dbg, SN1/addrs.h/DEBUGUNIX_ADDR, and
+ * DBGLOADADDR in symmon's Makefile when changing this */
+#define UNIX_DEBUG_LOADADDR     0x310000
+#elif defined(SN0XXL)
+#define UNIX_DEBUG_LOADADDR     0x360000
+#else
+#define	UNIX_DEBUG_LOADADDR	0x300000
+#endif
+#define	SYMMON_LOADADDR(nasid)						\
+	TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000))
+
+#define FREEMEM_OFFSET(nasid)	KLD_FREEMEM(nasid)->offset
+#define FREEMEM_ADDR(nasid)	SYMMON_STK_END(nasid)
+/*
+ * XXX
+ * Fix this. FREEMEM_ADDR should be aware of if symmon is loaded.
+ * Also, it should take into account what prom thinks to be a safe
+ * address
+	PHYS_TO_K0(NODE_OFFSET(nasid) + FREEMEM_OFFSET(nasid))
+ */
+#define FREEMEM_SIZE(nasid)	KLD_FREEMEM(nasid)->size
+
+#define PI_ERROR_OFFSET(nasid)	KLD_PI_ERROR(nasid)->offset
+#define PI_ERROR_ADDR(nasid)						\
+	TO_NODE_UNCAC((nasid), PI_ERROR_OFFSET(nasid))
+#define PI_ERROR_SIZE(nasid)	KLD_PI_ERROR(nasid)->size
+
+#define NODE_OFFSET_TO_K0(_nasid, _off)					\
+	(PAGE_OFFSET | NODE_OFFSET(_nasid) | (_off))
+#define K0_TO_NODE_OFFSET(_k0addr)					\
+	((__psunsigned_t)(_k0addr) & NODE_ADDRSPACE_MASK)
+
+#define KERN_VARS_ADDR(nasid)	KLD_KERN_VARS(nasid)->pointer
+#define KERN_VARS_SIZE(nasid)	KLD_KERN_VARS(nasid)->size
+
+#define	KERN_XP_ADDR(nasid)	KLD_KERN_XP(nasid)->pointer
+#define	KERN_XP_SIZE(nasid)	KLD_KERN_XP(nasid)->size
+
+#define GPDA_ADDR(nasid)	TO_NODE_CAC(nasid, GPDA_OFFSET)
+
+#endif /* _LANGUAGE_C */
+
+
+#endif /* _ASM_SN_ADDRS_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/agent.h linux/include/asm-ia64/sn/agent.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/agent.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/agent.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,45 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file has definitions for the hub and snac interfaces.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_SGI_SN_AGENT_H
+#define _ASM_SGI_SN_AGENT_H
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+//#include <asm/sn/io.h>
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/bedrock.h>
+#endif	/* CONFIG_SGI_IP35 */
+
+/*
+ * NIC register macros
+ */
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define HUB_NIC_ADDR(_cpuid) 						   \
+	REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cputocnode(_cpuid)),       \
+		LB_MICROLAN_CTL)
+#endif
+
+#define SET_HUB_NIC(_my_cpuid, _val) 				  	   \
+	(HUB_S(HUB_NIC_ADDR(_my_cpuid), (_val)))
+
+#define SET_MY_HUB_NIC(_v) 					           \
+	SET_HUB_NIC(cpuid(), (_v))
+
+#define GET_HUB_NIC(_my_cpuid) 						   \
+	(HUB_L(HUB_NIC_ADDR(_my_cpuid)))
+
+#define GET_MY_HUB_NIC() 						   \
+	GET_HUB_NIC(cpuid())
+
+#endif /* _ASM_SGI_SN_AGENT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/alenlist.h linux/include/asm-ia64/sn/alenlist.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/alenlist.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/alenlist.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,204 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_ALENLIST_H
+#define _ASM_SN_ALENLIST_H
+
+/* Definition of Address/Length List */
+
+/*
+ * An Address/Length List is used when setting up for an I/O DMA operation.
+ * A driver creates an Address/Length List that describes to the DMA 
+ * interface where in memory the DMA should go.  The bus interface sets up 
+ * mapping registers, if required, and returns a suitable list of "physical 
+ * addresses" or "I/O address" to the driver.  The driver then uses these 
+ * to set up an appropriate scatter/gather operation(s).
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * An Address/Length List Address.  It'll get cast to the appropriate type,
+ * and must be big enough to hold the largest possible address in any
+ * supported address space.
+ */
+typedef u64 alenaddr_t;
+typedef u64 uvaddr_t;
+
+typedef struct alenlist_s *alenlist_t;
+
+/* 
+ * For tracking progress as we walk down an address/length list.
+ */
+typedef struct alenlist_cursor_s *alenlist_cursor_t;
+
+/*
+ * alenlist representation that can be passed via an idl
+ */
+struct external_alenlist {
+	alenaddr_t	addr;
+	size_t		len;
+};
+typedef struct external_alenlist *external_alenlist_t;
+
+
+/* Return codes from alenlist routines.  */
+#define ALENLIST_FAILURE -1
+#define ALENLIST_SUCCESS 0
+
+
+/* Flags to alenlist routines */
+#define AL_NOSLEEP	0x01		/* Do not sleep, waiting for memory */
+#define AL_NOCOMPACT	0x02		/* Do not try to compact adjacent entries */
+#define AL_LEAVE_CURSOR	0x04		/* Do not update cursor */
+
+
+/* Create an Address/Length List, and clear it of all entries.  */
+extern alenlist_t alenlist_create(unsigned flags);
+
+/* Grow/shrink an Address/Length List and FIX its size. */
+extern int alenlist_grow(alenlist_t, size_t npairs);
+
+/* Clear an Address/Length List so that it now describes 0 pairs. */
+extern void alenlist_clear(alenlist_t alenlist);
+
+/*
+ * Convenience function to create an Address/Length List and then append 
+ * the specified Address/Length Pair.  Exactly the same as alenlist_create 
+ * followed by alenlist_append.  Can be used when a small list (e.g. 1 pair)
+ * is adequate.
+ */
+extern alenlist_t
+alenpair_init(	alenaddr_t address, 			/* init to this address */
+		size_t length);				/* init to this length */
+
+/* 
+ * Peek at the head of an Address/Length List.  This does *NOT* update
+ * the internal cursor.
+ */
+extern int
+alenpair_get(	alenlist_t alenlist,		/* in: get from this List */
+		alenaddr_t *address,		/* out: address */
+		size_t *length);		/* out: length */
+
+/* Free the space consumed by an Address/Length List. */
+extern void alenlist_destroy(alenlist_t alenlist);
+
+/*
+ * Indicate that we're done using an Address/Length List.
+ * If we are the last user, destroy the List.
+ */
+extern void
+alenlist_done(alenlist_t alenlist);
+
+/* Append another Pair to a List */
+extern int alenlist_append(alenlist_t alenlist, 	/* append to this list */
+			alenaddr_t address,		/* address to append */
+			size_t length,			/* length to append */
+			unsigned flags);
+
+/* 
+ * Replace a Pair in the middle of a List, and return old values.
+ * (not generally useful for drivers; used by bus providers).
+ */
+extern int
+alenlist_replace(	alenlist_t alenlist, 		/* in: replace in this list */
+			alenlist_cursor_t cursorp,	/* inout: which item to replace */
+			alenaddr_t *addrp, 		/* inout: address */
+			size_t *lengthp,		/* inout: length */
+			unsigned flags);
+
+
+/* Get the next Pair from a List */
+extern int alenlist_get(alenlist_t alenlist, 		/* in: get from this list */
+			alenlist_cursor_t cursorp,	/* inout: which item to get */
+			size_t maxlength,		/* in: at most length */
+			alenaddr_t *addr, 		/* out: address */
+			size_t *length,			/* out: length */
+			unsigned flags);
+
+
+/* Return the number of Pairs stored in this List */
+extern int alenlist_size(alenlist_t alenlist);
+
+/* Concatenate two Lists. */
+extern void alenlist_concat(	alenlist_t from, 	/* copy from this list */
+				alenlist_t to);		/* to this list */
+
+/* Create a copy of an Address/Length List */
+extern alenlist_t alenlist_clone(alenlist_t old,	/* clone this list */
+				 unsigned flags);
+
+
+/* Allocate and initialize an Address/Length List Cursor */
+extern alenlist_cursor_t alenlist_cursor_create(alenlist_t alenlist, unsigned flags);
+
+/* Free an Address/Length List Cursor */
+extern void alenlist_cursor_destroy(alenlist_cursor_t cursorp);
+
+/*
+ * Initialize an Address/Length List Cursor in order to walk thru an
+ * Address/Length List from the beginning.
+ */
+extern int alenlist_cursor_init(alenlist_t alenlist, 
+				size_t offset, 
+				alenlist_cursor_t cursorp);
+
+/* Clone an Address/Length List Cursor. */
+extern int alenlist_cursor_clone(alenlist_t alenlist, 
+				alenlist_cursor_t cursorp_in, 
+				alenlist_cursor_t cursorp_out);
+
+/* 
+ * Return the number of bytes passed so far according to the specified
+ * Address/Length List Cursor.
+ */
+extern size_t alenlist_cursor_offset(alenlist_t alenlist, alenlist_cursor_t cursorp);
+
+
+
+
+/* Convert from a Kernel Virtual Address to a Physical Address/Length List */
+extern alenlist_t kvaddr_to_alenlist(	alenlist_t alenlist, 
+					caddr_t kvaddr, 
+					size_t length, 
+					unsigned flags);
+
+/* Convert from a User Virtual Address to a Physical Address/Length List */
+extern alenlist_t uvaddr_to_alenlist(	alenlist_t alenlist,
+					uvaddr_t vaddr, 
+					size_t length,
+					unsigned flags);
+
+/* Convert from a buf struct to a Physical Address/Length List */
+struct buf;
+extern alenlist_t buf_to_alenlist(	alenlist_t alenlist, 
+					struct buf *buf, 
+					unsigned flags);
+
+
+/* 
+ * Tracking position as we walk down an Address/Length List.
+ * This structure is NOT generally for use by device drivers.
+ */
+struct alenlist_cursor_s {
+	struct alenlist_s	*al_alenlist;	/* which list */
+	size_t			al_offset;	/* total bytes passed by cursor */
+	struct alenlist_chunk_s	*al_chunk;	/* which chunk in alenlist */
+	unsigned int		al_index;	/* which pair in chunk */
+	size_t			al_bcount;	/* offset into address/length pair */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ASM_SN_ALENLIST_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/arc/hinv.h linux/include/asm-ia64/sn/arc/hinv.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/arc/hinv.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/arc/hinv.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,186 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+/* $Id$
+ *
+ * ARCS hardware/memory inventory/configuration and system ID definitions.
+ */
+#ifndef _ASM_SN_ARC_HINV_H
+#define _ASM_SN_ARC_HINV_H
+
+#include <asm/sn/arc/types.h>
+
+/* configuration query defines */
+typedef enum configclass {
+	SystemClass,
+	ProcessorClass,
+	CacheClass,
+#ifndef	_NT_PROM
+	MemoryClass,
+	AdapterClass,
+	ControllerClass,
+	PeripheralClass
+#else	/* _NT_PROM */
+	AdapterClass,
+	ControllerClass,
+	PeripheralClass,
+	MemoryClass
+#endif	/* _NT_PROM */
+} CONFIGCLASS;
+
+typedef enum configtype {
+	ARC,
+	CPU,
+	FPU,
+	PrimaryICache,
+	PrimaryDCache,
+	SecondaryICache,
+	SecondaryDCache,
+	SecondaryCache,
+#ifndef	_NT_PROM
+	Memory,
+#endif
+	EISAAdapter,
+	TCAdapter,
+	SCSIAdapter,
+	DTIAdapter,
+	MultiFunctionAdapter,
+	DiskController,
+	TapeController,
+	CDROMController,
+	WORMController,
+	SerialController,
+	NetworkController,
+	DisplayController,
+	ParallelController,
+	PointerController,
+	KeyboardController,
+	AudioController,
+	OtherController,
+	DiskPeripheral,
+	FloppyDiskPeripheral,
+	TapePeripheral,
+	ModemPeripheral,
+	MonitorPeripheral,
+	PrinterPeripheral,
+	PointerPeripheral,
+	KeyboardPeripheral,
+	TerminalPeripheral,
+	LinePeripheral,
+	NetworkPeripheral,
+#ifdef	_NT_PROM
+	Memory,
+#endif
+	OtherPeripheral,
+
+	/* new stuff for IP30 */
+	/* added without moving anything */
+	/* except ANONYMOUS. */
+
+	XTalkAdapter,
+	PCIAdapter,
+	GIOAdapter,
+	TPUAdapter,
+
+	Anonymous
+} CONFIGTYPE;
+
+typedef enum {
+	Failed = 1,
+	ReadOnly = 2,
+	Removable = 4,
+	ConsoleIn = 8,
+	ConsoleOut = 16,
+	Input = 32,
+	Output = 64
+} IDENTIFIERFLAG;
+
+#ifndef NULL			/* for GetChild(NULL); */
+#define	NULL	0
+#endif
+
+union key_u {
+	struct {
+#ifdef	_MIPSEB
+		unsigned char  c_bsize;		/* block size in lines */
+		unsigned char  c_lsize;		/* line size in bytes/tag */
+		unsigned short c_size;		/* cache size in 4K pages */
+#else	/* _MIPSEL */
+		unsigned short c_size;		/* cache size in 4K pages */
+		unsigned char  c_lsize;		/* line size in bytes/tag */
+		unsigned char  c_bsize;		/* block size in lines */
+#endif	/* _MIPSEL */
+	} cache;
+	ULONG FullKey;
+};
+
+#if _MIPS_SIM == _ABI64
+#define SGI_ARCS_VERS	64			/* sgi 64-bit version */
+#define SGI_ARCS_REV	0			/* rev .00 */
+#else
+#define SGI_ARCS_VERS	1			/* first version */
+#define SGI_ARCS_REV	10			/* rev .10, 3/04/92 */
+#endif
+
+typedef struct component {
+	CONFIGCLASS	Class;
+	CONFIGTYPE	Type;
+	IDENTIFIERFLAG	Flags;
+	USHORT		Version;
+	USHORT		Revision;
+	ULONG 		Key;
+	ULONG		AffinityMask;
+	ULONG		ConfigurationDataSize;
+	ULONG		IdentifierLength;
+	char		*Identifier;
+} COMPONENT;
+
+/* internal structure that holds pathname parsing data */
+struct cfgdata {
+	char *name;			/* full name */
+	int minlen;			/* minimum length to match */
+	CONFIGTYPE type;		/* type of token */
+};
+
+/* System ID */
+typedef struct systemid {
+	CHAR VendorId[8];
+	CHAR ProductId[8];
+} SYSTEMID;
+
+/* memory query functions */
+typedef enum memorytype {
+	ExceptionBlock,
+	SPBPage,			/* ARCS == SystemParameterBlock */
+#ifndef	_NT_PROM
+	FreeContiguous,
+	FreeMemory,
+	BadMemory,
+	LoadedProgram,
+	FirmwareTemporary,
+	FirmwarePermanent
+#else	/* _NT_PROM */
+	FreeMemory,
+	BadMemory,
+	LoadedProgram,
+	FirmwareTemporary,
+	FirmwarePermanent,
+	FreeContiguous
+#endif	/* _NT_PROM */
+} MEMORYTYPE;
+
+typedef struct memorydescriptor {
+	MEMORYTYPE	Type;
+	LONG		BasePage;
+	LONG		PageCount;
+} MEMORYDESCRIPTOR;
+
+#endif /* _ASM_SN_ARC_HINV_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/arc/types.h linux/include/asm-ia64/sn/arc/types.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/arc/types.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/arc/types.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 1999 Ralf Baechle (ralf@gnu.org)
+ * Copyright 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SN_ARC_TYPES_H
+#define _ASM_SN_ARC_TYPES_H
+
+#include <linux/config.h>
+
+typedef char		CHAR;
+typedef short		SHORT;
+typedef long		LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
+typedef	long		LONG __attribute__ ((__mode__ (__DI__)));
+typedef unsigned char	UCHAR;
+typedef unsigned short	USHORT;
+typedef unsigned long	ULONG __attribute__ ((__mode__ (__DI__)));
+typedef void		VOID;
+
+/* The pointer types.  We're 64-bit and the firmware is also 64-bit, so
+   life is sane ...  */
+typedef CHAR		*_PCHAR;
+typedef SHORT		*_PSHORT;
+typedef LARGE_INTEGER	*_PLARGE_INTEGER;
+typedef	LONG		*_PLONG;
+typedef UCHAR		*_PUCHAR;
+typedef USHORT		*_PUSHORT;
+typedef ULONG		*_PULONG;
+typedef VOID		*_PVOID;
+
+typedef CHAR		*PCHAR;
+typedef SHORT		*PSHORT;
+typedef LARGE_INTEGER	*PLARGE_INTEGER;
+typedef	LONG		*PLONG;
+typedef UCHAR		*PUCHAR;
+typedef USHORT		*PUSHORT;
+typedef ULONG		*PULONG;
+typedef VOID		*PVOID;
+
+#endif /* _ASM_SN_ARC_TYPES_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/arch.h linux/include/asm-ia64/sn/arch.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/arch.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/arch.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,175 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI specific setup.
+ *
+ * Copyright (C) 1995 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_SN_ARCH_H
+#define _ASM_SN_ARCH_H
+
+#include <linux/types.h>
+#include <linux/config.h>
+
+#if defined(CONFIG_IA64_SGI_IO)
+#include <asm/sn/types.h>
+#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_SGI_IP37) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/arch.h>
+#endif
+#endif	/* CONFIG_IA64_SGI_IO */
+
+
+#if defined(_LANGUAGE_C) || defined(_LANGUAGE_C_PLUS_PLUS)
+typedef u64	hubreg_t;
+typedef u64	nic_t;
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+typedef u64     bdrkreg_t;
+#endif	/* CONFIG_SGI_xxxxx */
+#endif	/* _LANGUAGE_C || _LANGUAGE_C_PLUS_PLUS */
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define CPUS_PER_NODE		4	/* CPUs on a single hub */
+#define CPUS_PER_NODE_SHFT	2	/* Bits to shift in the node number */
+#define CPUS_PER_SUBNODE	2	/* CPUs on a single hub PI */
+#endif
+#define CNODE_NUM_CPUS(_cnode)		(NODEPDA(_cnode)->node_num_cpus)
+
+#define CNODE_TO_CPU_BASE(_cnode)	(NODEPDA(_cnode)->node_first_cpu)
+
+#define makespnum(_nasid, _slice)					\
+		(((_nasid) << CPUS_PER_NODE_SHFT) | (_slice))
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+
+/*
+ * There are 2 very similar macros for dealing with "slices". Make sure
+ * you use the right one. 
+ * Unfortunately, on all platforms except IP35 (currently), the 2 macros 
+ * are interchangeable. 
+ *
+ * On IP35, there are 4 cpus per node. Each cpu is referred to by its slice.
+ * The slices are numbered 0 thru 3. 
+ *
+ * There are also 2 PI interfaces per node. Each PI interface supports 2 cpus.
+ * The term "local slice" specifies the cpu number relative to the PI.
+ *
+ * The cpus on the node are numbered:
+ *	slice	localslice
+ *	  0          0
+ *	  1          1
+ *	  2          0
+ *	  3          1
+ *
+ *	cputoslice - returns a number 0..3 that is the slice of the specified cpu.
+ *	cputolocalslice - returns a number 0..1 that identifies the local slice of
+ *			the cpu within its PI interface.
+ */
+#ifdef notyet
+	/* These are dummied up for now ..... */
+#define cputocnode(cpu)				\
+               (pdaindr[(cpu)].p_nodeid)
+#define cputonasid(cpu)				\
+               (pdaindr[(cpu)].p_nasid)
+#define cputoslice(cpu)				\
+               (ASSERT(pdaindr[(cpu)].pda), (pdaindr[(cpu)].pda->p_slice))
+#define cputolocalslice(cpu)			\
+               (ASSERT(pdaindr[(cpu)].pda), (LOCALCPU(pdaindr[(cpu)].pda->p_slice)))
+#define cputosubnode(cpu)			\
+		(ASSERT(pdaindr[(cpu)].pda), (SUBNODE(pdaindr[(cpu)].pda->p_slice)))
+#else
+#define cputocnode(cpu) 0
+#define cputonasid(cpu) 0
+#define cputoslice(cpu) 0
+#define cputolocalslice(cpu) 0
+#define cputosubnode(cpu) 0
+#endif	/* notyet */
+#endif	/* CONFIG_SGI_IP35 */
+
+#if defined(_LANGUAGE_C) || defined(_LANGUAGE_C_PLUS_PLUS)
+
+#define INVALID_NASID		(nasid_t)-1
+#define INVALID_CNODEID		(cnodeid_t)-1
+#define INVALID_PNODEID		(pnodeid_t)-1
+#define INVALID_MODULE		(moduleid_t)-1
+#define	INVALID_PARTID		(partid_t)-1
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+extern int     get_slice(void);
+extern cpuid_t get_cnode_cpu(cnodeid_t);
+extern int get_cpu_slice(cpuid_t);
+extern cpuid_t cnodetocpu(cnodeid_t);
+// extern cpuid_t cnode_slice_to_cpuid(cnodeid_t, int);
+
+extern int cnode_exists(cnodeid_t cnode);
+extern cnodeid_t cpuid_to_compact_node[MAXCPUS];
+#endif	/* CONFIG_SGI_IP35 */
+
+extern nasid_t get_nasid(void);
+extern cnodeid_t get_cpu_cnode(int);
+extern int get_cpu_slice(cpuid_t);
+
+/*
+ * NO ONE should access these arrays directly.  The only reason we refer to
+ * them here is to avoid the procedure call that would be required in the
+ * macros below.  (Really want private data members here :-)
+ */
+extern cnodeid_t nasid_to_compact_node[MAX_NASIDS];
+extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
+
+/*
+ * These macros are used by various parts of the kernel to convert
+ * between the three different kinds of node numbering.   At least some
+ * of them may change to procedure calls in the future, but the macros
+ * will continue to work.  Don't use the arrays above directly.
+ */
+
+#define	NASID_TO_REGION(nnode)	      	\
+    ((nnode) >> \
+     (is_fine_dirmode() ? NASID_TO_FINEREG_SHFT : NASID_TO_COARSEREG_SHFT))
+
+extern cnodeid_t nasid_to_compact_node[MAX_NASIDS];
+extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
+extern cnodeid_t cpuid_to_compact_node[MAXCPUS];
+
+#if !defined(DEBUG)
+
+#define NASID_TO_COMPACT_NODEID(nnode)	(nasid_to_compact_node[nnode])
+#define COMPACT_TO_NASID_NODEID(cnode)	(compact_to_nasid_node[cnode])
+#define CPUID_TO_COMPACT_NODEID(cpu)	(cpuid_to_compact_node[(cpu)])
+#else
+
+/*
+ * These functions can do type checking and fail if they need to return
+ * a bad nodeid, but they're not as fast so just use 'em for debug kernels.
+ */
+cnodeid_t nasid_to_compact_nodeid(nasid_t nasid);
+nasid_t compact_to_nasid_nodeid(cnodeid_t cnode);
+
+#define NASID_TO_COMPACT_NODEID(nnode)	nasid_to_compact_nodeid(nnode)
+#define COMPACT_TO_NASID_NODEID(cnode)	compact_to_nasid_nodeid(cnode)
+#define CPUID_TO_COMPACT_NODEID(cpu)	(cpuid_to_compact_node[(cpu)])
+#endif
+
+extern int node_getlastslot(cnodeid_t);
+
+#endif /* _LANGUAGE_C || _LANGUAGE_C_PLUS_PLUS */
+
+#define SLOT_BITMASK    	(MAX_MEM_SLOTS - 1)
+#define SLOT_SIZE		(1LL<<SLOT_SHIFT)
+
+#define node_getnumslots(node)	(MAX_MEM_SLOTS)
+#define NODE_MAX_MEM_SIZE	SLOT_SIZE * MAX_MEM_SLOTS
+
+/*
+ * New stuff in here from Irix sys/pfdat.h.
+ */
+#define	SLOT_PFNSHIFT		(SLOT_SHIFT - PAGE_SHIFT)
+#define	PFN_NASIDSHFT		(NASID_SHFT - PAGE_SHIFT)
+#define mkpfn(nasid, off)	(((pfn_t)(nasid) << PFN_NASIDSHFT) | (off))
+#define slot_getbasepfn(node,slot) \
+		(mkpfn(COMPACT_TO_NASID_NODEID(node), slot<<SLOT_PFNSHIFT))
+#endif /* _ASM_SN_ARCH_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/cdl.h linux/include/asm-ia64/sn/cdl.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/cdl.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/cdl.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,179 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_CDL_H
+#define _ASM_SN_CDL_H
+
+#include <asm/sn/sgi.h>
+
+/*
+ *	cdl: connection/driver list
+ *
+ *	support code for bus infrastructure for busses
+ *	that have self-identifying devices; initially
+ *	constructed for xtalk, pciio and gioio modules.
+ */
+typedef struct cdl     *cdl_p;
+
+/*
+ *	cdl_itr_f is the type for the functions
+ *	that are handled by cdl_iterate.
+ */
+
+typedef void	
+cdl_iter_f		(devfs_handle_t vhdl);
+
+/*
+ *	If CDL_PRI_HI is specified in the flags
+ *	parameter for cdl_add_driver, then that driver's
+ *	attach routine will be called for future connect
+ *	points before any (non-CDL_PRI_HI) drivers.
+ *
+ *	The IOC3 driver uses this facility to make sure
+ *	that the ioc3_attach() function is called before
+ *	the attach routines of any subdevices.
+ *
+ *	Drivers for bridge-based crosstalk cards that
+ *	are almost but not quite generic can use it to
+ *	arrange that their attach() functions get called
+ *	before the generic bridge drivers, so they can
+ *	leave behind "hint" structures that will
+ *	properly configure the generic driver.
+ */
+#define	CDL_PRI_HI	0x0001
+
+/*
+ *	cdl_new: construct a new connection/driver list
+ *
+ *	Called once for each "kind" of bus. Returns an
+ *	opaque cookie representing the particular list
+ *	that will be operated on by the other calls.
+ */
+extern cdl_p		cdl_new(char *, char *, char *);
+
+/*
+ *	cdl_del: destroy a connection/driver list.
+ *
+ *	Releases all dynamically allocated resources
+ *	associated with the specified list. Forgets what
+ *	drivers might be involved in this kind of bus,
+ *	forgets what connection points have been noticed
+ *	on this kind of bus.
+ */
+extern void		cdl_del(cdl_p reg);
+
+/*
+ *	cdl_add_driver: register a device driver
+ *
+ *	Calls the driver's attach routine with all
+ *	connection points on the list that have the same
+ *	key information as the driver; then places the
+ *	driver on the list so that any connection points
+ *	discovered in the future that match the driver
+ *	can be handed off to the driver's attach
+ *	routine.
+ *
+ *	CDL_PRI_HI may be specified (see above).
+ */
+
+extern int		cdl_add_driver(cdl_p reg,
+				       int key1,
+				       int key2,
+				       char *prefix,
+				       int flags);
+
+/*
+ *	cdl_del_driver: remove a device driver
+ *
+ *	Calls the driver's detach routine with all
+ *	connection points on the list that match the
+ *	driver; then forgets about the driver. Future
+ *	calls to cdl_add_connpt with connections that
+ *	would match this driver no longer trigger calls
+ *	to the driver's attach routine.
+ *
+ *	NOTE: Yes, I said CONNECTION POINTS, not
+ *	vertices that the driver has been attached to
+ *	with hwgraph_driver_add(); this gives the driver
+ *	a chance to clean up anything it did to the
+ *	connection point in its attach routine. Also,
+ *	this is done whether or not the attach routine
+ *	was successful.
+ */
+extern void		cdl_del_driver(cdl_p reg, 
+				       char *prefix);
+
+/*
+ *	cdl_add_connpt: add a connection point
+ *
+ *	Calls the attach routines of all the drivers on
+ *	the list that match this connection point, in
+ *	the order that they were added to the list,
+ *	except that CDL_PRI_HI drivers are called first.
+ *
+ *	Then the vertex is added to the list, so it can
+ *	be presented to any matching drivers that may be
+ *	subsequently added to the list.
+ */
+extern int		cdl_add_connpt(cdl_p reg,
+				       int key1,
+				       int key2,
+				       devfs_handle_t conn);
+
+/*
+ *	cdl_del_connpt: delete a connection point
+ *
+ *	Calls the detach routines of all matching
+ *	drivers for this connection point, in the same
+ *	order that the attach routines were called; then
+ *	forgets about this vertex, so drivers added in
+ *	the future will not be told about it.
+ *
+ *	NOTE: Same caveat here about the detach calls as
+ *	in the cdl_del_driver() comment above.
+ */
+extern void		cdl_del_connpt(cdl_p reg,
+				       int key1,
+				       int key2,
+				       devfs_handle_t conn);
+
+/*
+ *	cdl_iterate: find all vertices in the registry
+ *	corresponding to the named driver and call them
+ *	with the specified function (giving the vertex
+ *	as the parameter).
+ */
+
+extern void		cdl_iterate(cdl_p reg,
+				    char *prefix,
+				    cdl_iter_f *func);
+
+/*
+ * An INFO_LBL_ASYNC_ATTACH label is attached to a vertex, pointing to
+ * an instance of async_attach_s to indicate that asynchronous
+ * attachment may be applied to that device ... if the corresponding
+ * driver allows it.
+ */
+
+struct async_attach_s {
+	sema_t async_sema;
+	int    async_count;
+};
+typedef struct async_attach_s *async_attach_t;
+
+async_attach_t	async_attach_new(void);
+void		async_attach_free(async_attach_t);
+async_attach_t  async_attach_get_info(devfs_handle_t);
+void            async_attach_add_info(devfs_handle_t, async_attach_t);
+void            async_attach_del_info(devfs_handle_t);
+void		async_attach_signal_start(async_attach_t);
+void		async_attach_signal_done(async_attach_t);
+void		async_attach_waitall(async_attach_t);
+
+#endif	/* _ASM_SN_CDL_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/clksupport.h linux/include/asm-ia64/sn/clksupport.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/clksupport.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/clksupport.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,64 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+#ifndef  _ASM_KSYS_CLKSUPPORT_H
+#define _ASM_KSYS_CLKSUPPORT_H
+
+/* #include <sys/mips_addrspace.h> */
+
+#if SN
+#include <asm/sn/agent.h>
+#include <asm/sn/intr_public.h>
+typedef hubreg_t clkreg_t;
+extern nasid_t master_nasid;
+
+#define GET_LOCAL_RTC		(clkreg_t)LOCAL_HUB_L(PI_RT_COUNT)
+#define DISABLE_TMO_INTR()	if  (cpuid_to_localslice(cpuid())) \
+					REMOTE_HUB_PI_S(get_nasid(),\
+						cputosubnode(cpuid()),\
+						PI_RT_COMPARE_B, 0); \
+				else \
+					REMOTE_HUB_PI_S(get_nasid(),\
+						cputosubnode(cpuid()),\
+						PI_RT_COMPARE_A, 0);
+
+/* This is a hack; we really need to figure these values out dynamically */
+/* 
+ * Since 800 ns works very well with various HUB frequencies, such as
+ * 360, 380, 390 and 400 MHZ, we use 800 ns rtc cycle time.
+ */
+#define NSEC_PER_CYCLE		800
+#define CYCLE_PER_SEC		(NSEC_PER_SEC/NSEC_PER_CYCLE)
+/*
+ * Number of cycles per profiling intr 
+ */
+#define CLK_FCLOCK_FAST_FREQ	1250
+#define CLK_FCLOCK_SLOW_FREQ	0
+/* The is the address that the user will use to mmap the cycle counter */
+#define CLK_CYCLE_ADDRESS_FOR_USER LOCAL_HUB_ADDR(PI_RT_COUNT)
+
+#elif IP30
+#include <sys/cpu.h>
+typedef heartreg_t clkreg_t;
+#define NSEC_PER_CYCLE		80
+#define CYCLE_PER_SEC		(NSEC_PER_SEC/NSEC_PER_CYCLE)
+#define GET_LOCAL_RTC	*((volatile clkreg_t *)PHYS_TO_COMPATK1(HEART_COUNT))
+#define DISABLE_TMO_INTR()
+#define CLK_CYCLE_ADDRESS_FOR_USER PHYS_TO_K1(HEART_COUNT)
+#define CLK_FCLOCK_SLOW_FREQ (CYCLE_PER_SEC / HZ)
+#endif
+
+/* Prototypes */
+extern void init_timebase(void);
+extern void fastick_maint(struct eframe_s *);
+extern int audioclock;
+extern int prfclk_enabled_cnt;
+#endif  /* _ASM_KSYS_CLKSUPPORT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/cmn_err.h linux/include/asm-ia64/sn/cmn_err.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/cmn_err.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/cmn_err.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,120 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_CMN_ERR_H
+#define _ASM_SN_CMN_ERR_H
+
+/*
+** Common error handling severity levels.  Converted to be
+** represented by the associated 4.3BSD syslog priorities.
+*/
+
+#define CE_DEBUG	KERN_DEBUG	/* debug	*/
+#define CE_CONT		KERN_INFO	/* continuation	*/
+#define CE_NOTE		KERN_NOTICE	/* notice	*/
+#define CE_WARN		KERN_WARNING	/* warning	*/
+#define CE_ALERT	KERN_ALERT	/* alert	*/
+#define CE_PANIC	KERN_EMERG	/* panic	*/
+
+#define	CE_LEVELMASK	LOG_PRIMASK	/* mask for severity level	*/
+#define	CE_CPUID	0x8		/* prepend CPU id to output	*/
+#define CE_PHYSID	0x10		/* prepend CPU phys location    */
+#define CE_SYNC		0x20		/* wait for uart to drain before returning */
+
+/* Flags for Availmon Monitoring
+ * When a developer or's these bits into the cmn_err flags above,
+ * and they have availmon installed, certain "actions" will take
+ * place depending upon how they have the availmon software configured.
+ */
+#define CE_TOOKACTIONS   0x0100            /* Actions taken by some error   */
+#define CE_RUNNINGPOOR   0x0200            /* System running degraded       */
+#define CE_MAINTENANCE   0x0400            /* System needs maintenance      */
+#define CE_CONFIGERROR   0x0800            /* System configured incorrectly */
+
+/* Bitmasks for separating subtasks from priority levels */
+#define CE_PRIOLEVELMASK 0x00ff  /* bitmask for severity levels of cmn_err */
+#define CE_SUBTASKMASK   0xff00  /* bitmask for availmon actions of cmn_err */
+#define CE_AVAILMONALL   (CE_TOOKACTIONS|CE_RUNNINGPOOR| \
+                                 CE_MAINTENANCE|CE_CONFIGERROR)
+
+#ifdef __KERNEL__
+
+#define CE_PBPANIC	KERN_CRIT	/* Special define used to manipulate
+					 * putbufndx in kernel */
+
+/* Console output flushing flag and routine */
+
+extern int constrlen;		/* Length of current console string, if zero,
+				   there are no characters to flush */
+#define	CONBUF_LOCKED	0	/* conbuf is already locked */
+#define	CONBUF_UNLOCKED	1	/* need to reacquire lock */
+#define CONBUF_DRAIN	2	/* ensure output before returning */
+
+/*
+ * bit field descriptions for printf %r and %R formats
+ *
+ * printf("%r %R", val, reg_descp);
+ * struct reg_desc *reg_descp;
+ *
+ * the %r and %R formats allow formatted print of bit fields.  individual
+ * bit fields are described by a struct reg_desc, multiple bit fields within
+ * a single word can be described by multiple reg_desc structures.
+ * %r outputs a string of the format "<bit field descriptions>"
+ * %R outputs a string of the format "0x%x<bit field descriptions>"
+ *
+ * The fields in a reg_desc are:
+ *	__psunsigned_t rd_mask;	An appropriate mask to isolate the bit field
+ *				within a word, and'ed with val
+ *
+ *	int rd_shift;		A shift amount to be done to the isolated
+ *				bit field.  Done before printing the isolated
+ *				bit field with rd_format and before searching
+ *				for symbolic value names in rd_values
+ *
+ *	char *rd_name;		If non-null, a bit field name to label any
+ *				output from rd_format or searching rd_values.
+ *				if neither rd_format nor rd_values is non-null
+ *				rd_name is printed only if the isolated
+ *				bit field is non-null.
+ *
+ *	char *rd_format;	If non-null, the shifted bit field value
+ *				is printed using this format.
+ *
+ *	struct reg_values *rd_values;	If non-null, a pointer to a table
+ *				matching numeric values with symbolic names.
+ *				rd_values are searched and the symbolic
+ *				value is printed if a match is found, if no
+ *				match is found "???" is printed.
+ *				
+ */
+
+
+/*
+ * register values
+ * map between numeric values and symbolic values
+ */
+struct reg_values {
+	__psunsigned_t rv_value;
+	char *rv_name;
+};
+
+/*
+ * register descriptors are used for formatted prints of register values
+ * rd_mask and rd_shift must be defined, other entries may be null
+ */
+struct reg_desc {
+	k_machreg_t rd_mask;	/* mask to extract field */
+	int rd_shift;		/* shift for extracted value, - >>, + << */
+	char *rd_name;		/* field name */
+	char *rd_format;	/* format to print field */
+	struct reg_values *rd_values;	/* symbolic names of values */
+};
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_SN_CMN_ERR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/dmamap.h linux/include/asm-ia64/sn/dmamap.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/dmamap.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/dmamap.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,88 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_DMAMAP_H
+#define _ASM_SN_DMAMAP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Definitions for allocating, freeing, and using DMA maps
+ */
+
+/*
+ * DMA map types
+ */
+#define	DMA_SCSI	0
+#define	DMA_A24VME	1		/* Challenge/Onyx only 	*/
+#define	DMA_A32VME	2		/* Challenge/Onyx only 	*/
+#define	DMA_A64VME	3		/* SN0/Racer */
+
+#define	DMA_EISA	4
+
+#define	DMA_PCI32	5		/* SN0/Racer 	*/
+#define	DMA_PCI64	6		/* SN0/Racer 	*/
+
+/*
+ * DMA map structure as returned by dma_mapalloc()
+ */
+typedef struct dmamap {
+	int		dma_type;	/* Map type (see above) */
+	int		dma_adap;	/* I/O adapter */
+	int		dma_index;	/* Beginning map register to use */
+	int		dma_size;	/* Number of map registers to use */
+	paddr_t		dma_addr;	/* Corresponding bus addr for A24/A32 */
+	caddr_t		dma_virtaddr;	/* Beginning virtual address that is mapped */
+} dmamap_t;
+
+struct alenlist_s;
+
+/*
+ * Prototypes of exported functions
+ */
+extern dmamap_t	*dma_mapalloc(int, int, int, int);
+extern void	dma_mapfree(dmamap_t *);
+extern int	dma_map(dmamap_t *, caddr_t, int);
+extern int	dma_map2(dmamap_t *, caddr_t, caddr_t, int);
+extern paddr_t	dma_mapaddr(dmamap_t *, caddr_t);
+#ifdef IRIX
+extern int	dma_mapbp(dmamap_t *, buf_t *, int);
+#endif
+extern int	dma_map_alenlist(dmamap_t *, struct alenlist_s *, size_t);
+extern uint	ev_kvtoiopnum(caddr_t);
+
+/*
+ * These variables are defined in master.d/kernel
+ */
+extern struct map *a24map[];
+extern struct map *a32map[];
+
+extern int a24_mapsize;
+extern int a32_mapsize;
+
+extern lock_t dmamaplock;
+extern sv_t dmamapout;
+
+#ifdef __cplusplus
+}
+#endif
+
+/* standard flags values for pio_map routines,
+ * including {xtalk,pciio}_dmamap calls.
+ * NOTE: try to keep these in step with PIOMAP flags.
+ */
+#define DMAMAP_FIXED	0x1
+#define DMAMAP_NOSLEEP	0x2
+#define	DMAMAP_INPLACE	0x4
+
+#define	DMAMAP_FLAGS	0x7
+
+#endif /* _ASM_SN_DMAMAP_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/driver.h linux/include/asm-ia64/sn/driver.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/driver.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/driver.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,150 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_DRIVER_H
+#define _ASM_SN_DRIVER_H
+
+/*
+** Interface for device driver handle management.
+**
+** These functions are mostly for use by the loadable driver code, and
+** for use by I/O bus infrastructure code.
+*/
+
+typedef struct device_driver_s *device_driver_t;
+#define DEVICE_DRIVER_NONE (device_driver_t)NULL
+
+/* == Driver thread priority support == */
+typedef int ilvl_t;
+/* default driver thread priority level */
+#define DRIVER_THREAD_PRI_DEFAULT	(ilvl_t)230
+/* invalid driver thread priority level */
+#define DRIVER_THREAD_PRI_INVALID	(ilvl_t)-1
+
+/* Associate a thread priority with a driver */
+extern int device_driver_thread_pri_set(device_driver_t driver,
+					ilvl_t pri);
+
+/* Get the thread priority associated with the driver */
+extern ilvl_t device_driver_thread_pri_get(device_driver_t driver);
+
+/* Get the thread priority for a driver from the sysgen parameters */
+extern ilvl_t device_driver_sysgen_thread_pri_get(char *driver_prefix);
+
+/* Initialize device driver functions. */
+extern void device_driver_init(void);
+
+
+/* Allocate a driver handle */
+extern device_driver_t device_driver_alloc(char *prefix);
+
+
+/* Free a driver handle */
+extern void device_driver_free(device_driver_t driver);
+
+
+/* Given a device driver prefix, return a handle to the driver. */
+extern device_driver_t device_driver_get(char *prefix);
+
+/* Given a device, return a handle to the driver. */
+extern device_driver_t device_driver_getbydev(devfs_handle_t device);
+
+struct cdevsw;
+struct bdevsw;
+
+/* Associate a driver with bdevsw/cdevsw pointers. */
+extern int
+device_driver_devsw_put(device_driver_t driver,
+			struct bdevsw *my_bdevsw,
+			struct cdevsw *my_cdevsw);
+
+
+/* Given a driver, return the corresponding bdevsw and cdevsw pointers. */
+extern void
+device_driver_devsw_get(	device_driver_t driver, 
+				struct bdevsw **bdevswp,
+				struct cdevsw **cdevswp);
+
+/* Given a driver, return its name (prefix). */
+extern void device_driver_name_get(device_driver_t driver, char *buffer, int length);
+
+
+/* 
+ * A descriptor for every static device driver in the system.
+ * lboot creates a table of these and places it in master.c.
+ * device_driver_init runs through this table during initialization
+ * in order to "register" every static device driver.
+ */
+typedef struct static_device_driver_desc_s {
+	char 		*sdd_prefix;
+	struct bdevsw 	*sdd_bdevsw;
+	struct cdevsw 	*sdd_cdevsw;
+} *static_device_driver_desc_t;
+
+extern struct static_device_driver_desc_s static_device_driver_table[];
+extern int static_devsw_count;
+
+
+/*====== administration support ========== */
+/* structure of each entry in the table created by lboot for
+ * device / driver administration
+*/
+typedef struct dev_admin_info_s {
+	char	*dai_name;		/* name of the device or driver
+					 * prefix 
+					 */
+	char	*dai_param_name;	/* device or driver parameter name */
+	char	*dai_param_val;		/* value of the parameter */
+} dev_admin_info_t;
+
+
+/* Update all the administrative hints associated with the device */
+extern void 	device_admin_info_update(devfs_handle_t	dev_vhdl);
+
+/* Update all the administrative hints associated with the device driver */
+extern void	device_driver_admin_info_update(device_driver_t	driver);
+
+/* Get a particular administrative hint associated with a device */
+extern char 	*device_admin_info_get(devfs_handle_t	dev_vhdl,
+				       char		*info_lbl);
+
+/* Associate a particular administrative hint for a device */
+extern int	device_admin_info_set(devfs_handle_t	dev_vhdl,
+				      char		*info_lbl,
+				      char		*info_val);
+
+/* Get a particular administrative hint associated with a device driver*/
+extern char 	*device_driver_admin_info_get(char	*driver_prefix,	
+					      char	*info_name);
+
+/* Associate a particular administrative hint for a device driver*/
+extern int	device_driver_admin_info_set(char	*driver_prefix,
+					     char	*driver_info_lbl,
+					     char	*driver_info_val);
+
+/* Initialize the extended device administrative hint table */
+extern void	device_admin_table_init(void);
+
+/* Add a hint corresponding to a device to the extended device administrative
+ * hint table.
+ */
+extern void	device_admin_table_update(char *dev_name,
+					  char *param_name,
+					  char *param_val);
+
+/* Initialize the extended device driver administrative hint table */
+extern void	device_driver_admin_table_init(void);
+
+/* Add a hint corresponding to a device to the extended device driver 
+ * administrative hint table.
+ */
+extern void	device_driver_admin_table_update(char *drv_prefix,
+						 char *param_name,
+						 char *param_val);	
+#endif /* _ASM_SN_DRIVER_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/eeprom.h linux/include/asm-ia64/sn/eeprom.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/eeprom.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/eeprom.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,402 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Public interface for reading Atmel EEPROMs via L1 system controllers
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_EEPROM_H
+#define _ASM_SN_EEPROM_H
+
+#include <asm/sn/sgi.h>
+#include <asm/sn/vector.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/pci/bridge.h>
+#include <asm/sn/nic.h>
+
+/*
+ * The following structures are an implementation of the EEPROM info
+ * areas described in the SN1 EEPROM spec and the IPMI FRU Information
+ * Storage definition
+ */
+
+/* Maximum lengths for EEPROM fields
+ */
+#define EEPROM_PARTNUM_LEN	20
+#define EEPROM_SERNUM_LEN	10
+#define	EEPROM_MANUF_NAME_LEN	10
+#define EEPROM_PROD_NAME_LEN	14
+
+
+
+/* The EEPROM "common header", which contains offsets to the other
+ * info areas in the EEPROM
+ */
+typedef struct eeprom_common_hdr_t
+{
+    uchar_t	format;		/* common header format byte */
+    uchar_t	internal_use;	/* offsets to various info areas */
+    uchar_t	chassis;	/*  (in doubleword units)        */
+    uchar_t	board;
+    uchar_t	product;
+    uchar_t	multi_record;
+    uchar_t	pad;
+    uchar_t	checksum;
+} eeprom_common_hdr_t;
+
+
+/* The chassis (brick) info area 
+ */
+typedef struct eeprom_chassis_ia_t
+{
+    uchar_t	format;		/* format byte */
+    uchar_t	length;		/* info area length in doublewords */
+    uchar_t	type;		/* chassis type (always 0x17 "rack mount") */
+    uchar_t	part_num_tl;	/* type/length of part number field */
+
+    char	part_num[EEPROM_PARTNUM_LEN];
+    				/* ASCII part number */
+
+    uchar_t	serial_num_tl;	/* type/length of serial number field */
+
+    char	serial_num[EEPROM_SERNUM_LEN];
+    				/* ASCII serial number */
+
+    uchar_t	checksum;
+
+} eeprom_chassis_ia_t;
+
+
+/* The board info area
+ */
+typedef struct eeprom_board_ia_t
+{
+    uchar_t       format;         /* format byte */
+    uchar_t       length;         /* info area length in doublewords */
+    uchar_t	language;	/* language code, always 0x00 "English" */
+    int		mfg_date;	/* date & time of manufacture, in minutes
+				    since 0:00 1/1/96 */
+    uchar_t	manuf_tl;	/* type/length of manufacturer name field */
+
+    char	manuf[EEPROM_MANUF_NAME_LEN];
+				/* ASCII manufacturer name */
+
+    uchar_t	product_tl;	/* type/length of product name field */
+
+    char	product[EEPROM_PROD_NAME_LEN];
+				/* ASCII product name */
+
+    uchar_t	serial_num_tl;	/* type/length of board serial number */
+
+    char	serial_num[EEPROM_SERNUM_LEN];
+				/* ASCII serial number */
+
+    uchar_t	part_num_tl;	/* type/length of board part number */
+
+    char	part_num[EEPROM_PARTNUM_LEN];
+				/* ASCII part number */
+
+    /*
+     * "custom" fields -- see SN1 EEPROM Spec
+     */
+    uchar_t	board_rev_tl;	/* type/length of board rev (always 0xC2) */
+
+    char	board_rev[2];	/* ASCII board revision */
+
+    uchar_t	eeprom_size_tl; /* type/length of eeprom size field */
+    uchar_t	eeprom_size;	/* size code for eeprom */
+    uchar_t	temp_waiver_tl;	/* type/length of temp waiver field (0xC2) */
+    char	temp_waiver[2];	/* temp waiver */
+
+    
+    /*
+     * these fields only appear in main boards' EEPROMs
+     */
+    uchar_t	ekey_G_tl;	/* type/length of encryption key "G" */
+    uint32_t	ekey_G;		/* encryption key "G" */
+    uchar_t	ekey_P_tl;	/* type/length of encryption key "P" */
+    uint32_t	ekey_P;		/* encryption key "P" */
+    uchar_t	ekey_Y_tl;	/* type/length of encryption key "Y" */
+    uint32_t	ekey_Y;		/* encryption key "Y" */
+
+    
+    /*
+     * these fields are used for I bricks only
+     */
+    uchar_t	mac_addr_tl;	  /* type/length of MAC address */
+    char	mac_addr[12];	  /* MAC address */
+    uchar_t	ieee1394_cfg_tl;  /* type/length of IEEE 1394 info */
+    uchar_t	ieee1394_cfg[32]; /* IEEE 1394 config info */
+    
+
+    /*
+     * all boards have a checksum
+     */
+    uchar_t	checksum;
+
+} eeprom_board_ia_t;
+
+/* given a pointer to the three-byte little-endian EEPROM representation
+ * of date-of-manufacture, this function translates to a big-endian
+ * integer format
+ */
+int eeprom_xlate_board_mfr_date( uchar_t *src );
+
+
+/* EEPROM Serial Presence Detect record (used for DIMMs in IP35)
+ */
+typedef struct eeprom_spd_t
+{
+    /* 0*/ uchar_t spd_used; /* # of bytes written to serial memory by manufacturer */
+    /* 1*/ uchar_t spd_size; /* Total # of bytes of SPD memory device */
+    /* 2*/ uchar_t mem_type; /* Fundamental memory type (FPM, EDO, SDRAM..) */
+    /* 3*/ uchar_t num_rows; /* # of row addresses on this assembly */
+    /* 4*/ uchar_t num_cols; /* # Column Addresses on this assembly */
+    /* 5*/ uchar_t mod_rows; /* # Module Rows on this assembly */
+    /* 6*/ uchar_t data_width[2]; /* Data Width of this assembly (16b little-endian) */
+    /* 8*/ uchar_t volt_if; /* Voltage interface standard of this assembly */
+    /* 9*/ uchar_t cyc_time; /* SDRAM Cycle time, CL=X (highest CAS latency) */
+    /* A*/ uchar_t acc_time; /* SDRAM Access from Clock (highest CAS latency) */
+    /* B*/ uchar_t dimm_cfg; /* DIMM Configuration type (non-parity, ECC) */
+    /* C*/ uchar_t refresh_rt; /* Refresh Rate/Type */
+    /* D*/ uchar_t prim_width; /* Primary SDRAM Width */
+    /* E*/ uchar_t ec_width; /* Error Checking SDRAM width */
+    /* F*/ uchar_t min_delay; /* Min Clock Delay Back to Back Random Col Address */
+    /*10*/ uchar_t burst_len; /* Burst Lengths Supported */
+    /*11*/ uchar_t num_banks; /* # of Banks on Each SDRAM Device */
+    /*12*/ uchar_t cas_latencies; /* CAS# Latencies Supported */
+    /*13*/ uchar_t cs_latencies; /* CS# Latencies Supported */
+    /*14*/ uchar_t we_latencies; /* Write Latencies Supported */
+    /*15*/ uchar_t mod_attrib; /* SDRAM Module Attributes */
+    /*16*/ uchar_t dev_attrib; /* SDRAM Device Attributes: General */
+    /*17*/ uchar_t cyc_time2; /* Min SDRAM Cycle time at CL X-1 (2nd highest CAS latency) */
+    /*18*/ uchar_t acc_time2; /* SDRAM Access from Clock at CL X-1 (2nd highest CAS latency) */
+    /*19*/ uchar_t cyc_time3; /* Min SDRAM Cycle time at CL X-2 (3rd highest CAS latency) */
+    /*1A*/ uchar_t acc_time3; /* Max SDRAM Access from Clock at CL X-2 (3rd highest CAS latency) */
+    /*1B*/ uchar_t min_row_prechg; /* Min Row Precharge Time (Trp) */
+    /*1C*/ uchar_t min_ra_to_ra; /* Min Row Active to Row Active (Trrd) */
+    /*1D*/ uchar_t min_ras_to_cas; /* Min RAS to CAS Delay (Trcd) */
+    /*1E*/ uchar_t min_ras_pulse; /* Minimum RAS Pulse Width (Tras) */
+    /*1F*/ uchar_t row_density; /* Density of each row on module */
+    /*20*/ uchar_t ca_setup; /* Command and Address signal input setup time */
+    /*21*/ uchar_t ca_hold; /* Command and Address signal input hold time */
+    /*22*/ uchar_t d_setup; /* Data signal input setup time */
+    /*23*/ uchar_t d_hold; /* Data signal input hold time */
+
+    /*24*/ uchar_t pad0[26]; /* unused */
+    
+    /*3E*/ uchar_t data_rev; /* SPD Data Revision Code */
+    /*3F*/ uchar_t checksum; /* Checksum for bytes 0-62 */
+    /*40*/ uchar_t jedec_id[8]; /* Manufacturer's JEDEC ID code */
+    
+    /*48*/ uchar_t mfg_loc; /* Manufacturing Location */
+    /*49*/ uchar_t part_num[18]; /* Manufacturer's Part Number */
+
+    /*5B*/ uchar_t rev_code[2]; /* Revision Code */
+
+    /*5D*/ uchar_t mfg_date[2]; /* Manufacturing Date */
+
+    /*5F*/ uchar_t ser_num[4]; /* Assembly Serial Number */
+
+    /*63*/ uchar_t manuf_data[27]; /* Manufacturer Specific Data */
+
+    /*7E*/ uchar_t intel_freq; /* Intel specification frequency */
+    /*7F*/ uchar_t intel_100MHz; /* Intel spec details for 100MHz support */
+
+} eeprom_spd_t;
+
+
+#define EEPROM_SPD_RECORD_MAXLEN	256
+
+typedef union eeprom_spd_u
+{
+    eeprom_spd_t fields;
+    char         bytes[EEPROM_SPD_RECORD_MAXLEN];
+
+} eeprom_spd_u;
+
+
+/* EEPROM board record
+ */
+typedef struct eeprom_brd_record_t 
+{
+    eeprom_chassis_ia_t		*chassis_ia;
+    eeprom_board_ia_t		*board_ia;
+    eeprom_spd_u		*spd;
+
+} eeprom_brd_record_t;
+
+
+/* End-of-fields marker
+ */
+#define EEPROM_EOF	        0xc1
+
+
+/* masks for dissecting the type/length bytes
+ */
+#define FIELD_FORMAT_MASK       0xc0
+#define FIELD_LENGTH_MASK       0x3f
+
+
+/* field format codes (used in type/length bytes)
+ */
+#define FIELD_FORMAT_BINARY     0x00 /* binary format */
+#define FIELD_FORMAT_BCD        0x40 /* BCD */
+#define FIELD_FORMAT_PACKED     0x80 /* packed 6-bit ASCII */
+#define FIELD_FORMAT_ASCII      0xC0 /* 8-bit ASCII */
+
+
+
+
+/* codes specifying brick and board type
+ */
+#define C_BRICK		0x100
+
+#define C_PIMM		(C_BRICK | 0x10)
+#define C_PIMM_0	(C_PIMM) /* | 0x0 */
+#define C_PIMM_1	(C_PIMM | 0x1)
+
+#define C_DIMM		(C_BRICK | 0x20)
+#define C_DIMM_0	(C_DIMM) /* | 0x0 */
+#define C_DIMM_1	(C_DIMM | 0x1)
+#define C_DIMM_2	(C_DIMM | 0x2)
+#define C_DIMM_3	(C_DIMM | 0x3)
+#define C_DIMM_4	(C_DIMM | 0x4)
+#define C_DIMM_5	(C_DIMM | 0x5)
+#define C_DIMM_6	(C_DIMM | 0x6)
+#define C_DIMM_7	(C_DIMM | 0x7)
+
+#define R_BRICK		0x200
+#define R_POWER		(R_BRICK | 0x10)
+
+#define VECTOR		0x300 /* used in vector ops when the destination
+			       * could be a cbrick or an rbrick */
+
+#define IO_BRICK	0x400
+#define IO_POWER	(IO_BRICK | 0x10)
+
+#define BRICK_MASK	0xf00
+#define SUBORD_MASK	0xf0  /* AND with component specification; if the
+			         result is non-zero, then the component
+			         is a subordinate board of some kind */
+#define COMPT_MASK	0xf   /* if there's more than one instance of a
+				 particular type of subordinate board, this 
+				 masks out which one we're talking about */
+
+
+
+/* functions & macros for obtaining "NIC-like" strings from EEPROMs
+ */
+
+int eeprom_str( char *nic_str, nasid_t nasid, int component );
+int vector_eeprom_str( char *nic_str, nasid_t nasid,
+		       int component, net_vec_t path );
+
+#define CBRICK_EEPROM_STR(s,n)	eeprom_str((s),(n),C_BRICK)
+#define IOBRICK_EEPROM_STR(s,n)	eeprom_str((s),(n),IO_BRICK)
+#define RBRICK_EEPROM_STR(s,n,p)  vector_eeprom_str((s),(n),R_BRICK,p)
+#define VECTOR_EEPROM_STR(s,n,p)  vector_eeprom_str((s),(n),VECTOR,p)
+
+
+
+/* functions for obtaining formatted records from EEPROMs
+ */
+
+int cbrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
+			int component );
+int iobrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
+			 int component );
+int vector_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
+			net_vec_t path, int component );
+
+
+/* functions providing unique id's for duplonet and i/o discovery
+ */
+
+int cbrick_uid_get( nasid_t nasid, uint64_t *uid );
+int rbrick_uid_get( nasid_t nasid, net_vec_t path, uint64_t *uid );
+int iobrick_uid_get( nasid_t nasid, uint64_t *uid );
+
+
+/* retrieve the ethernet MAC address for an I-brick
+ */
+
+int ibrick_mac_addr_get( nasid_t nasid, char *eaddr );
+
+
+/* error codes
+ */
+
+#define EEP_OK			0
+#define EEP_L1			1
+#define EEP_FAIL		2
+#define EEP_BAD_CHECKSUM	3
+#define EEP_NICIFY		4
+#define EEP_PARAM		6
+#define EEP_NOMEM		7
+
+
+
+/* given a hardware graph vertex and an indication of the brick type,
+ * brick and board to be read, this functions reads the eeprom and
+ * attaches a "NIC"-format string of manufacturing information to the 
+ * vertex.  If the vertex already has the string, just returns the
+ * string.  If component is not VECTOR or R_BRICK, the path parameter
+ * is ignored.
+ */
+
+#ifdef IRIX
+char *eeprom_vertex_info_set( int component, int nasid, devfs_handle_t v,
+			      net_vec_t path );
+#endif
+
+
+
+/* We may need to differentiate between an XBridge and other types of
+ * bridges during discovery to tell whether the bridge in question
+ * is part of an IO brick.  The following function reads the WIDGET_ID
+ * register of the bridge under examination and returns a positive value
+ * if the part and mfg numbers stored there indicate that this widget
+ * is an XBridge (and so must be part of a brick).
+ */
+#ifdef IRIX
+int is_iobrick( int nasid, int widget_num );
+#endif
+
+/* the following macro derives the widget number from the register
+ * address passed to it and uses is_iobrick to determine whether
+ * the widget in question is part of an SN1 IO brick.
+ */
+#ifdef IRIX
+#define IS_IOBRICK(rg)	is_iobrick( NASID_GET((rg)), SWIN_WIDGETNUM((rg)) )
+#else
+#define IS_IOBRICK(rg)	1
+#endif
+
+
+
+/* macros for NIC compatibility */
+/* always invoked on "this" cbrick */
+#define HUB_VERTEX_MFG_INFO(v) \
+    eeprom_vertex_info_set( C_BRICK, get_nasid(), (v), 0 )
+
+#define BRIDGE_VERTEX_MFG_INFO(v, r) \
+    ( IS_IOBRICK((r)) ? eeprom_vertex_info_set \
+		          ( IO_BRICK, NASID_GET((r)), (v), 0 ) \
+		      : nic_bridge_vertex_info((v), (r)) )
+
+#ifdef BRINGUP /* will we read mfg info from IOC3's that aren't
+		* part of IO7 cards, or aren't in I/O bricks? */
+#define IOC3_VERTEX_MFG_INFO(v, r, e) \
+    eeprom_vertex_info_set( IO_IO7, NASID_GET((r)), (v), 0 )
+#endif /* BRINGUP */
+
+#define HUB_UID_GET(n,v,p)	cbrick_uid_get((n),(p))
+#define ROUTER_UID_GET(d,p)	rbrick_uid_get(get_nasid(),(d),(p))
+#define XBOW_UID_GET(n,p)	iobrick_uid_get((n),(p))
+
+#endif /* _ASM_SN_EEPROM_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/gda.h linux/include/asm-ia64/sn/gda.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/gda.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/gda.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,108 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/gda.h>.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ *
+ * gda.h -- Contains the data structure for the global data area,
+ * 	The GDA contains information communicated between the
+ *	PROM, SYMMON, and the kernel. 
+ */
+#ifndef _ASM_SN_GDA_H
+#define _ASM_SN_GDA_H
+
+#include <asm/sn/addrs.h>
+
+#define GDA_MAGIC	0x58464552
+
+/*
+ * GDA Version History
+ *
+ * Version #	| Change
+ * -------------+-------------------------------------------------------
+ * 	1	| Initial IP27 version 
+ * 	2	| Prom sets g_partid field to the partition number. 0 IS
+ *		| a valid partition #. 
+ */
+
+#define GDA_VERSION	2	/* Current GDA version # */
+
+#define G_MAGICOFF	0
+#define G_VERSIONOFF	4
+#define G_PROMOPOFF	6
+#define G_MASTEROFF	8
+#define G_VDSOFF	12
+#define G_HKDNORMOFF	16
+#define G_HKDUTLBOFF	24
+#define G_HKDXUTLBOFF	32
+#define G_PARTIDOFF	40
+#define G_TABLEOFF	128
+
+#ifdef _LANGUAGE_C
+
+typedef struct gda {
+	u32	g_magic;	/* GDA magic number */
+	u16	g_version;	/* Version of this structure */
+	u16	g_masterid;	/* The NASID:CPUNUM of the master cpu */
+	u32	g_promop;	/* Passes requests from the kernel to prom */
+	u32	g_vds;		/* Store the virtual dipswitches here */
+	void	**g_hooked_norm;/* ptr to pda loc for norm hndlr */
+	void	**g_hooked_utlb;/* ptr to pda loc for utlb hndlr */
+	void	**g_hooked_xtlb;/* ptr to pda loc for xtlb hndlr */
+	int	g_partid;	/* partition id */
+	int	g_symmax;	/* Max symbols in name table. */
+	void	*g_dbstab;	/* Address of idbg symbol table */
+	char	*g_nametab;	/* Address of idbg name table */
+	void	*g_ktext_repmask;
+				/* Pointer to a mask of nodes with copies
+				 * of the kernel. */
+	char	g_padding[56];	/* pad out to 128 bytes */
+	nasid_t	g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
+						  * indexed by cnodeid.
+						  */
+} gda_t;
+
+#define GDA ((gda_t*) GDA_ADDR(get_nasid()))
+
+#endif /* _LANGUAGE_C */
+/*
+ * Define:	PART_GDA_VERSION
+ * Purpose:	Define the minimum version of the GDA required, lower 
+ *		revisions assume GDA is NOT set up, and read partition
+ *		information from the board info.
+ */
+#define	PART_GDA_VERSION	2
+
+/*
+ * The following requests can be sent to the PROM during startup.
+ */
+
+#define PROMOP_MAGIC		0x0ead0000
+#define PROMOP_MAGIC_MASK	0x0fff0000
+
+#define PROMOP_BIST_SHIFT       11
+#define PROMOP_BIST_MASK        (0x3 << 11)
+
+#define PROMOP_REG		PI_ERR_STACK_ADDR_A
+
+#define PROMOP_INVALID		(PROMOP_MAGIC | 0x00)
+#define PROMOP_HALT             (PROMOP_MAGIC | 0x10)
+#define PROMOP_POWERDOWN        (PROMOP_MAGIC | 0x20)
+#define PROMOP_RESTART          (PROMOP_MAGIC | 0x30)
+#define PROMOP_REBOOT           (PROMOP_MAGIC | 0x40)
+#define PROMOP_IMODE            (PROMOP_MAGIC | 0x50)
+
+#define PROMOP_CMD_MASK		0x00f0
+#define PROMOP_OPTIONS_MASK	0xfff0
+
+#define PROMOP_SKIP_DIAGS	0x0100		/* don't bother running diags */
+#define PROMOP_SKIP_MEMINIT	0x0200		/* don't bother initing memory */
+#define PROMOP_SKIP_DEVINIT	0x0400		/* don't bother initing devices */
+#define PROMOP_BIST1		0x0800		/* keep track of which BIST ran */
+#define PROMOP_BIST2		0x1000		/* keep track of which BIST ran */
+
+#endif /* _ASM_SN_GDA_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/hack.h linux/include/asm-ia64/sn/hack.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/hack.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/hack.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,92 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+#ifndef _ASM_SN_HACK_H
+#define _ASM_SN_HACK_H
+
+#include <asm/sn/types.h>
+#include <asm/uaccess.h>		/* for copy_??_user */
+
+/******************************************
+ * Definitions that do not exist in linux *
+ ******************************************/
+
+typedef int cred_t;	/* This is for compilation reasons */
+struct cred { int x; };
+
+/*
+ * Hardware Graph routines that are currently stubbed!
+ */
+#include <linux/devfs_fs_kernel.h>
+
+#define DELAY(a)
+#define cpuid() 0
+
+/************************************************
+ * Routines redefined to use linux equivalents. *
+ ************************************************/
+
+#define FIXME(s) printk("FIXME: [ %s ] in %s at %s:%d\n", s, __FUNCTION__, __FILE__, __LINE__)
+
+#define sv_init(a,b,c)          FIXME("Fixme: sv_init : no-op")
+#define sv_wait(a,b,c,d)        FIXME("Fixme: sv_wait : no-op")
+#define sv_broadcast(a)  	FIXME("Fixme: sv_broadcast : no-op")
+#define sv_destroy(a)		FIXME("Fixme: sv_destroy : no-op")
+
+extern devfs_handle_t dummy_vrtx;
+#define cpuid_to_vertex(cpuid) dummy_vrtx /* (pdaindr[cpuid].pda->p_vertex) */
+
+#define PUTBUF_LOCK(a) { FIXME("PUTBUF_LOCK"); }
+#define PUTBUF_UNLOCK(a) { FIXME("PUTBUF_UNLOCK"); }
+static inline int sv_signal(sv_t *a) {FIXME("sv_signal : return 0"); return (0); }
+
+#define cmn_err(x,y...)         { FIXME("cmn_err : use printk"); printk(x y); }
+
+typedef int (*splfunc_t)(void);
+extern int badaddr_val(volatile void *, int , volatile void *);
+
+extern int cap_able_cred(uint64_t a, uint64_t b);
+
+#define _CAP_CRABLE(cr,c)	(cap_able_cred(cr,c))
+#define CAP_MEMORY_MGT          (0x01LL << 25)
+#define CAP_DEVICE_MGT          (0x01LL << 37)
+
+#define io_splock(l) l
+#define io_spunlock(l,s)
+
+/* move to stubs.c yet */
+#define spinlock_destroy(a)     /* needed by pcibr_detach() */
+#define mutex_spinlock(a) 0
+#define mutex_spinunlock(a,b)
+#define mutex_spinlock_spl(x,y) y
+#define mutex_init(a,b,c)               ;
+#define mutex_lock(a,b)                 ;
+#define mutex_unlock(a)                 ;
+#define dev_to_vhdl(dev) 0
+#define get_timestamp() 0
+#define us_delay(a)
+#define v_mapphys(a,b,c) printk("Fixme: v_mapphys - soft->base 0x%p\n", b);
+#define splhi()  0
+#define spl7	splhi()
+#define splx(s)
+#define spinlock_init(x,name) mutex_init(x, MUTEX_DEFAULT, name);
+
+extern void * kmem_alloc_node(register size_t, register int, cnodeid_t);
+extern void * kmem_zalloc(size_t, int);
+extern void * kmem_zalloc_node(register size_t, register int, cnodeid_t );
+extern void * kmem_zone_alloc(register zone_t *, int);
+extern zone_t * kmem_zone_init(register int , char *);
+extern void kmem_zone_free(register zone_t *, void *);
+extern int is_specified(char *);
+extern int cap_able(uint64_t);
+extern int compare_and_swap_ptr(void **, void *, void *);
+
+#endif	/* _ASM_SN_HACK_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/hcl.h linux/include/asm-ia64/sn/hcl.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/hcl.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/hcl.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,114 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_HCL_H
+#define _ASM_SN_HCL_H
+
+extern spinlock_t hcl_spinlock;
+extern devfs_handle_t hcl_handle; /* HCL driver */
+extern devfs_handle_t hwgraph_root;
+
+
+typedef long            labelcl_info_place_t;
+typedef long            arbitrary_info_t;
+typedef long            arb_info_desc_t;
+
+/* Support for INVENTORY */
+struct inventory_s;
+struct invplace_s;
+extern struct invplace_s invplace_none;
+
+
+/* 
+ * Reserve room in every vertex for 2 pieces of fast access indexed information 
+ * Note that we do not save a pointer to the bdevsw or cdevsw[] tables anymore.
+ */
+#define HWGRAPH_NUM_INDEX_INFO	2	/* MAX Entries */
+#define HWGRAPH_CONNECTPT	0	/* connect point (parent) */
+#define HWGRAPH_FASTINFO	1	/* callee's private handle */
+
+/*
+ * Reserved edge_place_t values, used as the "place" parameter to edge_get_next.
+ * Every vertex in the hwgraph has up to 2 *implicit* edges.  There is an implicit
+ * edge called "." that points to the current vertex.  There is an implicit edge
+ * called ".." that points to the vertex' connect point.
+ */
+#define EDGE_PLACE_WANT_CURRENT 0	/* "." */
+#define EDGE_PLACE_WANT_CONNECTPT 1	/* ".." */
+#define EDGE_PLACE_WANT_REAL_EDGES 2	/* Get the first real edge */
+#define HWGRAPH_RESERVED_PLACES 2
+
+
+/*
+ * Special pre-defined edge labels.
+ */
+#define HWGRAPH_EDGELBL_HW 	"hw"
+#define HWGRAPH_EDGELBL_DOT 	"."
+#define HWGRAPH_EDGELBL_DOTDOT 	".."
+#define graph_edge_place_t uint
+
+/*
+ * External declarations of EXPORTED SYMBOLS in hcl.c
+ */
+extern devfs_handle_t hwgraph_register(devfs_handle_t, const char *,
+	unsigned int, unsigned int, unsigned int, unsigned int,
+	umode_t, uid_t, gid_t, struct file_operations *, void *);
+
+extern int hwgraph_mk_symlink(devfs_handle_t, const char *, unsigned int,
+	unsigned int, const char *, unsigned int, devfs_handle_t *, void *);
+
+extern int hwgraph_vertex_destroy(devfs_handle_t);
+
+extern int hwgraph_edge_add(devfs_handle_t, devfs_handle_t, char *);
+extern int hwgraph_edge_get(devfs_handle_t, char *, devfs_handle_t *);
+
+extern arbitrary_info_t hwgraph_fastinfo_get(devfs_handle_t);
+extern void hwgraph_fastinfo_set(devfs_handle_t, arbitrary_info_t );
+extern devfs_handle_t hwgraph_mk_dir(devfs_handle_t, const char *, unsigned int, void *);
+
+extern int hwgraph_connectpt_set(devfs_handle_t, devfs_handle_t);
+extern devfs_handle_t hwgraph_connectpt_get(devfs_handle_t);
+extern int hwgraph_edge_get_next(devfs_handle_t, char *, devfs_handle_t *, uint *);
+extern graph_error_t hwgraph_edge_remove(devfs_handle_t, char *, devfs_handle_t *);
+
+extern graph_error_t hwgraph_traverse(devfs_handle_t, char *, devfs_handle_t *);
+
+extern int hwgraph_vertex_get_next(devfs_handle_t *, devfs_handle_t *);
+extern int hwgraph_inventory_get_next(devfs_handle_t, invplace_t *, 
+				      inventory_t **);
+extern int hwgraph_inventory_add(devfs_handle_t, int, int, major_t, minor_t, int);
+extern int hwgraph_inventory_remove(devfs_handle_t, int, int, major_t, minor_t, int);
+extern int hwgraph_controller_num_get(devfs_handle_t);
+extern void hwgraph_controller_num_set(devfs_handle_t, int);
+extern int hwgraph_path_ad(devfs_handle_t, char *, devfs_handle_t *);
+extern devfs_handle_t hwgraph_path_to_vertex(char *);
+extern devfs_handle_t hwgraph_path_to_dev(char *);
+extern devfs_handle_t hwgraph_block_device_get(devfs_handle_t);
+extern devfs_handle_t hwgraph_char_device_get(devfs_handle_t);
+extern graph_error_t hwgraph_char_device_add(devfs_handle_t, char *, char *, devfs_handle_t *);
+extern int hwgraph_path_add(devfs_handle_t, char *, devfs_handle_t *);
+extern struct file_operations * hwgraph_bdevsw_get(devfs_handle_t);
+extern int hwgraph_info_add_LBL(devfs_handle_t, char *, arbitrary_info_t);
+extern int hwgraph_info_get_LBL(devfs_handle_t, char *, arbitrary_info_t *);
+extern int hwgraph_info_replace_LBL(devfs_handle_t, char *, arbitrary_info_t,
+				    arbitrary_info_t *);
+extern int hwgraph_info_get_exported_LBL(devfs_handle_t, char *, int *, arbitrary_info_t *);
+extern int hwgraph_info_get_next_LBL(devfs_handle_t, char *, arbitrary_info_t *,
+                                labelcl_info_place_t *);
+
+extern int hwgraph_path_lookup(devfs_handle_t, char *, devfs_handle_t *, char **);
+extern int hwgraph_info_export_LBL(devfs_handle_t, char *, int);
+extern int hwgraph_info_unexport_LBL(devfs_handle_t, char *);
+extern int hwgraph_info_remove_LBL(devfs_handle_t, char *, arbitrary_info_t *);
+extern char * vertex_to_name(devfs_handle_t, char *, uint);
+extern graph_error_t hwgraph_vertex_unref(devfs_handle_t);
+
+
+
+#endif /* _ASM_SN_HCL_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/hcl_util.h linux/include/asm-ia64/sn/hcl_util.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/hcl_util.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/hcl_util.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,24 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef _ASM_SN_HCL_UTIL_H
+#define _ASM_SN_HCL_UTIL_H
+
+extern char * dev_to_name(devfs_handle_t, char *, uint);
+extern int device_master_set(devfs_handle_t, devfs_handle_t);
+extern devfs_handle_t device_master_get(devfs_handle_t);
+extern cnodeid_t master_node_get(devfs_handle_t);
+extern cnodeid_t nodevertex_to_cnodeid(devfs_handle_t);
+extern void mark_nodevertex_as_node(devfs_handle_t, cnodeid_t);
+extern void device_info_set(devfs_handle_t, void *);
+extern void *device_info_get(devfs_handle_t);
+
+
+#endif /* _ASM_SN_HCL_UTIL_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/hubspc.h linux/include/asm-ia64/sn/hubspc.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/hubspc.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/hubspc.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,25 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_HUBSPC_H
+#define _ASM_SN_HUBSPC_H
+
+typedef enum {
+        HUBSPC_REFCOUNTERS,
+	HUBSPC_PROM
+} hubspc_subdevice_t;
+
+
+/*
+ * Reference Counters
+ */
+
+extern int refcounters_attach(devfs_handle_t hub);
+
+#endif /* _ASM_SN_HUBSPC_H */        
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/hwcntrs.h linux/include/asm-ia64/sn/hwcntrs.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/hwcntrs.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/hwcntrs.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,98 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_HWCNTRS_H
+#define _ASM_SN_HWCNTRS_H
+
+
+typedef  uint64_t refcnt_t;
+
+#define SN0_REFCNT_MAX_COUNTERS 64
+
+typedef struct sn0_refcnt_set {
+	refcnt_t    refcnt[SN0_REFCNT_MAX_COUNTERS];
+        uint64_t  flags;
+        uint64_t  reserved[4];
+} sn0_refcnt_set_t;
+
+typedef struct sn0_refcnt_buf {
+	sn0_refcnt_set_t   refcnt_set;
+	uint64_t         paddr;
+        uint64_t         page_size;
+        cnodeid_t          cnodeid;         /* cnodeid + pad[3] use 64 bits */
+        uint16_t           pad[3];
+        uint64_t         reserved[4];
+} sn0_refcnt_buf_t;
+
+typedef struct sn0_refcnt_args {
+	uint64_t          vaddr;
+	uint64_t          len;
+	sn0_refcnt_buf_t*   buf;
+        uint64_t          reserved[4];
+} sn0_refcnt_args_t;
+
+/*
+ * Info needed by the user level program
+ * to mmap the refcnt buffer
+ */
+
+#define RCB_INFO_GET  1
+#define RCB_SLOT_GET  2
+
+typedef struct rcb_info {
+        uint64_t  rcb_len;                  /* total refcnt buffer len in bytes */
+
+        int         rcb_sw_sets;              /* number of sw counter sets in buffer */
+        int         rcb_sw_counters_per_set;  /* sw counters per set -- numnodes */
+        int         rcb_sw_counter_size;      /* sizeof(refcnt_t) -- size of sw cntr */
+
+        int         rcb_base_pages;           /* number of base pages in node */
+        int         rcb_base_page_size;       /* sw base page size */        
+        uint64_t  rcb_base_paddr;           /* base physical address for this node */
+
+        int         rcb_cnodeid;              /* cnodeid for this node */
+        int         rcb_granularity;          /* hw page size used for counter sets */
+        uint        rcb_hw_counter_max;       /* max hwcounter count (width mask) */
+        int         rcb_diff_threshold;       /* current node differential threshold */
+        int         rcb_abs_threshold;        /* current node absolute threshold */
+        int         rcb_num_slots;            /* physmem slots */
+        
+        int         rcb_reserved[512];
+        
+} rcb_info_t;
+
+typedef struct rcb_slot {
+        uint64_t  base;
+        uint64_t  size;
+} rcb_slot_t;
+
+#if defined(__KERNEL__)
+// #include <sys/immu.h>
+typedef struct sn0_refcnt_args_32 {
+	uint64_t    vaddr;
+	uint64_t    len;
+	app32_ptr_t   buf;
+        uint64_t    reserved[4];
+} sn0_refcnt_args_32_t;
+
+/* Defines and Macros  */
+/* A set of reference counts are for 4k bytes of physical memory */
+#define	NBPREFCNTP	0x1000	
+#define	BPREFCNTPSHIFT	12
+#define bytes_to_refcntpages(x)	(((__psunsigned_t)(x)+(NBPREFCNTP-1))>>BPREFCNTPSHIFT)
+#define refcntpage_offset(x)	((__psunsigned_t)(x)&((NBPP-1)&~(NBPREFCNTP-1)))
+#define align_to_refcntpage(x)	((__psunsigned_t)(x)&(~(NBPREFCNTP-1)))
+
+extern void migr_refcnt_read(sn0_refcnt_buf_t*);
+extern void migr_refcnt_read_extended(sn0_refcnt_buf_t*);
+extern int migr_refcnt_enabled(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_SN_HWCNTRS_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/intr.h linux/include/asm-ia64/sn/intr.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/intr.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/intr.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,250 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_INTR_H
+#define _ASM_SN_INTR_H
+
+/* Number of interrupt levels associated with each interrupt register. */
+#define N_INTPEND_BITS		64
+
+#define INT_PEND0_BASELVL	0
+#define INT_PEND1_BASELVL	64
+
+#define	N_INTPENDJUNK_BITS	8
+#define	INTPENDJUNK_CLRBIT	0x80
+
+#include <asm/sn/intr_public.h>
+
+#if LANGUAGE_C
+
+#if defined(CONFIG_IA64_SGI_IO)
+
+#define II_NAMELEN	24
+
+/*
+ * Dispatch table entry - contains information needed to call an interrupt
+ * routine.
+ */
+typedef struct intr_vector_s {
+	intr_func_t	iv_func;	/* Interrupt handler function */
+	intr_func_t	iv_prefunc;	/* Interrupt handler prologue func */
+	void		*iv_arg;	/* Argument to pass to handler */
+#ifdef IRIX
+	thd_int_t		iv_tinfo;	/* Thread info */
+#endif
+	cpuid_t			iv_mustruncpu;	/* Where we must run. */
+} intr_vector_t;
+
+/* Interrupt information table. */
+typedef struct intr_info_s {
+	xtalk_intr_setfunc_t	ii_setfunc;	/* Function to set the interrupt
+						 * destination and level register.
+						 * It returns 0 (success) or an
+						 * error code.
+						 */
+	void			*ii_cookie;	/* arg passed to setfunc */
+	devfs_handle_t		ii_owner_dev;	/* device that owns this intr */
+	char			ii_name[II_NAMELEN];	/* Name of this intr. */
+	int			ii_flags;	/* informational flags */
+} intr_info_t;
+
+#define iv_tflags	iv_tinfo.thd_flags
+#define iv_isync	iv_tinfo.thd_isync
+#define iv_lat		iv_tinfo.thd_latstats
+#define iv_thread	iv_tinfo.thd_ithread
+#define iv_pri		iv_tinfo.thd_pri
+
+#define THD_CREATED	0x00000001	/*
+					 * We've created a thread for this
+					 * interrupt.
+					 */
+
+/*
+ * Bits for ii_flags:
+ */
+#define II_UNRESERVE	0
+#define II_RESERVE	1		/* Interrupt reserved. 			*/
+#define II_INUSE	2		/* Interrupt connected 			*/
+#define II_ERRORINT	4		/* Interrupt is an error condition 	*/
+#define II_THREADED	8		/* Interrupt handler is threaded.	*/
+
+/*
+ * Interrupt level wildcard
+ */
+#define INTRCONNECT_ANYBIT	-1
+
+/*
+ * This structure holds information needed both to call and to maintain
+ * interrupts.  The two are in separate arrays for the locality benefits.
+ * Since there's only one set of vectors per hub chip (but more than one
+ * CPU, the lock to change the vector tables must be here rather than in
+ * the PDA.
+ */
+
+typedef struct intr_vecblk_s {
+	intr_vector_t	vectors[N_INTPEND_BITS];  /* information needed to
+						     call an intr routine. */
+	intr_info_t	info[N_INTPEND_BITS];	  /* information needed only
+						     to maintain interrupts. */
+	lock_t		vector_lock;		  /* Lock for this and the
+						     masks in the PDA. */
+	splfunc_t	vector_spl;		  /* vector_lock req'd spl */
+	int		vector_state;		  /* Initialized to zero.
+						     Set to INTR_INITED
+						     by hubintr_init.
+						   */
+	int		vector_count;		  /* Number of vectors
+						   * reserved.
+						   */
+	int		cpu_count[CPUS_PER_SUBNODE]; /* How many interrupts are
+						   * connected to each CPU
+						   */
+	int		ithreads_enabled;	  /* Are interrupt threads
+						   * initialized on this node.
+						   * and block?
+						   */
+} intr_vecblk_t;
+
+/* Possible values for vector_state: */
+#define VECTOR_UNINITED	0
+#define VECTOR_INITED	1
+#define VECTOR_SET	2
+
+#define hub_intrvect0	private.p_intmasks.dispatch0->vectors
+#define hub_intrvect1	private.p_intmasks.dispatch1->vectors
+#define hub_intrinfo0	private.p_intmasks.dispatch0->info
+#define hub_intrinfo1	private.p_intmasks.dispatch1->info
+
+#endif	/* CONFIG_IA64_SGI_IO */
+
+/*
+ * Macros to manipulate the interrupt register on the calling hub chip.
+ */
+
+#define LOCAL_HUB_SEND_INTR(_level)	LOCAL_HUB_S(PI_INT_PEND_MOD, \
+						    (0x100|(_level)))
+#if defined(CONFIG_IA64_SGI_IO)
+#define REMOTE_HUB_PI_SEND_INTR(_hub, _sn, _level) \
+		REMOTE_HUB_PI_S((_hub), _sn, PI_INT_PEND_MOD, (0x100|(_level)))
+
+#define REMOTE_CPU_SEND_INTR(_cpuid, _level) 					\
+		REMOTE_HUB_PI_S(cputonasid(_cpuid),				\
+			SUBNODE(cputoslice(_cpuid)),				\
+			PI_INT_PEND_MOD, (0x100|(_level)))
+#endif	/* CONFIG_IA64_SGI_IO*/
+
+/*
+ * When clearing the interrupt, make sure this clear does make it 
+ * to the hub. Otherwise we could end up losing interrupts.
+ * We do an uncached load of the int_pend0 register to ensure this.
+ */
+
+#define LOCAL_HUB_CLR_INTR(_level)	  \
+                LOCAL_HUB_S(PI_INT_PEND_MOD, (_level)),	\
+                LOCAL_HUB_L(PI_INT_PEND0)
+#define REMOTE_HUB_PI_CLR_INTR(_hub, _sn, _level) \
+		REMOTE_HUB_PI_S((_hub), (_sn), PI_INT_PEND_MOD, (_level)),	\
+                REMOTE_HUB_PI_L((_hub), (_sn), PI_INT_PEND0)
+
+#if defined(CONFIG_IA64_SGI_IO)
+/* Special support for use by gfx driver only.  Supports special gfx hub interrupt. */
+extern void install_gfxintr(cpuid_t cpu, ilvl_t swlevel, intr_func_t intr_func, void *intr_arg);
+
+void setrtvector(intr_func_t func);
+
+/*
+ * Interrupt blocking
+ */
+extern void intr_block_bit(cpuid_t cpu, int bit);
+extern void intr_unblock_bit(cpuid_t cpu, int bit);
+#endif	/* CONFIG_IA64_SGI_IO */
+
+#endif /* LANGUAGE_C */
+
+/*
+ * Hard-coded interrupt levels:
+ */
+
+/*
+ *	L0 = SW1
+ *	L1 = SW2
+ *	L2 = INT_PEND0
+ *	L3 = INT_PEND1
+ *	L4 = RTC
+ *	L5 = Profiling Timer
+ *	L6 = Hub Errors
+ *	L7 = Count/Compare (T5 counters)
+ */
+
+
+/* INT_PEND0 hard-coded bits. */
+#ifdef DEBUG_INTR_TSTAMP
+/* hard coded interrupt level for interrupt latency test interrupt */
+#define	CPU_INTRLAT_B	62
+#define	CPU_INTRLAT_A	61
+#endif
+
+/* Hardcoded bits required by software. */
+#define MSC_MESG_INTR	9
+#define CPU_ACTION_B	8
+#define CPU_ACTION_A	7
+
+/* These are determined by hardware: */
+#define CC_PEND_B	6
+#define CC_PEND_A	5
+#define UART_INTR	4
+#define PG_MIG_INTR	3
+#define GFX_INTR_B	2
+#define GFX_INTR_A	1
+#define RESERVED_INTR	0
+
+/* INT_PEND1 hard-coded bits: */
+#define MSC_PANIC_INTR	63
+#define NI_ERROR_INTR	62
+#define MD_COR_ERR_INTR	61
+#define COR_ERR_INTR_B	60
+#define COR_ERR_INTR_A	59
+#define CLK_ERR_INTR	58
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+# define NACK_INT_B	57
+# define NACK_INT_A	56
+# define LB_ERROR	55
+# define XB_ERROR	54
+#else
+	<< BOMB! >>  Must define IP27 or IP35 or IP37
+#endif
+
+#define BRIDGE_ERROR_INTR 53	/* Setup by PROM to catch Bridge Errors */
+
+#define IP27_INTR_0	52	/* Reserved for PROM use */
+#define IP27_INTR_1	51	/*   (do not use in Kernel) */
+#define IP27_INTR_2	50
+#define IP27_INTR_3	49
+#define IP27_INTR_4	48
+#define IP27_INTR_5	47
+#define IP27_INTR_6	46
+#define IP27_INTR_7	45
+
+#define	TLB_INTR_B	44	/* used for tlb flush random */
+#define	TLB_INTR_A	43
+
+#define LLP_PFAIL_INTR_B 42	/* see ml/SN/SN0/sysctlr.c */
+#define LLP_PFAIL_INTR_A 41
+
+#define NI_BRDCAST_ERR_B 40
+#define NI_BRDCAST_ERR_A 39
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+# define IO_ERROR_INTR	38	/* set up by prom */
+# define DEBUG_INTR_B	37	/* used by symmon to stop all cpus */
+# define DEBUG_INTR_A	36
+#endif
+
+#endif /* _ASM_SN_INTR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/intr_public.h linux/include/asm-ia64/sn/intr_public.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/intr_public.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/intr_public.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,59 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef __SYS_SN_INTR_PUBLIC_H__
+#define __SYS_SN_INTR_PUBLIC_H__
+
+
+/* REMEMBER: If you change these, the whole world needs to be recompiled.
+ * It would also require changing the hubspl.s code and SN0/intr.c
+ * Currently, the spl code has no support for multiple INTPEND1 masks.
+ */
+
+#define	N_INTPEND0_MASKS	1
+#define	N_INTPEND1_MASKS	1
+
+#define INTPEND0_MAXMASK	(N_INTPEND0_MASKS - 1)
+#define INTPEND1_MAXMASK	(N_INTPEND1_MASKS - 1)
+
+#if _LANGUAGE_C
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/arch.h>
+#endif
+#include <asm/sn/arch.h>
+
+struct intr_vecblk_s;	/* defined in asm/sn/intr.h */
+
+/*
+ * The following are necessary to create the illusion of a CEL
+ * on the IP27 hub.  We'll add more priority levels soon, but for
+ * now, any interrupt in a particular band effectively does an spl.
+ * These must be in the PDA since they're different for each processor.
+ * Users of this structure must hold the vector_lock in the appropriate vector
+ * block before modifying the mask arrays.  There's only one vector block
+ * for each Hub so a lock in the PDA wouldn't be adequate.
+ */
+typedef struct hub_intmasks_s {
+	/*
+	 * The masks are stored with the lowest-priority (most inclusive)
+	 * in the lowest-numbered masks (i.e., 0, 1, 2...).
+	 */
+	/* INT_PEND0: */
+	hubreg_t		intpend0_masks[N_INTPEND0_MASKS]; 
+	/* INT_PEND1: */
+	hubreg_t		intpend1_masks[N_INTPEND1_MASKS];
+	/* INT_PEND0: */
+	struct intr_vecblk_s	*dispatch0;	
+	/* INT_PEND1: */
+	struct intr_vecblk_s	*dispatch1;
+} hub_intmasks_t;
+
+#endif /* _LANGUAGE_C */
+#endif /* __SYS_SN_INTR_PUBLIC_H__ */
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/invent.h linux/include/asm-ia64/sn/invent.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/invent.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/invent.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,684 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_INVENT_H
+#define _ASM_SN_INVENT_H
+
+/*
+ * sys/sn/invent.h --  Kernel Hardware Inventory
+ *
+ * As the system boots, a list of recognized devices is assembled.
+ * This list can then be accessed through syssgi() by user-level programs
+ * so that they can learn about available peripherals and the system's
+ * hardware configuration.
+ *
+ * The data is organized into a linked list of structures that are composed
+ * of an inventory item class and a class-specific type.  Each instance may
+ * also specify a 32-bit "state" which might be size, readiness, or
+ * anything else that's relevant.
+ *
+ */
+
+#define major_t int
+#define minor_t int
+#define app32_ptr_t unsigned long
+#define graph_vertex_place_t long
+#define GRAPH_VERTEX_NONE ((devfs_handle_t)-1)
+
+typedef struct inventory_s {
+	struct	inventory_s *inv_next;	/* next inventory record in list */
+	int	inv_class;		/* class of object */
+	int	inv_type;		/* class sub-type of object */
+	major_t	inv_controller;		/* object major identifier */
+	minor_t	inv_unit;		/* object minor identifier */
+	int	inv_state;		/* information specific to object or
+					   class */
+} inventory_t;
+
+typedef struct cpu_inv_s {
+	int	cpuflavor;	/* differentiate processor */
+	int	cpufq;		/* cpu frequency */
+	int	sdsize;		/* secondary data cache size */
+	int	sdfreq;		/* speed of the secondary cache */
+} cpu_inv_t;
+
+
+typedef struct diag_inv_s{
+         char name[80];
+         int  diagval;
+         int  physid;
+         int  virtid;
+} diag_inv_t;
+
+
+typedef struct router_inv_s{
+  char portmap[80];             /* String indicating which ports int/ext */
+  char type[40];                /* String name: e.g. "star", "meta", etc. */
+  int  freq;                    /* From hub */
+  int  rev;                     /* From hub */
+} router_inv_t;
+
+
+/*
+ * NOTE: This file is a central registry for inventory IDs for each
+ *       class of inventory object.  It is important to keep the central copy
+ *       of this file up-to-date with the work going on in various engineering
+ *       projects.  When making changes to this file in an engineering project
+ *       tree, please make those changes separately from any others and then
+ *       merge the changes to this file into the main line trees in order to
+ *       prevent other engineering projects from conflicting with your ID
+ *       allocations.
+ */
+
+
+/* Inventory Classes */
+/* when adding a new class, add also to classes[] in hinv.c */
+#define INV_PROCESSOR	1
+#define INV_DISK	2
+#define INV_MEMORY	3
+#define INV_SERIAL	4
+#define INV_PARALLEL	5
+#define INV_TAPE	6
+#define INV_GRAPHICS	7
+#define INV_NETWORK	8
+#define INV_SCSI	9	/* SCSI devices other than disk and tape */
+#define INV_AUDIO	10
+#define	INV_IOBD	11
+#define	INV_VIDEO	12
+#define	INV_BUS		13
+#define	INV_MISC	14	/* miscellaneous: a catchall */
+/*** add post-5.2 classes here for backward compatibility ***/
+#define	INV_COMPRESSION	15
+#define	INV_VSCSI	16	/* SCSI devices on jag other than disk and tape */
+#define	INV_DISPLAY     17
+#define	INV_UNC_SCSILUN	18	/* Unconnected SCSI lun */
+#define	INV_PCI		19	/* PCI Bus */
+#define	INV_PCI_NO_DRV	20	/* PCI Bus without any driver */
+#define	INV_PROM	21	/* Different proms in the system */
+#define INV_IEEE1394	22	/* IEEE1394 devices */
+#define INV_RPS		23      /* redundant power source */
+#define INV_TPU		24	/* Tensor Processing Unit */
+#define INV_FCNODE	25	/* Helper class for SCSI classes, not in classes[] */
+
+/* types for class processor */
+#define INV_CPUBOARD	1
+#define INV_CPUCHIP	2
+#define INV_FPUCHIP	3
+#define INV_CCSYNC	4	/* CC Rev 2+ sync join counter */
+
+/* states for cpu and fpu chips are revision numbers */
+
+/* cpuboard states */
+#define INV_IP20BOARD   10
+#define INV_IP19BOARD   11
+#define INV_IP22BOARD   12
+#define INV_IP21BOARD	13
+#define INV_IP26BOARD	14
+#define INV_IP25BOARD	15
+#define INV_IP30BOARD	16
+#define INV_IP28BOARD	17
+#define INV_IP32BOARD	18
+#define INV_IP27BOARD	19
+#define INV_IPMHSIMBOARD 20
+#define INV_IP33BOARD	21
+#define INV_IP35BOARD	22
+
+/* types for class INV_IOBD */
+#define INV_EVIO	2	/* EVEREST I/O board */
+#define INV_O200IO	3	/* Origin 200 base I/O */
+
+/* IO board types for origin2000  for class INV_IOBD*/
+
+#define INV_O2000BASEIO	0x21	
+#define INV_O2000MSCSI	0x22	
+#define INV_O2000MENET	0x23
+#define INV_O2000HIPPI	0x24
+#define INV_O2000GFX	0x25	
+#define INV_O2000HAROLD 0x26
+#define INV_O2000VME	0x27
+#define INV_O2000MIO	0x28
+#define INV_O2000FC	0x29
+#define INV_O2000LINC	0x2a
+
+#define INV_PCIADAP	4
+/* states for class INV_IOBD type INV_EVERESTIO -- value of eb_type field */
+#define INV_IO4_REV1	0x21	
+
+/* types for class disk */
+/* NB: types MUST be unique within a class.
+   Please check this if adding new types. */
+
+#define INV_SCSICONTROL	1
+#define INV_SCSIDRIVE	2
+#define INV_SCSIFLOPPY	5	/* also cdroms, optical disks, etc. */
+#define INV_JAGUAR	16	/* Interphase Jaguar */
+#define INV_VSCSIDRIVE	17	/* Disk connected to Jaguar */
+#define INV_GIO_SCSICONTROL 18	/* optional GIO SCSI controller */
+#define INV_SCSIRAID	19	/* SCSI attached RAID */
+#define INV_XLVGEN      20	/* Generic XLV disk device */
+#define INV_PCCARD	21	/* PC-card (PCMCIA) devices */
+#define INV_PCI_SCSICONTROL	22   /* optional PCI SCSI controller */
+
+/* states for INV_SCSICONTROL disk type; indicate which chip rev;
+ * for 93A and B, unit field has microcode rev. */
+#define INV_WD93	0	/* WD 33C93  */
+#define INV_WD93A	1	/* WD 33C93A */
+#define INV_WD93B	2	/* WD 33C93B */
+#define INV_WD95A	3	/* WD 33C95A */
+#define INV_SCIP95	4       /* SCIP with a WD 33C95A */
+#define INV_ADP7880	5	/* Adaptec 7880 (single channel) */
+#define INV_QL_REV1     6       /* qlogic 1040  */
+#define INV_QL_REV2     7       /* qlogic 1040A */
+#define INV_QL_REV2_4   8       /* qlogic 1040A rev 4 */
+#define INV_QL_REV3     9       /* qlogic 1040B */
+#define INV_FCADP	10	/* Adaptec Emerald Fibrechannel */
+#define INV_QL_REV4     11      /* qlogic 1040B rev 2 */
+#define INV_QL		12	/* Unknown QL version */	
+#define INV_QL_1240     13      /* qlogic 1240 */
+#define INV_QL_1080     14      /* qlogic 1080 */
+#define INV_QL_1280     15      /* qlogic 1280 */
+#define INV_QL_10160    16      /* qlogic 10160 */
+#define INV_QL_12160    17      /* qlogic 12160 */
+#define INV_QL_2100	18	/* qLogic 2100 Fibrechannel */
+#define INV_QL_2200	19	/* qLogic 2200 Fibrechannel */
+#define INV_SBP2	20	/* SBP2 protocol over OHCI on 1394 */
+
+
+
+/* states for INV_SCSIDRIVE type of class disk */
+#define INV_RAID5_LUN	0x100
+#define INV_PRIMARY	0x200	/* primary path */
+#define INV_ALTERNATE	0x400	/* alternate path */
+#define INV_FAILED	0x800	/* path has failed */
+#define INV_XVMVOL	0x1000	/* disk is managed by XVM */
+
+/* states for INV_SCSIFLOPPY type of class disk */
+#define INV_TEAC_FLOPPY 1       /* TEAC 3 1/2 inch floppy drive */
+#define INV_INSITE_FLOPPY 2     /* INSITE, IOMEGA  Io20S, SyQuest floppy drives */
+
+/* END OF CLASS DISK TYPES */
+
+/* types for class memory */
+/* NB. the states for class memory are sizes in bytes */
+#define INV_MAIN	1
+#define INV_DCACHE	3
+#define INV_ICACHE	4
+#define INV_WBUFFER	5
+#define INV_SDCACHE	6
+#define INV_SICACHE	7
+#define INV_SIDCACHE	8
+#define INV_MAIN_MB	9
+#define INV_HUBSPC      10      /* HUBSPC */
+
+/* types for class serial */
+#define INV_CDSIO	1	/* Central Data serial board */
+#define INV_T3270	2	/* T3270 emulation */
+#define INV_GSE		3	/* SpectraGraphics Gerbil coax cable */
+#define INV_SI		4	/* SNA SDLC controller */
+#define	INV_M333X25 	6	/* X.25 controller */
+#define INV_CDSIO_E	7	/* Central Data serial board on E space */
+#define INV_ONBOARD	8	/* Serial ports per CPU board */
+#define INV_EPC_SERIAL	9	/* EVEREST I/O EPC serial port */
+#define INV_ICA		10	/* IRIS (IBM) Channel Adapter card */
+#define INV_VSC		11	/* SBE VME Synch Comm board */
+#define INV_ISC		12	/* SBE ISA Synch Comm board */
+#define INV_GSC		13	/* SGI GIO Synch Comm board */
+#define INV_ASO_SERIAL	14	/* serial portion of SGI ASO board */
+#define INV_PSC		15	/* SBE PCI Synch Comm board */
+#define INV_IOC3_DMA	16	/* DMA mode IOC3 serial */
+#define INV_IOC3_PIO	17	/* PIO mode IOC3 serial */
+#define INV_INVISIBLE	18	/* invisible inventory entry for kernel use */
+#define INV_ISA_DMA	19	/* DMA mode ISA serial -- O2 */
+
+/* types for class parallel */
+#define INV_GPIB	2	/* National Instrument GPIB board */
+#define INV_GPIB_E	3	/* National Instrument GPIB board on E space*/
+#define INV_EPC_PLP	4	/* EVEREST I/O EPC Parallel Port */
+#define INV_ONBOARD_PLP	5	/* Integral parallel port,
+				      state = 0 -> output only
+				      state = 1 -> bi-directional */
+#define INV_EPP_ECP_PLP	6	/* Integral EPP/ECP parallel port */
+#define INV_EPP_PFD	7	/* External EPP parallel peripheral */
+
+/* types for class tape */
+#define INV_SCSIQIC	1	/* Any SCSI tape, not just QIC{24,150}... */
+#define INV_VSCSITAPE	4	/* SCSI tape connected to Jaguar */
+
+/* sub types for type INV_SCSIQIC and INV_VSCSITAPE (in state) */
+#define TPUNKNOWN	0	/* type not known */
+#define TPQIC24		1	/* QIC24 1/4" cartridge */
+#define TPDAT		2	/* 4mm Digital Audio Tape cartridge */
+#define TPQIC150	3	/* QIC150 1/4" cartridge */
+#define TP9TRACK	4	/* 9 track reel */
+#define TP8MM_8200	5	/* 8 mm video tape cartridge */
+#define TP8MM_8500	6	/* 8 mm video tape cartridge */
+#define TPQIC1000	7	/* QIC1000 1/4" cartridge */
+#define TPQIC1350	8	/* QIC1350 1/4" cartridge */
+#define TP3480		9	/* 3480 compatible cartridge */
+#define TPDLT		10	/* DEC Digital Linear Tape cartridge */
+#define TPD2		11	/* D2 tape cartridge */
+#define TPDLTSTACKER	12	/* DEC Digital Linear Tape stacker */
+#define TPNTP		13	/* IBM Magstar 3590 Tape Device cartridge */
+#define TPNTPSTACKER	14	/* IBM Magstar 3590 Tape Device stacker */
+#define TPSTK9490       15      /* StorageTeK 9490 */
+#define TPSTKSD3        16      /* StorageTeK SD3 */
+#define TPGY10	        17      /* Sony GY-10  */
+#define TP8MM_8900	18	/* 8 mm (AME) tape cartridge */
+#define TPMGSTRMP       19      /* IBM Magstar MP 3570 cartridge */
+#define TPMGSTRMPSTCKR  20      /* IBM Magstar MP stacker */
+#define TPSTK4791       21      /* StorageTek 4791 */
+#define TPSTK4781       22      /* StorageTek 4781 */
+#define TPFUJDIANA1     23      /* Fujitsu Diana-1 (M1016/M1017) */
+#define TPFUJDIANA2     24      /* Fujitsu Diana-2 (M2483) */
+#define TPFUJDIANA3     25      /* Fujitsu Diana-3 (M2488) */
+#define TP8MM_AIT	26	/* Sony AIT format tape */
+#define TPTD3600        27      /* Philips TD3600  */
+#define TPTD3600STCKR   28      /* Philips TD3600  stacker */
+#define TPNCTP          29      /* Philips NCTP */
+#define TPGY2120        30      /* Sony GY-2120 (replaces GY-10)  */
+#define TPOVL490E       31      /* Overland Data L490E (3490E compatible) */
+#define TPSTK9840       32      /* StorageTeK 9840 (aka Eagle) */
+
+/* Diagnostics inventory */
+#define INV_CPUDIAGVAL  70
+
+
+/*
+ *  GFX invent is a subset of gfxinfo
+ */
+
+/* types for class graphics */
+#define INV_GR1BOARD	1	/* GR1 (Eclipse) graphics */
+#define INV_GR1BP	2	/* OBSOLETE - use INV_GR1BIT24 instead */
+#define INV_GR1ZBUFFER	3	/* OBSOLETE - use INV_GR1ZBUF24 instead */
+#define INV_GRODEV	4	/* Clover1 graphics */
+#define INV_GMDEV	5	/* GT graphics */
+#define INV_CG2		6	/* CG2 composite video/genlock board */
+#define INV_VMUXBOARD	7	/* VMUX video mux board */
+#define	INV_VGX		8	/* VGX (PowerVision) graphics */
+#define	INV_VGXT	9	/* VGXT (PowerVision) graphics with IMP5s. */
+#define	INV_LIGHT	10	/* LIGHT graphics */
+#define INV_GR2		11	/* EXPRESS graphics */
+#define INV_RE		12	/* RealityEngine graphics */
+#define INV_VTX		13	/* RealityEngine graphics - VTX variant */
+#define INV_NEWPORT	14	/* Newport graphics */
+#define INV_MGRAS	15	/* Mardigras graphics */
+#define INV_IR		16	/* InfiniteReality graphics */
+#define INV_CRIME	17	/* Moosehead on board CRIME graphics */
+#define INV_IR2		18	/* InfiniteReality2 graphics */
+#define INV_IR2LITE	19	/* Reality graphics */
+#define INV_IR2E	20	/* InfiniteReality2e graphics */
+#define INV_ODSY        21      /* Odyssey graphics */
+#define INV_IR3		22	/* InfiniteReality3 graphics */
+
+/* states for graphics class GR1 */
+#define INV_GR1REMASK	0x7	/* RE version */
+#define INV_GR1REUNK	0x0	/* RE version unknown */
+#define INV_GR1RE1	0x1	/* RE1 */
+#define INV_GR1RE2	0x2	/* RE2 */
+#define INV_GR1BUSMASK	0x38	/* GR1 bus architecture */
+#define INV_GR1PB	0x00	/* Eclipse private bus */
+#define INV_GR1PBVME	0x08	/* VGR2 board VME and private bus interfaces */
+#define INV_GR1TURBO	0x40	/* has turbo option */
+#define INV_GR1BIT24  	0x80    /* has bitplane option */
+#define INV_GR1ZBUF24 	0x100   /* has z-buffer option */
+#define INV_GR1SMALLMON 0x200   /* using 14" monitor */
+#define INV_GR1SMALLMAP 0x400   /* has 256 entry color map */
+#define INV_GR1AUX4 	0x800   /* has AUX/WID plane option */
+
+/* states for graphics class GR2 */
+		/* bitmasks */
+#define INV_GR2_Z	0x1	/* has z-buffer option */
+#define INV_GR2_24	0x2	/* has bitplane option */
+#define INV_GR2_4GE     0x4     /* has 4 GEs */
+#define INV_GR2_1GE	0x8	/* has 1 GEs */
+#define INV_GR2_2GE	0x10	/* has 2 GEs */
+#define INV_GR2_8GE	0x20	/* has 8 GEs */
+#define INV_GR2_GR3	0x40	/* board GR3 */
+#define INV_GR2_GU1	0x80	/* board GU1 */
+#define INV_GR2_INDY    0x100   /* board GR3 on Indy*/
+#define INV_GR2_GR5	0x200	/* board GR3 with 4 GEs, hinv prints GR5-XZ */
+
+		/* supported configurations */
+#define INV_GR2_XS	0x0     /* GR2-XS */
+#define INV_GR2_XSZ	0x1     /* GR2-XS with z-buffer */
+#define INV_GR2_XS24	0x2     /* GR2-XS24 */
+#define INV_GR2_XS24Z	0x3     /* GR2-XS24 with z-buffer */
+#define INV_GR2_XSM	0x4     /* GR2-XSM */
+#define INV_GR2_ELAN	0x7	/* GR2-Elan */
+#define	INV_GR2_XZ	0x13	/* GR2-XZ */
+#define	INV_GR3_XSM	0x44	/* GR3-XSM */
+#define	INV_GR3_ELAN	0x47	/* GR3-Elan */
+#define	INV_GU1_EXTREME	0xa3	/* GU1-Extreme */
+
+/* States for graphics class NEWPORT */
+#define	INV_NEWPORT_XL	0x01	/* Indigo2 XL model */
+#define INV_NEWPORT_24	0x02	/* board has 24 bitplanes */
+#define INV_NEWTON      0x04    /* Triton SUBGR tagging */
+
+/* States for graphics class MGRAS */
+#define INV_MGRAS_ARCHS 0xff000000      /* architectures */
+#define INV_MGRAS_HQ3   0x00000000   /*impact*/
+#define INV_MGRAS_HQ4	0x01000000   /*gamera*/
+#define INV_MGRAS_MOT   0x02000000   /*mothra*/
+#define INV_MGRAS_GES	0x00ff0000	/* number of GEs */
+#define INV_MGRAS_1GE	0x00010000
+#define INV_MGRAS_2GE	0x00020000
+#define INV_MGRAS_RES	0x0000ff00	/* number of REs */
+#define INV_MGRAS_1RE	0x00000100
+#define INV_MGRAS_2RE	0x00000200
+#define INV_MGRAS_TRS	0x000000ff	/* number of TRAMs */
+#define INV_MGRAS_0TR	0x00000000
+#define INV_MGRAS_1TR	0x00000001
+#define INV_MGRAS_2TR	0x00000002
+
+/* States for graphics class CRIME */
+#define INV_CRM_BASE    0x01            /* Moosehead basic model */
+
+/* States for graphics class ODSY */
+#define INV_ODSY_ARCHS      0xff000000 /* architectures */
+#define INV_ODSY_REVA_ARCH  0x01000000 /* Buzz Rev A */
+#define INV_ODSY_REVB_ARCH  0x02000000 /* Buzz Rev B */
+#define INV_ODSY_MEMCFG     0x00ff0000 /* memory configs */
+#define INV_ODSY_MEMCFG_32  0x00010000 /* 32MB memory */
+#define INV_ODSY_MEMCFG_64  0x00020000 /* 64MB memory */
+#define INV_ODSY_MEMCFG_128 0x00030000 /* 128MB memory */
+#define INV_ODSY_MEMCFG_256 0x00040000 /* 256MB memory */
+#define INV_ODSY_MEMCFG_512 0x00050000 /* 512MB memory */
+
+
+/* types for class network */
+#define INV_NET_ETHER		0	/* 10Mb Ethernet */
+#define INV_NET_HYPER		1	/* HyperNet */
+#define	INV_NET_CRAYIOS		2	/* Cray Input/Output Subsystem */
+#define	INV_NET_FDDI		3	/* FDDI */
+#define INV_NET_TOKEN		4	/* 16/4 Token Ring */
+#define INV_NET_HIPPI		5	/* HIPPI */
+#define INV_NET_ATM		6	/* ATM */
+#define INV_NET_ISDN_BRI	7	/* ISDN */
+#define INV_NET_ISDN_PRI	8	/* PRI ISDN */
+#define INV_NET_HIPPIS		9	/* HIPPI-Serial */
+#define	INV_NET_GSN		10	/* GSN (aka HIPPI-6400) */
+
+/* controllers for network types, unique within class network */
+#define INV_ETHER_EC	0	/* IP6 integral controller */
+#define INV_ETHER_ENP	1	/* CMC board */
+#define INV_ETHER_ET	2	/* IP5 integral controller */
+#define INV_HYPER_HY	3	/* HyperNet controller */
+#define	INV_CRAYIOS_CFEI3 4	/* Cray Front End Interface, v3 */
+#define	INV_FDDI_IMF	5	/* Interphase/Martin 3211 FDDI */
+#define INV_ETHER_EGL	6	/* Interphase V/4207 Eagle */
+#define INV_ETHER_FXP	7	/* CMC C/130 FXP */
+#define INV_FDDI_IPG	8	/* Interphase/SGI 4211 Peregrine FDDI */
+#define INV_TOKEN_FV	9	/* Formation fv1600 Token-Ring board */
+#define INV_FDDI_XPI	10	/* XPI GIO bus FDDI */
+#define INV_TOKEN_GTR	11	/* GTR GIO bus TokenRing */
+#define INV_ETHER_GIO	12	/* IP12/20 optional GIO ethernet controller */
+#define INV_ETHER_EE	13	/* Everest IO4 EPC SEEQ/EDLC */
+#define INV_HIO_HIPPI	14	/* HIO HIPPI for Challenge/Onyx */
+#define INV_ATM_GIO64	15	/* ATM OC-3c Mez card */
+#define INV_ETHER_EP	16	/* 8-port E-Plex Ethernet */
+#define INV_ISDN_SM	17	/* Siemens PEB 2085 */
+#define INV_TOKEN_MTR	18	/* EISA TokenRing */
+#define INV_ETHER_EF	19	/* IOC3 Fast Ethernet */
+#define INV_ISDN_48XP	20	/* Xircom PRI-48XP */
+#define INV_FDDI_RNS	21	/* Rockwell Network Systems FDDI */
+#define INV_HIPPIS_XTK	22	/* Xtalk HIPPI-Serial */
+#define INV_ATM_QUADOC3	23	/* Xtalk Quad OC-3c ATM interface */
+#define INV_TOKEN_MTRPCI 24     /* PCI TokenRing */
+#define INV_ETHER_ECF	25	/* PCI Fast Ethernet */
+#define INV_GFE		26	/* GIO Fast Ethernet */
+#define INV_VFE		27	/* VME Fast Ethernet */
+#define	INV_ETHER_GE	28	/* Gigabit Ethernet */
+#define	INV_ETHER_EFP	INV_ETHER_EF	/* unused (same as IOC3 Fast Ethernet) */
+#define INV_GSN_XTK1	29	/* single xtalk version of GSN */
+#define INV_GSN_XTK2	30	/* dual xtalk version of GSN */
+#define INV_FORE_HE	31	/* FORE HE ATM Card */
+#define INV_FORE_PCA	32	/* FORE PCA ATM Card */
+#define INV_FORE_VMA    33      /* FORE VMA ATM Card */
+#define INV_FORE_ESA    34      /* FORE ESA ATM Card */
+#define INV_FORE_GIA    35      /* FORE GIA ATM Card */
+
+/* Types for class INV_SCSI and INV_VSCSI; The type code is the same as
+ * the device type code returned by the Inquiry command, iff the Inquiry
+ * command defines a type code for the device in question.  If it doesn't,
+ * values over 31 will be used for the device type.
+ * Note: the lun is encoded in bits 8-15 of the state.  The
+ * state field low 3 bits contains the information from the inquiry
+ * cmd that indicates ANSI SCSI 1,2, etc. compliance, and bit 7
+ * contains the inquiry info that indicates whether the media is
+ * removable.
+ */
+#define INV_PRINTER	2	/* SCSI printer */
+#define INV_CPU		3	/* SCSI CPU device */
+#define INV_WORM	4	/* write-once-read-many (e.g. optical disks) */
+#define INV_CDROM	5	/* CD-ROM  */
+#define INV_SCANNER	6	/* scanners */
+#define INV_OPTICAL	7	/* optical disks (read-write) */
+#define INV_CHANGER	8	/* jukebox's for CDROMS, for example */
+#define INV_COMM	9	/* Communications device */
+#define INV_RAIDCTLR	32	/* RAID ctlr actually gives type 0 */
+
+/* bit definitions for state field for class INV_SCSI */
+#define INV_REMOVE	0x80	/* has removable media */
+#define INV_SCSI_MASK	7	/* to which ANSI SCSI standard device conforms*/
+
+/* types for class INV_AUDIO */
+
+#define INV_AUDIO_HDSP		0	/* Indigo DSP system */
+#define INV_AUDIO_VIGRA110	1	/* ViGRA 110 audio board */
+#define INV_AUDIO_VIGRA210	2	/* ViGRA 210 audio board */
+#define INV_AUDIO_A2		3	/* HAL2 / Audio Module for Indigo 2 */
+#define INV_AUDIO_A3		4	/* Moosehead (IP32) AD1843 codec */
+#define INV_AUDIO_RAD		5	/* RAD PCI chip */
+
+/* types for class INV_VIDEO */
+
+#define	INV_VIDEO_LIGHT		0
+#define	INV_VIDEO_VS2		1	/* MultiChannel Option */
+#define	INV_VIDEO_EXPRESS	2	/* kaleidoscope video */
+#define	INV_VIDEO_VINO		3
+#define	INV_VIDEO_VO2		4	/* Sirius Video */
+#define	INV_VIDEO_INDY		5	/* Indy Video - kal vid on Newport
+					  gfx on Indy */
+#define	INV_VIDEO_MVP		6	/* Moosehead Video Ports */
+#define	INV_VIDEO_INDY_601	7	/* Indy Video 601 */
+#define	INV_VIDEO_PMUX		8	/* PALMUX video w/ PGR gfx */
+#define	INV_VIDEO_MGRAS		9	/* Galileo 1.5 video */
+#define	INV_VIDEO_DIVO		10	/* DIVO video */
+#define	INV_VIDEO_RACER		11	/* SpeedRacer Pro Video */
+#define	INV_VIDEO_EVO		12	/* EVO Personal Video */
+#define INV_VIDEO_XTHD		13	/* XIO XT-HDTV video */
+
+/* states for video class INV_VIDEO_EXPRESS */
+
+#define INV_GALILEO_REV		0xF
+#define INV_GALILEO_JUNIOR	0x10
+#define INV_GALILEO_INDY_CAM	0x20
+#define INV_GALILEO_DBOB	0x40
+#define INV_GALILEO_ELANTEC	0x80
+
+/* states for video class VINO */
+
+#define INV_VINO_REV		0xF
+#define INV_VINO_INDY_CAM	0x10
+#define INV_VINO_INDY_NOSW	0x20	/* nebulous - means s/w not installed */
+
+/* states for video class MVP */
+
+#define INV_MVP_REV(x)		(((x)&0x0000000f))
+#define INV_MVP_REV_SW(x)	(((x)&0x000000f0)>>4)
+#define INV_MVP_AV_BOARD(x)	(((x)&0x00000f00)>>8)
+#define	INV_MVP_AV_REV(x)	(((x)&0x0000f000)>>12)
+#define	INV_MVP_CAMERA(x)	(((x)&0x000f0000)>>16)
+#define	INV_MVP_CAM_REV(x)	(((x)&0x00f00000)>>20)
+#define INV_MVP_SDIINF(x)       (((x)&0x0f000000)>>24)
+#define INV_MVP_SDI_REV(x)      (((x)&0xf0000000)>>28)
+
+/* types for class INV_BUS */
+
+#define INV_BUS_VME	0
+#define INV_BUS_EISA	1
+#define INV_BUS_GIO	2
+#define INV_BUS_BT3_PCI	3
+
+/* types for class INV_MISC */
+#define INV_MISC_EPC_EINT	0	/* EPC external interrupts */
+#define INV_MISC_PCKM		1	/* pc keyboard or mouse */
+#define INV_MISC_IOC3_EINT	2	/* IOC3 external interrupts */
+#define INV_MISC_OTHER		3	/* non-specific type */
+
+/*
+ * The four components below do not actually have inventory information
+ * associated with the vertex. These symbols are used by grio at the 
+ * moment to figure out the device type from the vertex. If these get
+ * inventory structures in the future, either the type values must
+ * remain the same or grio code needs to change.
+ */
+
+#define INV_XBOW        	3	/* cross bow */
+#define INV_HUB         	4	/* hub */
+#define INV_PCI_BRIDGE  	5	/* pci bridge */
+#define INV_ROUTER		6	/* router */
+
+/*  types for class INV_PROM */
+#define INV_IO6PROM	0
+#define INV_IP27PROM	1
+#define INV_IP35PROM	2
+
+/* types for class INV_COMPRESSION */
+
+#define	INV_COSMO		0
+#define	INV_INDYCOMP		1
+#define	INV_IMPACTCOMP		2	/* cosmo2, aka impact compression */
+#define	INV_VICE		3 	/* Video imaging & compression engine */
+
+/* types for class INV_DISPLAY */
+#define INV_PRESENTER_BOARD	0       /* Indy Presenter adapter board */
+#define INV_PRESENTER_PANEL	1       /* Indy Presenter board and panel */
+#define INV_ICO_BOARD		2	/* IMPACT channel option board */
+#define INV_DCD_BOARD		3	/* O2 dual channel option board */
+#define INV_7of9_BOARD          4       /* 7of9 flatpanel adapter board */
+#define INV_7of9_PANEL          5       /* 7of9 flatpanel board and panel */
+
+/* types for class INV_IEEE1394 */
+#define INV_OHCI		0	/* Ohci IEEE1394 pci card */
+#define INV_RAWISO1394   	10	/* Raw Isochronous IEEE 1394 protocol driver */
+#define INV_RAWASYNC1394 	11	/* Raw Asynchronous IEEE 1394 protocol driver */
+#define INV_AVC1394     	12	/* Audio, Video & Control (AV/C) IEEE 1394 protocol driver */
+
+/* state for class INV_IEEE1394 & type INV_OHCI */
+#define INV_IEEE1394_STATE_TI_REV_1 0
+
+/* O2 DVLink 1.1 controller static info */
+#define INV_IEEE1394_CTLR_O2_DVLINK_11 0x8009104c
+
+/* types for class INV_TPU */
+#define	INV_TPU_EXT		0	/* External XIO Tensor Processing Unit */
+#define	INV_TPU_XIO		1	/* Internal XIO Tensor Processing Unit */
+
+typedef struct invent_generic_s {
+	unsigned short	ig_module;
+	unsigned short	ig_slot;
+	unsigned char	ig_flag;
+	int	ig_invclass;
+} invent_generic_t;
+
+#define INVENT_ENABLED	0x1
+
+typedef struct invent_membnkinfo {
+	unsigned short	imb_size;	/* bank size in MB */
+	unsigned short	imb_attr;	/* Mem attributes */
+	unsigned int	imb_flag;	/* bank flags */
+} invent_membnkinfo_t;
+
+
+typedef struct invent_meminfo {
+	invent_generic_t 	im_gen;
+	unsigned short	im_size;	/* memory size     */
+	unsigned short	im_banks;	/* number of banks */
+	/*
+	 * declare an array with one element. Each platform is expected to
+	 * allocate the size required based on the number of banks and set
+	 * the im_banks correctly for this array traversal.
+	 */
+	invent_membnkinfo_t im_bank_info[1]; 
+} invent_meminfo_t;
+
+#define INV_MEM_PREMIUM	 0x01
+
+typedef struct invent_cpuinfo {
+	invent_generic_t ic_gen;
+	cpu_inv_t     ic_cpu_info;
+	unsigned short	ic_cpuid;
+	unsigned short	ic_slice;
+} invent_cpuinfo_t;
+
+typedef struct invent_rpsinfo {
+	invent_generic_t ir_gen;
+	int 		 ir_xbox;	/* is RPS connected to an xbox */
+} invent_rpsinfo_t;
+
+typedef struct invent_miscinfo {
+	invent_generic_t im_gen;
+	int       	 im_rev;
+	int		 im_version;
+	int	         im_type;
+	uint64_t	 im_speed;
+} invent_miscinfo_t;
+
+
+typedef struct invent_routerinfo{
+         invent_generic_t im_gen;
+         router_inv_t     rip;
+} invent_routerinfo_t;
+
+
+
+#ifdef __KERNEL__
+
+typedef struct irix5_inventory_s {
+	app32_ptr_t	inv_next;	/* next inventory record in list */
+	int	inv_class;		/* class of object */
+	int	inv_type;		/* class sub-type of object */
+	major_t	inv_controller;		/* object major identifier */
+	minor_t	inv_unit;		/* object minor identifier */
+	int	inv_state;		/* information specific to object or
+					   class */
+} irix5_inventory_t;
+
+typedef struct invplace_s {
+	devfs_handle_t		invplace_vhdl;		/* current vertex */
+	devfs_handle_t		invplace_vplace;	/* place in vertex list */
+	inventory_t		*invplace_inv;		/* place in inv list on vertex */
+} invplace_t; /* Magic cookie placeholder in inventory list */
+
+extern void	    add_to_inventory(int, int, int, int, int);
+extern void	    replace_in_inventory(inventory_t *, int, int, int, int, int);
+extern inventory_t  *get_next_inventory(invplace_t *);
+extern inventory_t  *find_inventory(inventory_t *, int, int, int, int, int);
+extern int	    scaninvent(int (*)(inventory_t *, void *), void *);
+extern int	    get_sizeof_inventory(int);
+
+extern void device_inventory_add(	devfs_handle_t device, 
+					int class, 
+					int type, 
+					major_t ctlr, 
+					minor_t unit, 
+					int state);
+
+
+extern inventory_t *device_inventory_get_next(	devfs_handle_t device,
+						invplace_t *);
+
+extern void device_controller_num_set(	devfs_handle_t,
+					int);
+extern int device_controller_num_get(	devfs_handle_t);
+#endif /* __KERNEL__ */
+#endif /* _ASM_SN_INVENT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/io.h linux/include/asm-ia64/sn/io.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/io.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/io.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,76 @@
+
+/* $Id: io.h,v 1.2 2000/02/02 16:35:57 ralf Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Ralf Baechle
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SN_IO_H
+#define _ASM_SN_IO_H
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/addrs.h>
+#endif
+
+#define IO_SPACE_BASE IO_BASE
+
+/* Because we only have PCI I/O ports.  */
+#if !defined(CONFIG_IA64_SGI_IO)
+#define IO_SPACE_LIMIT 0xffffffff
+
+/* No isa_* versions, the Origin doesn't have ISA / EISA bridges.  */
+
+#else	/* CONFIG_IA64_SGI_IO */
+
+#define IIO_ITTE_BASE	0x400160	/* base of translation table entries */
+#define IIO_ITTE(bigwin)	(IIO_ITTE_BASE + 8*(bigwin))
+
+#define IIO_ITTE_OFFSET_BITS	5	/* size of offset field */
+#define IIO_ITTE_OFFSET_MASK	((1<<IIO_ITTE_OFFSET_BITS)-1)
+#define IIO_ITTE_OFFSET_SHIFT	0
+
+#define IIO_ITTE_WIDGET_BITS	4	/* size of widget field */
+#define IIO_ITTE_WIDGET_MASK	((1<<IIO_ITTE_WIDGET_BITS)-1)
+#define IIO_ITTE_WIDGET_SHIFT	8
+
+#define IIO_ITTE_IOSP		1	/* I/O Space bit */
+#define IIO_ITTE_IOSP_MASK	1
+#define IIO_ITTE_IOSP_SHIFT	12
+#define HUB_PIO_MAP_TO_MEM	0
+#define HUB_PIO_MAP_TO_IO	1
+
+#define IIO_ITTE_INVALID_WIDGET	3	/* an invalid widget  */
+
+#define IIO_ITTE_PUT(nasid, bigwin, io_or_mem, widget, addr) \
+	REMOTE_HUB_S((nasid), IIO_ITTE(bigwin), \
+		(((((addr) >> BWIN_SIZE_BITS) & \
+		   IIO_ITTE_OFFSET_MASK) << IIO_ITTE_OFFSET_SHIFT) | \
+		(io_or_mem << IIO_ITTE_IOSP_SHIFT) | \
+		(((widget) & IIO_ITTE_WIDGET_MASK) << IIO_ITTE_WIDGET_SHIFT)))
+
+#define IIO_ITTE_DISABLE(nasid, bigwin) \
+	IIO_ITTE_PUT((nasid), HUB_PIO_MAP_TO_MEM, \
+		     (bigwin), IIO_ITTE_INVALID_WIDGET, 0)
+
+#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_ADDR((nasid), IIO_ITTE(bigwin))
+
+/*
+ * Macro which takes the widget number, and returns the 
+ * IO PRB address of that widget.
+ * value _x is expected to be a widget number in the range 
+ * 0, 8 - 0xF
+ */
+#define	IIO_IOPRB(_x)	(IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
+			(_x) : \
+			(_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/hubio.h>
+#endif
+
+#endif	/* CONFIG_IA64_SGI_IO */
+
+#endif /* _ASM_SN_IO_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/iobus.h linux/include/asm-ia64/sn/iobus.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/iobus.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/iobus.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,185 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_IOBUS_H
+#define _ASM_SN_IOBUS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct eframe_s;
+struct piomap;
+struct dmamap;
+
+
+/* for ilvl_t interrupt level, for use with intr_block_level.  Can't
+ * typedef twice without causing warnings, and some users of this header
+ * file do not already include driver.h, but expect ilvl_t to be defined,
+ * while others include both, leading to the warning ...
+ */
+
+#include <asm/types.h>
+#include <asm/sn/driver.h>
+
+
+typedef __psunsigned_t iobush_t;
+
+#if __KERNEL__
+/* adapter handle */
+typedef devfs_handle_t adap_t;
+#endif
+
+
+/* interrupt function */
+typedef void	       *intr_arg_t;
+typedef void		intr_func_f(intr_arg_t);
+typedef intr_func_f    *intr_func_t;
+
+#define	INTR_ARG(n)	((intr_arg_t)(__psunsigned_t)(n))
+
+/* system interrupt resource handle -- returned from intr_alloc */
+typedef struct intr_s *intr_t;
+#define INTR_HANDLE_NONE ((intr_t)0)
+
+/*
+ * restore interrupt level value, returned from intr_block_level
+ * for use with intr_unblock_level.
+ */
+typedef void *rlvl_t;
+
+
+/* 
+ * A basic, platform-independent description of I/O requirements for
+ * a device. This structure is usually formed by lboot based on information 
+ * in configuration files.  It contains information about PIO, DMA, and
+ * interrupt requirements for a specific instance of a device.
+ *
+ * The pio description is currently unused.
+ *
+ * The dma description describes bandwidth characteristics and bandwidth
+ * allocation requirements. (TBD)
+ *
+ * The Interrupt information describes the priority of interrupt, desired 
+ * destination, policy (TBD), whether this is an error interrupt, etc.  
+ * For now, interrupts are targeted to specific CPUs.
+ */
+
+typedef struct device_desc_s {
+	/* pio description (currently none) */
+
+	/* dma description */
+	/* TBD: allocated bandwidth requirements */
+
+	/* interrupt description */
+	devfs_handle_t	intr_target;	/* Hardware locator string */
+	int 		intr_policy;	/* TBD */
+	ilvl_t		intr_swlevel;	/* software level for blocking intr */
+	char		*intr_name;	/* name of interrupt, if any */
+
+	int		flags;
+} *device_desc_t;
+
+/* flag values */
+#define	D_INTR_ISERR	0x1		/* interrupt is for error handling */
+#define D_IS_ASSOC	0x2		/* descriptor is associated with a dev */
+#define D_INTR_NOTHREAD	0x4		/* Interrupt handler isn't threaded. */
+
+#define INTR_SWLEVEL_NOTHREAD_DEFAULT 	0	/* Default
+						 * Interrupt level in case of
+						 * non-threaded interrupt 
+						 * handlers
+						 */
+/* 
+ * Drivers use these interfaces to manage device descriptors.
+ *
+ * To examine defaults:
+ *	desc = device_desc_default_get(dev);
+ *	device_desc_*_get(desc);
+ *
+ * To modify defaults:
+ *	desc = device_desc_default_get(dev);
+ *	device_desc_*_set(desc);
+ *
+ * To eliminate defaults:
+ *	device_desc_default_set(dev, NULL);
+ *
+ * To override defaults:
+ *	desc = device_desc_dup(dev);
+ *	device_desc_*_set(desc,...);
+ *	use device_desc in calls
+ *	device_desc_free(desc);
+ *
+ * Software must not set or eliminate default device descriptors for a device while
+ * concurrently get'ing, dup'ing or using them.  Default device descriptors can be 
+ * changed only for a device that is quiescent.  In general, device drivers have no
+ * need to permanently change defaults anyway -- they just override defaults, when
+ * necessary.
+ */
+extern device_desc_t	device_desc_dup(devfs_handle_t dev);
+extern void		device_desc_free(device_desc_t device_desc);
+extern device_desc_t	device_desc_default_get(devfs_handle_t dev);
+extern void		device_desc_default_set(devfs_handle_t dev, device_desc_t device_desc);
+
+extern devfs_handle_t	device_desc_intr_target_get(device_desc_t device_desc);
+extern int		device_desc_intr_policy_get(device_desc_t device_desc);
+extern ilvl_t		device_desc_intr_swlevel_get(device_desc_t device_desc);
+extern char *		device_desc_intr_name_get(device_desc_t device_desc);
+extern int		device_desc_flags_get(device_desc_t device_desc);
+
+extern void		device_desc_intr_target_set(device_desc_t device_desc, devfs_handle_t target);
+extern void		device_desc_intr_policy_set(device_desc_t device_desc, int policy);
+extern void		device_desc_intr_swlevel_set(device_desc_t device_desc, ilvl_t swlevel);
+extern void		device_desc_intr_name_set(device_desc_t device_desc, char *name);
+extern void		device_desc_flags_set(device_desc_t device_desc, int flags);
+
+
+/* IO state */
+#ifdef COMMENT
+#define IO_STATE_EMPTY			0x01	/* non-existent */
+#define IO_STATE_INITIALIZING		0x02	/* being initialized */
+#define IO_STATE_ATTACHING   		0x04    /* becoming active */
+#define IO_STATE_ACTIVE      		0x08    /* active */
+#define IO_STATE_DETACHING   		0x10    /* becoming inactive */
+#define IO_STATE_INACTIVE    		0x20    /* not in use */
+#define IO_STATE_ERROR			0x40    /* problems */
+#define IO_STATE_BAD_HARDWARE		0x80	/* broken hardware */
+#endif
+
+struct edt;
+
+
+/* return codes */
+#define RC_OK				0	
+#define RC_ERROR			1
+
+/* bus configuration management op code */
+#define IOBUS_CONFIG_ATTACH		0	/* vary on */
+#define IOBUS_CONFIG_DETACH		1	/* vary off */
+#define IOBUS_CONFIG_RECOVER		2	/* clear error then vary on */
+
+/* get low-level PIO handle */
+extern int pio_geth(struct piomap*, int bus, int bus_id, int subtype, 
+	iopaddr_t addr, int size);	
+
+/* get low-level DMA handle */
+extern int dma_geth(struct dmamap*, int bus_type, int bus_id, int dma_type, 
+	int npages, int page_size, int flags);	
+
+#ifdef __cplusplus
+}
+#endif
+
+/*
+ * Macros for page number and page offsets, using ps as page size
+ */
+#define x_pnum(addr, ps) ((__psunsigned_t)(addr) / (__psunsigned_t)(ps))
+#define x_poff(addr, ps) ((__psunsigned_t)(addr) & ((__psunsigned_t)(ps) - 1))
+
+#endif /* _ASM_SN_IOBUS_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/ioc3.h linux/include/asm-ia64/sn/ioc3.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/ioc3.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/ioc3.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,671 @@
+/* $Id: ioc3.h,v 1.2 2000/11/16 19:49:17 pfg Exp $
+ *
+ * Copyright (C) 1999 Ralf Baechle
+ * This file is part of the Linux driver for the SGI IOC3.
+ */
+#ifndef IOC3_H
+#define IOC3_H
+
+/* SUPERIO uart register map */
+typedef volatile struct ioc3_uartregs {
+	union {
+		volatile u8	rbr;	/* read only, DLAB == 0 */
+		volatile u8	thr;	/* write only, DLAB == 0 */
+		volatile u8	dll;	/* DLAB == 1 */
+	} u1;
+	union {
+		volatile u8	ier;	/* DLAB == 0 */
+		volatile u8	dlm;	/* DLAB == 1 */
+	} u2;
+	union {
+		volatile u8	iir;	/* read only */
+		volatile u8	fcr;	/* write only */
+	} u3;
+	volatile u8	    iu_lcr;
+	volatile u8	    iu_mcr;
+	volatile u8	    iu_lsr;
+	volatile u8	    iu_msr;
+	volatile u8	    iu_scr;
+} ioc3_uregs_t;
+
+#define iu_rbr u1.rbr
+#define iu_thr u1.thr
+#define iu_dll u1.dll
+#define iu_ier u2.ier
+#define iu_dlm u2.dlm
+#define iu_iir u3.iir
+#define iu_fcr u3.fcr
+
+struct ioc3_sioregs {
+	volatile u8		fill[0x141];	/* starts at 0x141 */
+
+	volatile u8		uartc;
+	volatile u8		kbdcg;
+
+	volatile u8		fill0[0x150 - 0x142 - 1];
+
+	volatile u8		pp_data;
+	volatile u8		pp_dsr;
+	volatile u8		pp_dcr;
+
+	volatile u8		fill1[0x158 - 0x152 - 1];
+
+	volatile u8		pp_fifa;
+	volatile u8		pp_cfgb;
+	volatile u8		pp_ecr;
+
+	volatile u8		fill2[0x168 - 0x15a - 1];
+
+	volatile u8		rtcad;
+	volatile u8		rtcdat;
+
+	volatile u8		fill3[0x170 - 0x169 - 1];
+
+	struct ioc3_uartregs    uartb;	/* 0x20170  */
+	struct ioc3_uartregs    uarta;	/* 0x20178  */
+};
+
+/* Register layout of IOC3 in configuration space.  */
+struct ioc3 {
+	volatile u32	pad0[7];	/* 0x00000  */
+	volatile u32	sio_ir;		/* 0x0001c  */
+	volatile u32	sio_ies;	/* 0x00020  */
+	volatile u32	sio_iec;	/* 0x00024  */
+	volatile u32	sio_cr;		/* 0x00028  */
+	volatile u32	int_out;	/* 0x0002c  */
+	volatile u32	mcr;		/* 0x00030  */
+
+	/* General Purpose I/O registers  */
+	volatile u32	gpcr_s;		/* 0x00034  */
+	volatile u32	gpcr_c;		/* 0x00038  */
+	volatile u32	gpdr;		/* 0x0003c  */
+	volatile u32	gppr_0;		/* 0x00040  */
+	volatile u32	gppr_1;		/* 0x00044  */
+	volatile u32	gppr_2;		/* 0x00048  */
+	volatile u32	gppr_3;		/* 0x0004c  */
+	volatile u32	gppr_4;		/* 0x00050  */
+	volatile u32	gppr_5;		/* 0x00054  */
+	volatile u32	gppr_6;		/* 0x00058  */
+	volatile u32	gppr_7;		/* 0x0005c  */
+	volatile u32	gppr_8;		/* 0x00060  */
+	volatile u32	gppr_9;		/* 0x00064  */
+	volatile u32	gppr_10;	/* 0x00068  */
+	volatile u32	gppr_11;	/* 0x0006c  */
+	volatile u32	gppr_12;	/* 0x00070  */
+	volatile u32	gppr_13;	/* 0x00074  */
+	volatile u32	gppr_14;	/* 0x00078  */
+	volatile u32	gppr_15;	/* 0x0007c  */
+
+	/* Parallel Port Registers  */
+	volatile u32	ppbr_h_a;	/* 0x00080  */
+	volatile u32	ppbr_l_a;	/* 0x00084  */
+	volatile u32	ppcr_a;		/* 0x00088  */
+	volatile u32	ppcr;		/* 0x0008c  */
+	volatile u32	ppbr_h_b;	/* 0x00090  */
+	volatile u32	ppbr_l_b;	/* 0x00094  */
+	volatile u32	ppcr_b;		/* 0x00098  */
+
+	/* Keyboard and Mouse Registers  */
+	volatile u32	km_csr;		/* 0x0009c  */
+	volatile u32	k_rd;		/* 0x000a0  */
+	volatile u32	m_rd;		/* 0x000a4  */
+	volatile u32	k_wd;		/* 0x000a8  */
+	volatile u32	m_wd;		/* 0x000ac  */
+
+	/* Serial Port Registers  */
+	volatile u32	sbbr_h;		/* 0x000b0  */
+	volatile u32	sbbr_l;		/* 0x000b4  */
+	volatile u32	sscr_a;		/* 0x000b8  */
+	volatile u32	stpir_a;	/* 0x000bc  */
+	volatile u32	stcir_a;	/* 0x000c0  */
+	volatile u32	srpir_a;	/* 0x000c4  */
+	volatile u32	srcir_a;	/* 0x000c8  */
+	volatile u32	srtr_a;		/* 0x000cc  */
+	volatile u32	shadow_a;	/* 0x000d0  */
+	volatile u32	sscr_b;		/* 0x000d4  */
+	volatile u32	stpir_b;	/* 0x000d8  */
+	volatile u32	stcir_b;	/* 0x000dc  */
+	volatile u32	srpir_b;	/* 0x000e0  */
+	volatile u32	srcir_b;	/* 0x000e4  */
+	volatile u32	srtr_b;		/* 0x000e8  */
+	volatile u32	shadow_b;	/* 0x000ec  */
+
+	/* Ethernet Registers  */
+	volatile u32	emcr;		/* 0x000f0  */
+	volatile u32	eisr;		/* 0x000f4  */
+	volatile u32	eier;		/* 0x000f8  */
+	volatile u32	ercsr;		/* 0x000fc  */
+	volatile u32	erbr_h;		/* 0x00100  */
+	volatile u32	erbr_l;		/* 0x00104  */
+	volatile u32	erbar;		/* 0x00108  */
+	volatile u32	ercir;		/* 0x0010c  */
+	volatile u32	erpir;		/* 0x00110  */
+	volatile u32	ertr;		/* 0x00114  */
+	volatile u32	etcsr;		/* 0x00118  */
+	volatile u32	ersr;		/* 0x0011c  */
+	volatile u32	etcdc;		/* 0x00120  */
+	volatile u32	ebir;		/* 0x00124  */
+	volatile u32	etbr_h;		/* 0x00128  */
+	volatile u32	etbr_l;		/* 0x0012c  */
+	volatile u32	etcir;		/* 0x00130  */
+	volatile u32	etpir;		/* 0x00134  */
+	volatile u32	emar_h;		/* 0x00138  */
+	volatile u32	emar_l;		/* 0x0013c  */
+	volatile u32	ehar_h;		/* 0x00140  */
+	volatile u32	ehar_l;		/* 0x00144  */
+	volatile u32	micr;		/* 0x00148  */
+	volatile u32	midr_r;		/* 0x0014c  */
+	volatile u32    midr_w;           /* 0x00150  */
+	volatile u32	pad1[(0x20000 - 0x00154) / 4];
+
+	/* SuperIO Registers  XXX */
+	struct ioc3_sioregs	sregs;	/* 0x20000 */
+	volatile u32	pad2[(0x40000 - 0x20180) / 4];
+
+	/* SSRAM Diagnostic Access */
+	volatile u32	ssram[(0x80000 - 0x40000) / 4];
+
+	/* Bytebus device offsets
+	   0x80000 -   Access to the generic devices selected with   DEV0
+	   0x9FFFF     bytebus DEV_SEL_0
+	   0xA0000 -   Access to the generic devices selected with   DEV1
+	   0xBFFFF     bytebus DEV_SEL_1
+	   0xC0000 -   Access to the generic devices selected with   DEV2
+	   0xDFFFF     bytebus DEV_SEL_2
+	   0xE0000 -   Access to the generic devices selected with   DEV3
+	   0xFFFFF     bytebus DEV_SEL_3  */
+};
+
+/*
+ * Ethernet RX Buffer
+ */
+struct ioc3_erxbuf {
+	u32	w0;			/* first word (valid,bcnt,cksum) */
+	u32	err;			/* second word various errors */
+	/* next comes n bytes of padding */
+	/* then the received ethernet frame itself */
+};
+
+#define ERXBUF_IPCKSUM_MASK	0x0000ffff
+#define ERXBUF_BYTECNT_MASK	0x07ff0000
+#define ERXBUF_BYTECNT_SHIFT	16
+#define ERXBUF_V		0x80000000
+
+#define ERXBUF_CRCERR		0x00000001	/* aka RSV15 */
+#define ERXBUF_FRAMERR		0x00000002	/* aka RSV14 */
+#define ERXBUF_CODERR		0x00000004	/* aka RSV13 */
+#define ERXBUF_INVPREAMB	0x00000008	/* aka RSV18 */
+#define ERXBUF_LOLEN		0x00007000	/* aka RSV2_0 */
+#define ERXBUF_HILEN		0x03ff0000	/* aka RSV12_3 */
+#define ERXBUF_MULTICAST	0x04000000	/* aka RSV16 */
+#define ERXBUF_BROADCAST	0x08000000	/* aka RSV17 */
+#define ERXBUF_LONGEVENT	0x10000000	/* aka RSV19 */
+#define ERXBUF_BADPKT		0x20000000	/* aka RSV20 */
+#define ERXBUF_GOODPKT		0x40000000	/* aka RSV21 */
+#define ERXBUF_CARRIER		0x80000000	/* aka RSV22 */
+
+/*
+ * Ethernet TX Descriptor
+ */
+#define ETXD_DATALEN    104
+struct ioc3_etxd {
+	u32	cmd;				/* command field */
+	u32	bufcnt;				/* buffer counts field */
+	u64	p1;				/* buffer pointer 1 */
+	u64	p2;				/* buffer pointer 2 */
+	u8	data[ETXD_DATALEN];		/* opt. tx data */
+};
+
+#define ETXD_BYTECNT_MASK	0x000007ff	/* total byte count */
+#define ETXD_INTWHENDONE	0x00001000	/* intr when done */
+#define ETXD_D0V		0x00010000	/* data 0 valid */
+#define ETXD_B1V		0x00020000	/* buf 1 valid */
+#define ETXD_B2V		0x00040000	/* buf 2 valid */
+#define ETXD_DOCHECKSUM		0x00080000	/* insert ip cksum */
+#define ETXD_CHKOFF_MASK	0x07f00000	/* cksum byte offset */
+#define ETXD_CHKOFF_SHIFT	20
+
+#define ETXD_D0CNT_MASK		0x0000007f
+#define ETXD_B1CNT_MASK		0x0007ff00
+#define ETXD_B1CNT_SHIFT	8
+#define ETXD_B2CNT_MASK		0x7ff00000
+#define ETXD_B2CNT_SHIFT	20
+
+/*
+ * Bytebus device space
+ */
+#define IOC3_BYTEBUS_DEV0	0x80000L
+#define IOC3_BYTEBUS_DEV1	0xa0000L
+#define IOC3_BYTEBUS_DEV2	0xc0000L
+#define IOC3_BYTEBUS_DEV3	0xe0000L
+
+/* ------------------------------------------------------------------------- */
+
+/* Superio Registers (PIO Access) */
+#define IOC3_SIO_BASE		0x20000
+#define IOC3_SIO_UARTC		(IOC3_SIO_BASE+0x141)	/* UART Config */
+#define IOC3_SIO_KBDCG		(IOC3_SIO_BASE+0x142)	/* KBD Config */
+#define IOC3_SIO_PP_BASE	(IOC3_SIO_BASE+PP_BASE)		/* Parallel Port */
+#define IOC3_SIO_RTC_BASE	(IOC3_SIO_BASE+0x168)	/* Real Time Clock */
+#define IOC3_SIO_UB_BASE	(IOC3_SIO_BASE+UARTB_BASE)	/* UART B */
+#define IOC3_SIO_UA_BASE	(IOC3_SIO_BASE+UARTA_BASE)	/* UART A */
+
+/* SSRAM Diagnostic Access */
+#define IOC3_SSRAM	IOC3_RAM_OFF	/* base of SSRAM diagnostic access */
+#define IOC3_SSRAM_LEN	0x40000 /* 256kb (address space size, may not be fully populated) */
+#define IOC3_SSRAM_DM	0x0000ffff	/* data mask */
+#define IOC3_SSRAM_PM	0x00010000	/* parity mask */
+
+/* bitmasks for PCI_SCR */
+#define PCI_SCR_PAR_RESP_EN	0x00000040	/* enb PCI parity checking */
+#define PCI_SCR_SERR_EN		0x00000100	/* enable the SERR# driver */
+#define PCI_SCR_DROP_MODE_EN	0x00008000	/* drop pios on parity err */
+#define PCI_SCR_RX_SERR		(0x1 << 16)
+#define PCI_SCR_DROP_MODE	(0x1 << 17)
+#define PCI_SCR_SIG_PAR_ERR	(0x1 << 24)
+#define PCI_SCR_SIG_TAR_ABRT	(0x1 << 27)
+#define PCI_SCR_RX_TAR_ABRT	(0x1 << 28)
+#define PCI_SCR_SIG_MST_ABRT	(0x1 << 29)
+#define PCI_SCR_SIG_SERR	(0x1 << 30)
+#define PCI_SCR_PAR_ERR		(0x1 << 31)
+
+/* bitmasks for IOC3_KM_CSR */
+#define KM_CSR_K_WRT_PEND 0x00000001	/* kbd port xmitting or resetting */
+#define KM_CSR_M_WRT_PEND 0x00000002	/* mouse port xmitting or resetting */
+#define KM_CSR_K_LCB	  0x00000004	/* Line Cntrl Bit for last KBD write */
+#define KM_CSR_M_LCB	  0x00000008	/* same for mouse */
+#define KM_CSR_K_DATA	  0x00000010	/* state of kbd data line */
+#define KM_CSR_K_CLK	  0x00000020	/* state of kbd clock line */
+#define KM_CSR_K_PULL_DATA 0x00000040	/* pull kbd data line low */
+#define KM_CSR_K_PULL_CLK 0x00000080	/* pull kbd clock line low */
+#define KM_CSR_M_DATA	  0x00000100	/* state of ms data line */
+#define KM_CSR_M_CLK	  0x00000200	/* state of ms clock line */
+#define KM_CSR_M_PULL_DATA 0x00000400	/* pull ms data line low */
+#define KM_CSR_M_PULL_CLK 0x00000800	/* pull ms clock line low */
+#define KM_CSR_EMM_MODE	  0x00001000	/* emulation mode */
+#define KM_CSR_SIM_MODE	  0x00002000	/* clock X8 */
+#define KM_CSR_K_SM_IDLE  0x00004000	/* Keyboard is idle */
+#define KM_CSR_M_SM_IDLE  0x00008000	/* Mouse is idle */
+#define KM_CSR_K_TO	  0x00010000	/* Keyboard trying to send/receive */
+#define KM_CSR_M_TO	  0x00020000	/* Mouse trying to send/receive */
+#define KM_CSR_K_TO_EN	  0x00040000	/* KM_CSR_K_TO + KM_CSR_K_TO_EN = cause
+					   SIO_IR to assert */
+#define KM_CSR_M_TO_EN	  0x00080000	/* KM_CSR_M_TO + KM_CSR_M_TO_EN = cause
+					   SIO_IR to assert */
+#define KM_CSR_K_CLAMP_ONE	0x00100000	/* Pull K_CLK low after rec. one char */
+#define KM_CSR_M_CLAMP_ONE	0x00200000	/* Pull M_CLK low after rec. one char */
+#define KM_CSR_K_CLAMP_THREE	0x00400000	/* Pull K_CLK low after rec. three chars */
+#define KM_CSR_M_CLAMP_THREE	0x00800000	/* Pull M_CLK low after rec. three char */
+
+/* bitmasks for IOC3_K_RD and IOC3_M_RD */
+#define KM_RD_DATA_2	0x000000ff	/* 3rd char recvd since last read */
+#define KM_RD_DATA_2_SHIFT 0
+#define KM_RD_DATA_1	0x0000ff00	/* 2nd char recvd since last read */
+#define KM_RD_DATA_1_SHIFT 8
+#define KM_RD_DATA_0	0x00ff0000	/* 1st char recvd since last read */
+#define KM_RD_DATA_0_SHIFT 16
+#define KM_RD_FRAME_ERR_2 0x01000000	/*  framing or parity error in byte 2 */
+#define KM_RD_FRAME_ERR_1 0x02000000	/* same for byte 1 */
+#define KM_RD_FRAME_ERR_0 0x04000000	/* same for byte 0 */
+
+#define KM_RD_KBD_MSE	0x08000000	/* 0 if from kbd, 1 if from mouse */
+#define KM_RD_OFLO	0x10000000	/* 4th char recvd before this read */
+#define KM_RD_VALID_2	0x20000000	/* DATA_2 valid */
+#define KM_RD_VALID_1	0x40000000	/* DATA_1 valid */
+#define KM_RD_VALID_0	0x80000000	/* DATA_0 valid */
+#define KM_RD_VALID_ALL (KM_RD_VALID_0|KM_RD_VALID_1|KM_RD_VALID_2)
+
+/* bitmasks for IOC3_K_WD & IOC3_M_WD */
+#define KM_WD_WRT_DATA	0x000000ff	/* write to keyboard/mouse port */
+#define KM_WD_WRT_DATA_SHIFT 0
+
+/* bitmasks for serial RX status byte */
+#define RXSB_OVERRUN	0x01	/* char(s) lost */
+#define RXSB_PAR_ERR	0x02	/* parity error */
+#define RXSB_FRAME_ERR	0x04	/* framing error */
+#define RXSB_BREAK	0x08	/* break character */
+#define RXSB_CTS	0x10	/* state of CTS */
+#define RXSB_DCD	0x20	/* state of DCD */
+#define RXSB_MODEM_VALID 0x40	/* DCD, CTS and OVERRUN are valid */
+#define RXSB_DATA_VALID 0x80	/* data byte, FRAME_ERR PAR_ERR & BREAK valid */
+
+/* bitmasks for serial TX control byte */
+#define TXCB_INT_WHEN_DONE 0x20 /* interrupt after this byte is sent */
+#define TXCB_INVALID	0x00	/* byte is invalid */
+#define TXCB_VALID	0x40	/* byte is valid */
+#define TXCB_MCR	0x80	/* data<7:0> to modem control register */
+#define TXCB_DELAY	0xc0	/* delay data<7:0> mSec */
+
+/* bitmasks for IOC3_SBBR_L */
+#define SBBR_L_SIZE	0x00000001	/* 0 == 1KB rings, 1 == 4KB rings */
+#define SBBR_L_BASE	0xfffff000	/* lower serial ring base addr */
+
+/* bitmasks for IOC3_SSCR_<A:B> */
+#define SSCR_RX_THRESHOLD 0x000001ff	/* hiwater mark */
+#define SSCR_TX_TIMER_BUSY 0x00010000	/* TX timer in progress */
+#define SSCR_HFC_EN	0x00020000	/* hardware flow control enabled */
+#define SSCR_RX_RING_DCD 0x00040000	/* post RX record on delta-DCD */
+#define SSCR_RX_RING_CTS 0x00080000	/* post RX record on delta-CTS */
+#define SSCR_HIGH_SPD	0x00100000	/* 4X speed */
+#define SSCR_DIAG	0x00200000	/* bypass clock divider for sim */
+#define SSCR_RX_DRAIN	0x08000000	/* drain RX buffer to memory */
+#define SSCR_DMA_EN	0x10000000	/* enable ring buffer DMA */
+#define SSCR_DMA_PAUSE	0x20000000	/* pause DMA */
+#define SSCR_PAUSE_STATE 0x40000000	/* sets when PAUSE takes effect */
+#define SSCR_RESET	0x80000000	/* reset DMA channels */
+
+/* all producer/consumer pointers are the same bitfield */
+#define PROD_CONS_PTR_4K 0x00000ff8	/* for 4K buffers */
+#define PROD_CONS_PTR_1K 0x000003f8	/* for 1K buffers */
+#define PROD_CONS_PTR_OFF 3
+
+/* bitmasks for IOC3_SRCIR_<A:B> */
+#define SRCIR_ARM	0x80000000	/* arm RX timer */
+
+/* bitmasks for IOC3_SRPIR_<A:B> */
+#define SRPIR_BYTE_CNT	0x07000000	/* bytes in packer */
+#define SRPIR_BYTE_CNT_SHIFT 24
+
+/* bitmasks for IOC3_STCIR_<A:B> */
+#define STCIR_BYTE_CNT	0x0f000000	/* bytes in unpacker */
+#define STCIR_BYTE_CNT_SHIFT 24
+
+/* bitmasks for IOC3_SHADOW_<A:B> */
+#define SHADOW_DR	0x00000001	/* data ready */
+#define SHADOW_OE	0x00000002	/* overrun error */
+#define SHADOW_PE	0x00000004	/* parity error */
+#define SHADOW_FE	0x00000008	/* framing error */
+#define SHADOW_BI	0x00000010	/* break interrupt */
+#define SHADOW_THRE	0x00000020	/* transmit holding register empty */
+#define SHADOW_TEMT	0x00000040	/* transmit shift register empty */
+#define SHADOW_RFCE	0x00000080	/* char in RX fifo has an error */
+#define SHADOW_DCTS	0x00010000	/* delta clear to send */
+#define SHADOW_DDCD	0x00080000	/* delta data carrier detect */
+#define SHADOW_CTS	0x00100000	/* clear to send */
+#define SHADOW_DCD	0x00800000	/* data carrier detect */
+#define SHADOW_DTR	0x01000000	/* data terminal ready */
+#define SHADOW_RTS	0x02000000	/* request to send */
+#define SHADOW_OUT1	0x04000000	/* 16550 OUT1 bit */
+#define SHADOW_OUT2	0x08000000	/* 16550 OUT2 bit */
+#define SHADOW_LOOP	0x10000000	/* loopback enabled */
+
+/* bitmasks for IOC3_SRTR_<A:B> */
+#define SRTR_CNT	0x00000fff	/* reload value for RX timer */
+#define SRTR_CNT_VAL	0x0fff0000	/* current value of RX timer */
+#define SRTR_CNT_VAL_SHIFT 16
+#define SRTR_HZ		16000	/* SRTR clock frequency */
+
+/* bitmasks for IOC3_SIO_IR, IOC3_SIO_IEC and IOC3_SIO_IES  */
+#define SIO_IR_SA_TX_MT		0x00000001	/* Serial port A TX empty */
+#define SIO_IR_SA_RX_FULL	0x00000002	/* port A RX buf full */
+#define SIO_IR_SA_RX_HIGH	0x00000004	/* port A RX hiwat */
+#define SIO_IR_SA_RX_TIMER	0x00000008	/* port A RX timeout */
+#define SIO_IR_SA_DELTA_DCD	0x00000010	/* port A delta DCD */
+#define SIO_IR_SA_DELTA_CTS	0x00000020	/* port A delta CTS */
+#define SIO_IR_SA_INT		0x00000040	/* port A pass-thru intr */
+#define SIO_IR_SA_TX_EXPLICIT	0x00000080	/* port A explicit TX thru */
+#define SIO_IR_SA_MEMERR	0x00000100	/* port A PCI error */
+#define SIO_IR_SB_TX_MT		0x00000200	/* */
+#define SIO_IR_SB_RX_FULL	0x00000400	/* */
+#define SIO_IR_SB_RX_HIGH	0x00000800	/* */
+#define SIO_IR_SB_RX_TIMER	0x00001000	/* */
+#define SIO_IR_SB_DELTA_DCD	0x00002000	/* */
+#define SIO_IR_SB_DELTA_CTS	0x00004000	/* */
+#define SIO_IR_SB_INT		0x00008000	/* */
+#define SIO_IR_SB_TX_EXPLICIT	0x00010000	/* */
+#define SIO_IR_SB_MEMERR	0x00020000	/* */
+#define SIO_IR_PP_INT		0x00040000	/* P port pass-thru intr */
+#define SIO_IR_PP_INTA		0x00080000	/* PP context A thru */
+#define SIO_IR_PP_INTB		0x00100000	/* PP context B thru */
+#define SIO_IR_PP_MEMERR	0x00200000	/* PP PCI error */
+#define SIO_IR_KBD_INT		0x00400000	/* kbd/mouse intr */
+#define SIO_IR_RT_INT		0x08000000	/* RT output pulse */
+#define SIO_IR_GEN_INT1		0x10000000	/* RT input pulse */
+#define SIO_IR_GEN_INT_SHIFT	28
+
+/* per device interrupt masks */
+#define SIO_IR_SA		(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | \
+				 SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | \
+				 SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | \
+				 SIO_IR_SA_INT | SIO_IR_SA_TX_EXPLICIT | \
+				 SIO_IR_SA_MEMERR)
+#define SIO_IR_SB		(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | \
+				 SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | \
+				 SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | \
+				 SIO_IR_SB_INT | SIO_IR_SB_TX_EXPLICIT | \
+				 SIO_IR_SB_MEMERR)
+#define SIO_IR_PP		(SIO_IR_PP_INT | SIO_IR_PP_INTA | \
+				 SIO_IR_PP_INTB | SIO_IR_PP_MEMERR)
+#define SIO_IR_RT		(SIO_IR_RT_INT | SIO_IR_GEN_INT1)
+
+/* macro to load pending interrupts */
+#define IOC3_PENDING_INTRS(mem) (PCI_INW(&((mem)->sio_ir)) & \
+				 PCI_INW(&((mem)->sio_ies_ro)))
+
+/* bitmasks for SIO_CR */
+#define SIO_CR_SIO_RESET	0x00000001	/* reset the SIO */
+#define SIO_CR_SER_A_BASE	0x000000fe	/* DMA poll addr port A */
+#define SIO_CR_SER_A_BASE_SHIFT 1
+#define SIO_CR_SER_B_BASE	0x00007f00	/* DMA poll addr port B */
+#define SIO_CR_SER_B_BASE_SHIFT 8
+#define SIO_SR_CMD_PULSE	0x00078000	/* byte bus strobe length */
+#define SIO_CR_CMD_PULSE_SHIFT	15
+#define SIO_CR_ARB_DIAG		0x00380000	/* cur !enet PCI request (ro) */
+#define SIO_CR_ARB_DIAG_TXA	0x00000000
+#define SIO_CR_ARB_DIAG_RXA	0x00080000
+#define SIO_CR_ARB_DIAG_TXB	0x00100000
+#define SIO_CR_ARB_DIAG_RXB	0x00180000
+#define SIO_CR_ARB_DIAG_PP	0x00200000
+#define SIO_CR_ARB_DIAG_IDLE	0x00400000	/* 0 -> active request (ro) */
+
+/* bitmasks for INT_OUT */
+#define INT_OUT_COUNT	0x0000ffff	/* pulse interval timer */
+#define INT_OUT_MODE	0x00070000	/* mode mask */
+#define INT_OUT_MODE_0	0x00000000	/* set output to 0 */
+#define INT_OUT_MODE_1	0x00040000	/* set output to 1 */
+#define INT_OUT_MODE_1PULSE 0x00050000	/* send 1 pulse */
+#define INT_OUT_MODE_PULSES 0x00060000	/* send 1 pulse every interval */
+#define INT_OUT_MODE_SQW 0x00070000	/* toggle output every interval */
+#define INT_OUT_DIAG	0x40000000	/* diag mode */
+#define INT_OUT_INT_OUT 0x80000000	/* current state of INT_OUT */
+
+/* time constants for INT_OUT */
+#define INT_OUT_NS_PER_TICK (30 * 260)	/* 30 ns PCI clock, divisor=260 */
+#define INT_OUT_TICKS_PER_PULSE 3	/* outgoing pulse lasts 3 ticks */
+#define INT_OUT_US_TO_COUNT(x)		/* convert uS to a count value */ \
+	(((x) * 10 + INT_OUT_NS_PER_TICK / 200) *	\
+	 100 / INT_OUT_NS_PER_TICK - 1)
+#define INT_OUT_COUNT_TO_US(x)		/* convert count value to uS */ \
+	(((x) + 1) * INT_OUT_NS_PER_TICK / 1000)
+#define INT_OUT_MIN_TICKS 3	/* min period is width of pulse in "ticks" */
+#define INT_OUT_MAX_TICKS INT_OUT_COUNT		/* largest possible count */
+
+/* bitmasks for GPCR */
+#define GPCR_DIR	0x000000ff	/* tristate pin input or output */
+#define GPCR_DIR_PIN(x) (1<<(x))	/* access one of the DIR bits */
+#define GPCR_EDGE	0x000f0000	/* extint edge or level sensitive */
+#define GPCR_EDGE_PIN(x) (1<<((x)+15))	/* access one of the EDGE bits */
+
+/* values for GPCR */
+#define GPCR_INT_OUT_EN 0x00100000	/* enable INT_OUT to pin 0 */
+#define GPCR_MLAN_EN	0x00200000	/* enable MCR to pin 8 */
+#define GPCR_DIR_SERA_XCVR 0x00000080	/* Port A Transceiver select enable */
+#define GPCR_DIR_SERB_XCVR 0x00000040	/* Port B Transceiver select enable */
+#define GPCR_DIR_PHY_RST   0x00000020	/* ethernet PHY reset enable */
+
+/* defs for some of the generic I/O pins */
+#define GPCR_PHY_RESET		0x20	/* pin is output to PHY reset */
+#define GPCR_UARTB_MODESEL	0x40	/* pin is output to port B mode sel */
+#define GPCR_UARTA_MODESEL	0x80	/* pin is output to port A mode sel */
+
+#define GPPR_PHY_RESET_PIN	5	/* GIO pin controlling phy reset */
+#define GPPR_UARTB_MODESEL_PIN	6	/* GIO pin controlling uart b mode select */
+#define GPPR_UARTA_MODESEL_PIN	7	/* GIO pin controlling uart a mode select */
+
+#define EMCR_DUPLEX		0x00000001
+#define EMCR_PROMISC		0x00000002
+#define EMCR_PADEN		0x00000004
+#define EMCR_RXOFF_MASK		0x000001f8
+#define EMCR_RXOFF_SHIFT	3
+#define EMCR_RAMPAR		0x00000200
+#define EMCR_BADPAR		0x00000800
+#define EMCR_BUFSIZ		0x00001000
+#define EMCR_TXDMAEN		0x00002000
+#define EMCR_TXEN		0x00004000
+#define EMCR_RXDMAEN		0x00008000
+#define EMCR_RXEN		0x00010000
+#define EMCR_LOOPBACK		0x00020000
+#define EMCR_ARB_DIAG		0x001c0000
+#define EMCR_ARB_DIAG_IDLE	0x00200000
+#define EMCR_RST		0x80000000
+
+#define EISR_RXTIMERINT		0x00000001
+#define EISR_RXTHRESHINT	0x00000002
+#define EISR_RXOFLO		0x00000004
+#define EISR_RXBUFOFLO		0x00000008
+#define EISR_RXMEMERR		0x00000010
+#define EISR_RXPARERR		0x00000020
+#define EISR_TXEMPTY		0x00010000
+#define EISR_TXRTRY		0x00020000
+#define EISR_TXEXDEF		0x00040000
+#define EISR_TXLCOL		0x00080000
+#define EISR_TXGIANT		0x00100000
+#define EISR_TXBUFUFLO		0x00200000
+#define EISR_TXEXPLICIT		0x00400000
+#define EISR_TXCOLLWRAP		0x00800000
+#define EISR_TXDEFERWRAP	0x01000000
+#define EISR_TXMEMERR		0x02000000
+#define EISR_TXPARERR		0x04000000
+
+#define ERCSR_THRESH_MASK	0x000001ff	/* enet RX threshold */
+#define ERCSR_RX_TMR		0x40000000	/* simulation only */
+#define ERCSR_DIAG_OFLO		0x80000000	/* simulation only */
+
+#define ERBR_ALIGNMENT		4096
+#define ERBR_L_RXRINGBASE_MASK	0xfffff000
+
+#define ERBAR_BARRIER_BIT	0x0100
+#define ERBAR_RXBARR_MASK	0xffff0000
+#define ERBAR_RXBARR_SHIFT	16
+
+#define ERCIR_RXCONSUME_MASK	0x00000fff
+
+#define ERPIR_RXPRODUCE_MASK	0x00000fff
+#define ERPIR_ARM		0x80000000
+
+#define ERTR_CNT_MASK		0x000007ff
+
+#define ETCSR_IPGT_MASK		0x0000007f
+#define ETCSR_IPGR1_MASK	0x00007f00
+#define ETCSR_IPGR1_SHIFT	8
+#define ETCSR_IPGR2_MASK	0x007f0000
+#define ETCSR_IPGR2_SHIFT	16
+#define ETCSR_NOTXCLK		0x80000000
+
+#define ETCDC_COLLCNT_MASK	0x0000ffff
+#define ETCDC_DEFERCNT_MASK	0xffff0000
+#define ETCDC_DEFERCNT_SHIFT	16
+
+#define ETBR_ALIGNMENT		(64*1024)
+#define ETBR_L_RINGSZ_MASK	0x00000001
+#define ETBR_L_RINGSZ128	0
+#define ETBR_L_RINGSZ512	1
+#define ETBR_L_TXRINGBASE_MASK	0xffffc000
+
+#define ETCIR_TXCONSUME_MASK	0x0000ffff
+#define ETCIR_IDLE		0x80000000
+
+#define ETPIR_TXPRODUCE_MASK	0x0000ffff
+
+#define EBIR_TXBUFPROD_MASK	0x0000001f
+#define EBIR_TXBUFCONS_MASK	0x00001f00
+#define EBIR_TXBUFCONS_SHIFT	8
+#define EBIR_RXBUFPROD_MASK	0x007fc000
+#define EBIR_RXBUFPROD_SHIFT	14
+#define EBIR_RXBUFCONS_MASK	0xff800000
+#define EBIR_RXBUFCONS_SHIFT	23
+
+#define MICR_REGADDR_MASK	0x0000001f
+#define MICR_PHYADDR_MASK	0x000003e0
+#define MICR_PHYADDR_SHIFT	5
+#define MICR_READTRIG		0x00000400
+#define MICR_BUSY		0x00000800
+
+#define MIDR_DATA_MASK		0x0000ffff
+
+#define ERXBUF_IPCKSUM_MASK	0x0000ffff
+#define ERXBUF_BYTECNT_MASK	0x07ff0000
+#define ERXBUF_BYTECNT_SHIFT	16
+#define ERXBUF_V		0x80000000
+
+#define ERXBUF_CRCERR		0x00000001	/* aka RSV15 */
+#define ERXBUF_FRAMERR		0x00000002	/* aka RSV14 */
+#define ERXBUF_CODERR		0x00000004	/* aka RSV13 */
+#define ERXBUF_INVPREAMB	0x00000008	/* aka RSV18 */
+#define ERXBUF_LOLEN		0x00007000	/* aka RSV2_0 */
+#define ERXBUF_HILEN		0x03ff0000	/* aka RSV12_3 */
+#define ERXBUF_MULTICAST	0x04000000	/* aka RSV16 */
+#define ERXBUF_BROADCAST	0x08000000	/* aka RSV17 */
+#define ERXBUF_LONGEVENT	0x10000000	/* aka RSV19 */
+#define ERXBUF_BADPKT		0x20000000	/* aka RSV20 */
+#define ERXBUF_GOODPKT		0x40000000	/* aka RSV21 */
+#define ERXBUF_CARRIER		0x80000000	/* aka RSV22 */
+
+#define ETXD_BYTECNT_MASK	0x000007ff	/* total byte count */
+#define ETXD_INTWHENDONE	0x00001000	/* intr when done */
+#define ETXD_D0V		0x00010000	/* data 0 valid */
+#define ETXD_B1V		0x00020000	/* buf 1 valid */
+#define ETXD_B2V		0x00040000	/* buf 2 valid */
+#define ETXD_DOCHECKSUM		0x00080000	/* insert ip cksum */
+#define ETXD_CHKOFF_MASK	0x07f00000	/* cksum byte offset */
+#define ETXD_CHKOFF_SHIFT	20
+
+#define ETXD_D0CNT_MASK		0x0000007f
+#define ETXD_B1CNT_MASK		0x0007ff00
+#define ETXD_B1CNT_SHIFT	8
+#define ETXD_B2CNT_MASK		0x7ff00000
+#define ETXD_B2CNT_SHIFT	20
+
+typedef enum ioc3_subdevs_e {
+    ioc3_subdev_ether,
+    ioc3_subdev_generic,
+    ioc3_subdev_nic,
+    ioc3_subdev_kbms,
+    ioc3_subdev_ttya,
+    ioc3_subdev_ttyb,
+    ioc3_subdev_ecpp,
+    ioc3_subdev_rt,
+    ioc3_nsubdevs
+} ioc3_subdev_t;
+
+/* subdevice disable bits,
+ * from the standard INFO_LBL_SUBDEVS
+ */
+#define IOC3_SDB_ETHER		(1<<ioc3_subdev_ether)
+#define IOC3_SDB_GENERIC	(1<<ioc3_subdev_generic)
+#define IOC3_SDB_NIC		(1<<ioc3_subdev_nic)
+#define IOC3_SDB_KBMS		(1<<ioc3_subdev_kbms)
+#define IOC3_SDB_TTYA		(1<<ioc3_subdev_ttya)
+#define IOC3_SDB_TTYB		(1<<ioc3_subdev_ttyb)
+#define IOC3_SDB_ECPP		(1<<ioc3_subdev_ecpp)
+#define IOC3_SDB_RT		(1<<ioc3_subdev_rt)
+
+#define IOC3_ALL_SUBDEVS	((1<<ioc3_nsubdevs)-1)
+
+#define IOC3_SDB_SERIAL		(IOC3_SDB_TTYA|IOC3_SDB_TTYB)
+
+#define IOC3_STD_SUBDEVS	IOC3_ALL_SUBDEVS
+
+#define IOC3_INTA_SUBDEVS	IOC3_SDB_ETHER
+#define IOC3_INTB_SUBDEVS	(IOC3_SDB_GENERIC|IOC3_SDB_KBMS|IOC3_SDB_SERIAL|IOC3_SDB_ECPP|IOC3_SDB_RT)
+
+/*
+ * PCI Configuration Space Register Address Map, use offset from IOC3 PCI
+ * configuration base such that this can be used for multiple IOC3s
+ */
+#define IOC3_PCI_ID		0x0	/* ID */
+
+#define IOC3_VENDOR_ID_NUM	0x10A9
+#define IOC3_DEVICE_ID_NUM	0x0003
+
+#endif /* IOC3_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/ioerror.h linux/include/asm-ia64/sn/ioerror.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/ioerror.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/ioerror.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,194 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_IOERROR_H
+#define _ASM_SN_IOERROR_H
+
+
+/*
+ * Macros defining the various Errors to be handled as part of
+ * IO Error handling.
+ */
+
+/*
+ * List of errors to be handled by each subsystem.
+ * "error_code" field will take one of these values.
+ * The error code is built up of single bits expressing
+ * our confidence that the error was that type; note
+ * that it is possible to have a PIO or DMA error where
+ * we don't know whether it was a READ or a WRITE, or
+ * even a READ or WRITE error that we're not sure whether
+ * to call a PIO or DMA.
+ *
+ * It is also possible to set both PIO and DMA, and possible
+ * to set both READ and WRITE; the first may be nonsensical
+ * but the second *could* be used to designate an access
+ * that is known to be a read-modify-write cycle. It is
+ * quite possible that nobody will ever use PIO|DMA or
+ * READ|WRITE ... but being flexible is good.
+ */
+#define	IOECODE_UNSPEC		0
+#define	IOECODE_READ		1
+#define	IOECODE_WRITE		2
+#define	IOECODE_PIO		4
+#define	IOECODE_DMA		8
+
+#define	IOECODE_PIO_READ	(IOECODE_PIO|IOECODE_READ)
+#define	IOECODE_PIO_WRITE	(IOECODE_PIO|IOECODE_WRITE)
+#define	IOECODE_DMA_READ	(IOECODE_DMA|IOECODE_READ)
+#define	IOECODE_DMA_WRITE	(IOECODE_DMA|IOECODE_WRITE)
+
+/* support older names, but try to move everything
+ * to using new names that identify which package
+ * controls their values ...
+ */
+#define	PIO_READ_ERROR		IOECODE_PIO_READ
+#define	PIO_WRITE_ERROR		IOECODE_PIO_WRITE
+#define	DMA_READ_ERROR		IOECODE_DMA_READ
+#define	DMA_WRITE_ERROR		IOECODE_DMA_WRITE
+
+/*
+ * List of error numbers returned by error handling sub-system.
+ */
+
+#define	IOERROR_HANDLED		0	/* Error Properly handled.        */
+#define	IOERROR_NODEV		0x1	/* No such device attached        */
+#define	IOERROR_BADHANDLE	0x2	/* Received bad handle            */
+#define	IOERROR_BADWIDGETNUM	0x3	/* Bad widget number              */
+#define	IOERROR_BADERRORCODE	0x4	/* Bad error code passed in       */
+#define	IOERROR_INVALIDADDR	0x5	/* Invalid address specified      */
+
+#define	IOERROR_WIDGETLEVEL	0x6	/* Some failure at widget level    */
+#define	IOERROR_XTALKLEVEL	0x7
+
+#define	IOERROR_HWGRAPH_LOOKUP	0x8	/* hwgraph lookup failed for path  */
+#define	IOERROR_UNHANDLED	0x9	/* handler rejected error          */
+
+#define	IOERROR_PANIC		0xA	/* subsidiary handler has already
+					 * started decode: continue error
+					 * data dump, and panic from top
+					 * caller in error chain.
+					 */
+
+/*
+ * IO errors at the bus/device driver level
+ */
+
+#define	IOERROR_DEV_NOTFOUND	0x10	/* Device matching bus addr not found */
+#define	IOERROR_DEV_SHUTDOWN	0x11	/* Device has been shutdown        */
+
+/*
+ * Type of address.
+ * Indicates the direction of transfer that caused the error.
+ */
+#define	IOERROR_ADDR_PIO	1	/* Error Address generated due to PIO */
+#define	IOERROR_ADDR_DMA	2	/* Error address generated due to DMA */
+
+/*
+ * IO error structure.
+ *
+ * This structure would expand to hold the information retrieved from
+ * all IO related error registers.
+ *
+ * This structure is defined to hold all system specific
+ * information related to a single error.
+ *
+ * This serves a couple of purpose.
+ *      - Error handling often involves translating one form of address to other
+ *        form. So, instead of having different data structures at each level,
+ *        we have a single structure, and the appropriate fields get filled in
+ *        at each layer.
+ *      - This provides a way to dump all error related information in any layer
+ *        of error handling (debugging aid).
+ *
+ * A second possibility is to allow each layer to define its own error
+ * data structure, and fill in the proper fields. This has the advantage
+ * of isolating the layers.
+ * A big concern is the potential stack usage (and overflow), if each layer
+ * defines these structures on stack (assuming we don't want to do kmalloc).
+ *
+ * Any layer wishing to pass extra information to a layer next to it in
+ * error handling hierarchy, can do so as a separate parameter.
+ */
+
+typedef struct io_error_s {
+    /* Bit fields indicating which structure fields are valid */
+    union {
+	struct {
+	    unsigned                ievb_errortype:1;
+	    unsigned                ievb_widgetnum:1;
+	    unsigned                ievb_widgetdev:1;
+	    unsigned                ievb_srccpu:1;
+	    unsigned                ievb_srcnode:1;
+	    unsigned                ievb_errnode:1;
+	    unsigned                ievb_sysioaddr:1;
+	    unsigned                ievb_xtalkaddr:1;
+	    unsigned                ievb_busspace:1;
+	    unsigned                ievb_busaddr:1;
+	    unsigned                ievb_vaddr:1;
+	    unsigned                ievb_memaddr:1;
+	    unsigned		    ievb_epc:1;
+	    unsigned		    ievb_ef:1;
+	} iev_b;
+	unsigned                iev_a;
+    } ie_v;
+
+    short                   ie_errortype;	/* error type: extra info about error */
+    short                   ie_widgetnum;	/* Widget number that's in error */
+    short                   ie_widgetdev;	/* Device within widget in error */
+    cpuid_t                 ie_srccpu;	/* CPU on srcnode generating error */
+    cnodeid_t               ie_srcnode;		/* Node which caused the error   */
+    cnodeid_t               ie_errnode;		/* Node where error was noticed  */
+    iopaddr_t               ie_sysioaddr;	/* Sys specific IO address       */
+    iopaddr_t               ie_xtalkaddr;	/* Xtalk (48bit) addr of Error   */
+    iopaddr_t               ie_busspace;	/* Bus specific address space    */
+    iopaddr_t               ie_busaddr;		/* Bus specific address          */
+    caddr_t                 ie_vaddr;	/* Virtual address of error      */
+    paddr_t                 ie_memaddr;		/* Physical memory address       */
+    caddr_t		    ie_epc;		/* pc when error reported	 */
+    caddr_t		    ie_ef;		/* eframe when error reported	 */
+
+} ioerror_t;
+
+#define	IOERROR_INIT(e)		do { (e)->ie_v.iev_a = 0; } while (0)
+#define	IOERROR_SETVALUE(e,f,v)	do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)
+#define	IOERROR_FIELDVALID(e,f)	(((e)->ie_v.iev_b.ievb_ ## f) != 0)
+#define	IOERROR_GETVALUE(e,f)	(ASSERT(IOERROR_FIELDVALID(e,f)),((e)->ie_ ## f))
+
+#if	IP27 || IP35
+/* hub code likes to call the SysAD address "hubaddr" ... */
+#define	ie_hubaddr	ie_sysioaddr
+#define	ievb_hubaddr	ievb_sysioaddr
+#endif
+
+/*
+ * Error handling Modes.
+ */
+typedef enum {
+    MODE_DEVPROBE,		/* Probing mode. Errors not fatal */
+    MODE_DEVERROR,		/* Error while system is running */
+    MODE_DEVUSERERROR,		/* Device Error created due to user mode access */
+    MODE_DEVREENABLE		/* Reenable pass                */
+} ioerror_mode_t;
+
+
+typedef int             error_handler_f(void *, int, ioerror_mode_t, ioerror_t *);
+typedef void           *error_handler_arg_t;
+
+extern void             ioerror_dump(char *, int, int, ioerror_t *);
+
+#ifdef	ERROR_DEBUG
+#define	IOERROR_DUMP(x, y, z, t)	ioerror_dump((x), (y), (z), (t))
+#define	IOERR_PRINTF(x)	(x)
+#else
+#define	IOERROR_DUMP(x, y, z, t)
+#define	IOERR_PRINTF(x)
+#endif				/* ERROR_DEBUG */
+
+#endif				/* _ASM_SN_IOERROR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/ioerror_handling.h linux/include/asm-ia64/sn/ioerror_handling.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/ioerror_handling.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/ioerror_handling.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,317 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_IOERROR_HANDLING_H
+#define _ASM_SN_IOERROR_HANDLING_H
+
+#if __KERNEL__
+
+/*
+ * Basic types required for io error handling interfaces.
+ */
+
+/*
+ * Return code from the io error handling interfaces.
+ */
+
+enum error_return_code_e {
+	/* Success */
+	ERROR_RETURN_CODE_SUCCESS,
+
+	/* Unknown failure */
+	ERROR_RETURN_CODE_GENERAL_FAILURE,
+
+	/* Nth error noticed while handling the first error */
+	ERROR_RETURN_CODE_NESTED_CALL,
+
+	/* State of the vertex is invalid */
+	ERROR_RETURN_CODE_INVALID_STATE,
+
+	/* Invalid action */
+	ERROR_RETURN_CODE_INVALID_ACTION,
+
+	/* Valid action but cannot set it */
+	ERROR_RETURN_CODE_CANNOT_SET_ACTION,
+
+	/* Valid action but not possible for the current state */
+	ERROR_RETURN_CODE_CANNOT_PERFORM_ACTION,
+
+	/* Valid state but cannot change the state of the vertex to it */
+	ERROR_RETURN_CODE_CANNOT_SET_STATE,
+
+	/* ??? */
+	ERROR_RETURN_CODE_DUPLICATE,
+
+	/* Reached the root of the system critical graph */
+	ERROR_RETURN_CODE_SYS_CRITICAL_GRAPH_BEGIN,
+
+	/* Reached the leaf of the system critical graph */
+	ERROR_RETURN_CODE_SYS_CRITICAL_GRAPH_ADD,
+
+	/* Cannot shutdown the device in hw/sw */
+	ERROR_RETURN_CODE_SHUTDOWN_FAILED,
+
+	/* Cannot restart the device in hw/sw */
+	ERROR_RETURN_CODE_RESET_FAILED,
+
+	/* Cannot failover the io subsystem */
+	ERROR_RETURN_CODE_FAILOVER_FAILED,
+
+	/* No Jump Buffer exists */
+	ERROR_RETURN_CODE_NO_JUMP_BUFFER
+};
+
+typedef uint64_t  error_return_code_t;
+
+/*
+ * State of the vertex during error handling.
+ */
+enum error_state_e {
+	/* Ignore state */
+	ERROR_STATE_IGNORE,
+
+	/* Invalid state */
+	ERROR_STATE_NONE,
+
+	/* Trying to decipher the error bits */
+	ERROR_STATE_LOOKUP,
+
+	/* Trying to carryout the action decided upon after
+	 * looking at the error bits 
+	 */
+	ERROR_STATE_ACTION,
+
+	/* Do not allow any other operations to this vertex from
+	 * other parts of the kernel. This is also used to indicate
+	 * that the device has been software shutdown.
+	 */
+	ERROR_STATE_SHUTDOWN,
+
+	/* This is a transitory state when no new requests are accepted
+	 * on behalf of the device. This is usually used when trying to
+	 * quiesce all the outstanding operations and preparing the
+	 * device for a failover / shutdown etc.
+	 */
+	ERROR_STATE_SHUTDOWN_IN_PROGRESS,
+
+	/* This is the state when there is absolutely no activity going
+	 * on wrt device.
+	 */
+	ERROR_STATE_SHUTDOWN_COMPLETE,
+	
+	/* This is the state when the device has issued a retry. */
+	ERROR_STATE_RETRY,
+
+	/* This is the normal state. This can also be used to indicate
+	 * that the device has been software-enabled after software-
+	 * shutting down previously.
+	 */
+	ERROR_STATE_NORMAL
+	
+};
+
+typedef uint64_t  error_state_t;
+
+/*
+ * Generic error classes. This is used to classify errors after looking
+ * at the error bits and helpful in deciding on the action.
+ */
+enum error_class_e {
+	/* Unclassified error */
+	ERROR_CLASS_UNKNOWN,
+
+	/* LLP transmit error */
+	ERROR_CLASS_LLP_XMIT,
+
+	/* LLP receive error */
+	ERROR_CLASS_LLP_RECV,
+
+	/* Credit error */
+	ERROR_CLASS_CREDIT,
+
+	/* Timeout error */
+	ERROR_CLASS_TIMEOUT,
+
+	/* Access error */
+	ERROR_CLASS_ACCESS,
+
+	/* System coherency error */
+	ERROR_CLASS_SYS_COHERENCY,
+
+	/* Bad data error (ecc / parity etc) */
+	ERROR_CLASS_BAD_DATA,
+
+	/* Illegal request packet */
+	ERROR_CLASS_BAD_REQ_PKT,
+	
+	/* Illegal response packet */
+	ERROR_CLASS_BAD_RESP_PKT
+};
+
+typedef uint64_t  error_class_t;
+
+
+/* 
+ * Error context which the error action can use.
+ */
+typedef void			*error_context_t;
+#define ERROR_CONTEXT_IGNORE	((error_context_t)-1ll)
+
+
+/* 
+ * Error action type.
+ */
+typedef error_return_code_t 	(*error_action_f)( error_context_t);
+#define ERROR_ACTION_IGNORE	((error_action_f)-1ll)
+
+/* Typical set of error actions */
+typedef struct error_action_set_s {
+	error_action_f		eas_panic;
+	error_action_f		eas_shutdown;
+	error_action_f		eas_abort;
+	error_action_f		eas_retry;
+	error_action_f		eas_failover;
+	error_action_f		eas_log_n_ignore;
+	error_action_f		eas_reset;
+} error_action_set_t;
+
+
+/* Set of priorities in case multiple error actions/states
+ * are trying to be prescribed for a device.
+ * NOTE : The ordering below encapsulates the priorities. Highest value
+ * corresponds to highest priority.
+ */
+enum error_priority_e {
+	ERROR_PRIORITY_IGNORE,
+	ERROR_PRIORITY_NONE,
+	ERROR_PRIORITY_NORMAL,
+	ERROR_PRIORITY_LOG,
+	ERROR_PRIORITY_FAILOVER,
+	ERROR_PRIORITY_RETRY,
+	ERROR_PRIORITY_ABORT,
+	ERROR_PRIORITY_SHUTDOWN,
+	ERROR_PRIORITY_RESTART,
+	ERROR_PRIORITY_PANIC
+};
+
+typedef uint64_t  error_priority_t;
+
+/* Error state interfaces */
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+extern error_return_code_t	error_state_set(devfs_handle_t,error_state_t);
+extern error_state_t		error_state_get(devfs_handle_t);
+#endif
+
+/* System critical graph interfaces */
+
+extern boolean_t		is_sys_critical_vertex(devfs_handle_t);
+extern devfs_handle_t		sys_critical_first_child_get(devfs_handle_t);
+extern devfs_handle_t		sys_critical_next_child_get(devfs_handle_t);
+extern devfs_handle_t		sys_critical_parent_get(devfs_handle_t);
+extern error_return_code_t	sys_critical_graph_vertex_add(devfs_handle_t,
+							     devfs_handle_t new);
+
+/* Error action interfaces */
+
+extern error_return_code_t	error_action_set(devfs_handle_t,
+						 error_action_f,
+						 error_context_t,
+						 error_priority_t);
+extern error_return_code_t	error_action_perform(devfs_handle_t);
+
+
+#define INFO_LBL_ERROR_SKIP_ENV	"error_skip_env"
+
+#define v_error_skip_env_get(v, l)		\
+hwgraph_info_get_LBL(v, INFO_LBL_ERROR_SKIP_ENV, (arbitrary_info_t *)&l)
+
+#define v_error_skip_env_set(v, l, r)		\
+(r ? 						\
+ hwgraph_info_replace_LBL(v, INFO_LBL_ERROR_SKIP_ENV, (arbitrary_info_t)l,0) :\
+ hwgraph_info_add_LBL(v, INFO_LBL_ERROR_SKIP_ENV, (arbitrary_info_t)l))
+
+#define v_error_skip_env_clear(v)		\
+hwgraph_info_remove_LBL(v, INFO_LBL_ERROR_SKIP_ENV, 0)
+
+/* Skip point interfaces */
+extern error_return_code_t	error_skip_point_jump(devfs_handle_t, boolean_t);
+extern error_return_code_t	error_skip_point_clear(devfs_handle_t);
+
+/* REFERENCED */
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+
+inline static int
+error_skip_point_mark(devfs_handle_t  v)  			 
+{									
+	label_t		*error_env = NULL;	 			
+	int		code = 0;		
+
+	/* Check if we have a valid hwgraph vertex */
+#ifdef IRIX
+	if (!dev_is_vertex(v))
+		return(code);
+#endif
+				
+	/* There is no error jump buffer for this device vertex. Allocate
+	 * one.								 
+	 */								 
+	if (v_error_skip_env_get(v, error_env) != GRAPH_SUCCESS) {	 
+		error_env = kmem_zalloc(sizeof(label_t), KM_NOSLEEP);	 
+		/* Unable to allocate memory for jump buffer. This should
+		 * be a very rare occurrence.				 
+		 */							 
+		if (!error_env)						 
+			return(-1);					 
+		/* Store the jump buffer information on the vertex.*/	 
+		if (v_error_skip_env_set(v, error_env, 0) != GRAPH_SUCCESS)
+			return(-2);					   
+	}								   
+	ASSERT(v_error_skip_env_get(v, error_env) == GRAPH_SUCCESS);
+	code = setjmp(*error_env);					   
+#ifdef IRIX
+	/* NOTE: It might be OK to leave the allocated jump buffer on the
+	 * vertex. This can be used for later purposes.
+	 */
+	if (code) {							   
+		/* This is the case where a long jump has been taken from
+		 * one of the error handling interfaces.
+		 */							     
+		if (v_error_skip_env_clear(v, error_env) == GRAPH_SUCCESS)   
+			kfree(error_env);
+	}								     
+#endif
+	return(code);							     
+}
+#endif	/* CONFIG_SGI_IO_ERROR_HANDLING */
+
+typedef uint64_t		counter_t;
+
+extern counter_t		error_retry_count_get(devfs_handle_t);
+extern error_return_code_t	error_retry_count_set(devfs_handle_t,counter_t);
+extern counter_t		error_retry_count_increment(devfs_handle_t);
+extern counter_t		error_retry_count_decrement(devfs_handle_t);
+
+/* Except for the PIO Read error typically the other errors are handled in
+ * the context of an asynchronous error interrupt.
+ */
+#define	IS_ERROR_INTR_CONTEXT(_ec)	((_ec & IOECODE_DMA) 		|| \
+					 (_ec == IOECODE_PIO_WRITE))
+
+/* Some convenience macros on device state. This state is accessed only 
+ * thru the calls the io error handling layer.
+ */
+#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
+#define IS_DEVICE_SHUTDOWN(_d) 	(error_state_get(_d) == ERROR_STATE_SHUTDOWN)
+#else
+extern boolean_t		is_device_shutdown(devfs_handle_t);
+#define IS_DEVICE_SHUTDOWN(_d) 	(is_device_shutdown(_d))
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_SN_IOERROR_HANDLING_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/iograph.h linux/include/asm-ia64/sn/iograph.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/iograph.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/iograph.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,200 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_IOGRAPH_H
+#define _ASM_SN_IOGRAPH_H
+
+/*
+ * During initialization, platform-dependent kernel code establishes some
+ * basic elements of the hardware graph.  This file contains edge and
+ * info labels that are used across various platforms -- it serves as an
+ * ad-hoc registry.
+ */
+
+/* edges names */
+#define EDGE_LBL_BUS			"bus"
+#define EDGE_LBL_CONN			".connection"
+#define EDGE_LBL_ECP			"ecp"		/* EPP/ECP plp */
+#define EDGE_LBL_ECPP			"ecpp"
+#define EDGE_LBL_GUEST			".guest"	/* For IOC3 */
+#define EDGE_LBL_HOST			".host"		/* For IOC3 */
+#define EDGE_LBL_PERFMON		"mon"
+#define EDGE_LBL_USRPCI			"usrpci"
+#define EDGE_LBL_VME			"vmebus"
+#define EDGE_LBL_BLOCK			"block"
+#define EDGE_LBL_BOARD			"board"
+#define EDGE_LBL_CHAR			"char"
+#define EDGE_LBL_CONTROLLER		"controller"
+#define EDGE_LBL_CPU			"cpu"
+#define EDGE_LBL_CPUNUM			"cpunum"
+#define EDGE_LBL_DISABLED		"disabled"
+#define EDGE_LBL_DISK			"disk"
+#define EDGE_LBL_DMA_ENGINE             "dma_engine"    /* Only available on
+							   VMEbus now        */
+#define EDGE_LBL_NET			"net"		/* all nw. devs */
+#define EDGE_LBL_EF			"ef"		/* For if_ef ethernet */
+#define EDGE_LBL_ET			"et"		/* For if_ee ethernet */
+#define EDGE_LBL_EC			"ec"		/* For if_ec2 ether */
+#define EDGE_LBL_ECF			"ec"		/* For if_ecf enet */
+#define EDGE_LBL_EM			"ec"		/* For O2 ether */
+#define EDGE_LBL_IPG			"ipg"		/* For IPG FDDI */
+#define EDGE_LBL_XPI			"xpi"		/* For IPG FDDI */
+#define EDGE_LBL_HIP			"hip"		/* For HIPPI */
+#define EDGE_LBL_GSN                    "gsn"           /* For GSN */
+#define EDGE_LBL_ATM			"atm"		/* For ATM */
+#define EDGE_LBL_FXP			"fxp"		/* For FXP ether */
+#define EDGE_LBL_EP			"ep"		/* For eplex ether */
+#define EDGE_LBL_VFE			"vfe"		/* For VFE ether */
+#define EDGE_LBL_GFE			"gfe"		/* For GFE ether */
+#define EDGE_LBL_RNS			"rns"		/* RNS PCI FDDI card */
+#define EDGE_LBL_MTR			"mtr"		/* MTR PCI 802.5 card */
+#define EDGE_LBL_FV			"fv"		/* FV VME 802.5 card */
+#define EDGE_LBL_GTR			"gtr"		/* GTR GIO 802.5 card */
+#define EDGE_LBL_ISDN                   "isdn"		/* Digi PCI ISDN-BRI card */
+
+#define EDGE_LBL_EISA			"eisa"
+#define EDGE_LBL_ENET			"ethernet"
+#define EDGE_LBL_FLOPPY			"floppy"
+#define EDGE_LBL_PFD			"pfd"		/* For O2 pfd floppy */
+#define EDGE_LBL_FOP                    "fop"           /* Fetchop pseudo device */
+#define EDGE_LBL_GIO			"gio"
+#define EDGE_LBL_HEART			"heart"		/* For RACER */
+#define EDGE_LBL_HPC			"hpc"
+#define EDGE_LBL_GFX			"gfx"
+#define EDGE_LBL_HUB			"hub"		/* For SN0 */
+#define EDGE_LBL_IBUS			"ibus"		/* For EVEREST */
+#define EDGE_LBL_INTERCONNECT		"link"
+#define EDGE_LBL_IO			"io"
+#define EDGE_LBL_IO4			"io4"		/* For EVEREST */
+#define EDGE_LBL_IOC3			"ioc3"
+#define EDGE_LBL_LUN                    "lun"
+#define EDGE_LBL_MACE                   "mace" 		/* O2 mace */
+#define EDGE_LBL_MACHDEP                "machdep"       /* Platform dependent devices */
+#define EDGE_LBL_MASTER			".master"
+#define EDGE_LBL_MEMORY			"memory"
+#define EDGE_LBL_META_ROUTER		"metarouter"
+#define EDGE_LBL_MIDPLANE		"midplane"
+#define EDGE_LBL_MODULE			"module"
+#define EDGE_LBL_NODE			"node"
+#define EDGE_LBL_NODENUM		"nodenum"
+#define EDGE_LBL_NVRAM			"nvram"
+#define EDGE_LBL_PARTITION		"partition"
+#define EDGE_LBL_PCI			"pci"
+#define EDGE_LBL_PORT			"port"
+#define EDGE_LBL_PROM			"prom"
+#define EDGE_LBL_RACK			"rack"
+#define EDGE_LBL_RDISK			"rdisk"
+#define EDGE_LBL_ROUTER			"router"
+#define EDGE_LBL_RPOS			"bay"		/* Position in rack */
+#define EDGE_LBL_SCSI			"scsi"
+#define EDGE_LBL_SCSI_CTLR		"scsi_ctlr"
+#define EDGE_LBL_SLOT			"slot"
+#define EDGE_LBL_TAPE			"tape"
+#define EDGE_LBL_TARGET                 "target"
+#define EDGE_LBL_UNKNOWN		"unknown"
+#define EDGE_LBL_VOLUME			"volume"
+#define EDGE_LBL_VOLUME_HEADER		"volume_header"
+#define EDGE_LBL_XBOW			"xbow"
+#define	EDGE_LBL_XIO			"xio"
+#define EDGE_LBL_XSWITCH		".xswitch"
+#define EDGE_LBL_XTALK			"xtalk"
+#define EDGE_LBL_XWIDGET		"xwidget"
+#define EDGE_LBL_ELSC			"elsc"
+#define EDGE_LBL_L1			"L1"
+#define EDGE_LBL_MADGE_TR               "Madge-tokenring"
+#define EDGE_LBL_XPLINK			"xplink" 	/* Cross partition */
+#define	EDGE_LBL_XPLINK_NET		"net" 		/* XP network devs */
+#define	EDGE_LBL_XPLINK_RAW		"raw"		/* XP Raw devs */
+#define	EDGE_LBL_XPLINK_KERNEL		"kernel"	/* XP kernel devs */
+#define	EDGE_LBL_XPLINK_ADMIN		"admin"	   	/* Partition admin */
+#define	EDGE_LBL_KAIO			"kaio"	   	/* Kernel async i/o poll */
+#define EDGE_LBL_RPS                    "rps"           /* redundant power supply */ 
+#define EDGE_LBL_XBOX_RPS               "xbox_rps"      /* redundant power supply for xbox unit */ 
+#define EDGE_LBL_IOBRICK		"iobrick"
+#define EDGE_LBL_PBRICK			"pbrick"
+#define EDGE_LBL_IBRICK			"ibrick"
+#define EDGE_LBL_XBRICK			"xbrick"
+#define EDGE_LBL_CPUBUS			"cpubus"	/* CPU Interfaces (SysAd) */
+
+/* vertex info labels in hwgraph */
+#define INFO_LBL_CNODEID		"_cnodeid"
+#define INFO_LBL_CONTROLLER_NAME	"_controller_name"
+#define INFO_LBL_CPUBUS			"_cpubus"
+#define INFO_LBL_CPUID			"_cpuid"
+#define INFO_LBL_CPU_INFO		"_cpu"
+#define INFO_LBL_DETAIL_INVENT		"_detail_invent" /* inventory data*/
+#define INFO_LBL_DEVICE_DESC		"_device_desc"
+#define INFO_LBL_DIAGVAL                "_diag_reason"   /* Reason disabled */
+#define INFO_LBL_DKIOTIME		"_dkiotime"
+#define INFO_LBL_DRIVER			"_driver"	/* points to attached device_driver_t */
+#define INFO_LBL_ELSC			"_elsc"
+#define INFO_LBL_FC_PORTNAME		"_fc_portname"
+#define INFO_LBL_GIOIO			"_gioio"
+#define INFO_LBL_GFUNCS			"_gioio_ops"	/* ops vector for gio providers */
+#define INFO_LBL_HUB_INFO		"_hubinfo"
+#define INFO_LBL_HWGFSLIST		"_hwgfs_list"
+#define INFO_LBL_TRAVERSE		"_hwg_traverse" /* hwgraph traverse function */
+#define INFO_LBL_INVENT 		"_invent"	/* inventory data */
+#define INFO_LBL_MLRESET		"_mlreset"	/* present if device preinitialized */
+#define INFO_LBL_MODULE_INFO		"_module"	/* module data ptr */
+#define INFO_LBL_MONDATA		"_mon"		/* monitor data ptr */
+#define INFO_LBL_MDPERF_DATA		"_mdperf"	/* mdperf monitoring*/
+#define INFO_LBL_NIC			"_nic"
+#define INFO_LBL_NODE_INFO		"_node"
+#define	INFO_LBL_PCIBR_HINTS		"_pcibr_hints"
+#define INFO_LBL_PCIIO			"_pciio"
+#define INFO_LBL_PFUNCS			"_pciio_ops"	/* ops vector for gio providers */
+#define INFO_LBL_PERMISSIONS		"_permissions"	/* owner, uid, gid */
+#define INFO_LBL_ROUTER_INFO		"_router"
+#define INFO_LBL_SUBDEVS		"_subdevs"	/* subdevice enable bits */
+#define INFO_LBL_VME_FUNCS		"_vmeio_ops"	/* ops vector for VME providers */
+#define INFO_LBL_XSWITCH		"_xswitch"
+#define INFO_LBL_XSWITCH_ID		"_xswitch_id"
+#define INFO_LBL_XSWITCH_VOL		"_xswitch_volunteer"
+#define INFO_LBL_XFUNCS			"_xtalk_ops"	/* ops vector for gio providers */
+#define INFO_LBL_XWIDGET		"_xwidget"
+#define INFO_LBL_GRIO_DSK		"_grio_disk"	/* guaranteed rate I/O */
+#define INFO_LBL_ASYNC_ATTACH           "_async_attach"	/* parallel attachment */
+#define INFO_LBL_GFXID			"_gfxid"	/* gfx pipe ID #s */
+/* Device/Driver  Admin directive labels  */
+#define ADMIN_LBL_INTR_TARGET		"INTR_TARGET"	/* Target cpu for device interrupts*/
+#define ADMIN_LBL_INTR_SWLEVEL		"INTR_SWLEVEL"	/* Priority level of the ithread */
+
+#define	ADMIN_LBL_DMATRANS_NODE		"PCIBUS_DMATRANS_NODE" /* Node used for
+								* 32-bit Direct
+								* Mapping I/O
+								*/
+#define ADMIN_LBL_DISABLED		"DISABLE"	/* Device has been disabled */
+#define ADMIN_LBL_DETACH		"DETACH"	/* Device has been detached */
+
+#define ADMIN_LBL_THREAD_PRI		"thread_priority" 
+							/* Driver administrator
+							 * hint parameter for 
+							 * thread priority
+							 */
+#define ADMIN_LBL_THREAD_CLASS		"thread_class" 
+							/* Driver administrator
+							 * hint parameter for 
+							 * thread priority
+							 * default class
+							 */
+/* Special reserved info labels (also hwgfs attributes) */
+#define _DEVNAME_ATTR		"_devname"	/* device name */
+#define _DRIVERNAME_ATTR	"_drivername"	/* driver name */
+#define _INVENT_ATTR		"_inventory"	/* device inventory data */
+#define _MASTERNODE_ATTR	"_masternode"	/* node that "controls" device */
+
+/* Info labels that begin with '_' cannot be overwritten by an attr_set call */
+#define INFO_LBL_RESERVED(name) ((name)[0] == '_')
+
+#if defined(__KERNEL__)
+void init_all_devices(void);
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_SN_IOGRAPH_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/klconfig.h linux/include/asm-ia64/sn/klconfig.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/klconfig.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/klconfig.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,960 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/klconfig.h>.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef	_ASM_SN_KLCONFIG_H
+#define	_ASM_SN_KLCONFIG_H
+
+/*
+ * klconfig.h
+ */
+
+/*
+ * The KLCONFIG structures store info about the various BOARDs found
+ * during Hardware Discovery. In addition, it stores info about the
+ * components found on the BOARDs.
+ */
+
+/*
+ * WARNING:
+ *	Certain assembly language routines (notably xxxxx.s) in the IP27PROM 
+ *	will depend on the format of the data structures in this file.  In 
+ *      most cases, rearranging the fields can seriously break things.   
+ *      Adding fields in the beginning or middle can also break things.
+ *      Add fields if necessary, to the end of a struct in such a way
+ *      that offsets of existing fields do not change.
+ */
+
+#include <linux/types.h>
+#include <asm/sn/types.h>
+#include <asm/sn/slotnum.h>
+#include <asm/sn/router.h>
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sgi.h>
+#include <asm/sn/sn1/addrs.h>
+#include <asm/sn/vector.h>
+#include <asm/sn/agent.h>
+// #include <sys/graph.h>
+// #include <asm/sn/arc/types.h>
+#include <asm/sn/arc/hinv.h>
+#include <asm/sn/xtalk/xbow.h>
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/kldir.h>
+#include <asm/sn/sn_fru.h>
+
+#endif  /* CONFIG_SGI_IP35 ... */
+
+#define KLCFGINFO_MAGIC	0xbeedbabe
+
+typedef s32 klconf_off_t;
+
+#define	MAX_MODULE_ID		255
+#define SIZE_PAD		4096 /* 4k padding for structures */
+#if (defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)) && defined(BRINGUP) /* MAX_SLOTS_PER_NODE??? */
+/* 
+ * 1 NODE brick, 2 Router bricks (1 local, 1 meta), 6 XIO Widgets, 
+ * 1 Midplane (midplane will likely become IO brick when Bruce cleans
+ * up IP35 klconfig)
+ */
+#define MAX_SLOTS_PER_NODE	(1 + 2 + 6 + 1) 
+#else
+/* 
+ * 1 NODE brd, 2 Router brd (1 8p, 1 meta), 6 Widgets, 
+ * 2 Midplanes assuming no pci card cages 
+ */
+#define MAX_SLOTS_PER_NODE	(1 + 2 + 6 + 2) 
+#endif
+
+/* XXX if each node is guaranteed to have some memory */
+
+#define MAX_PCI_DEVS		8
+
+/* lboard_t->brd_flags fields */
+/* All bits in this field are currently used. Try the pad fields if
+   you need more flag bits */
+
+#define ENABLE_BOARD 		0x01
+#define FAILED_BOARD  		0x02
+#define DUPLICATE_BOARD 	0x04    /* Boards like midplanes/routers which
+                                   	   are discovered twice. Use one of them */
+#define VISITED_BOARD		0x08	/* Used for compact hub numbering. */
+#define LOCAL_MASTER_IO6	0x10 	/* master io6 for that node */
+#define GLOBAL_MASTER_IO6	0x20
+#define THIRD_NIC_PRESENT 	0x40  	/* for future use */
+#define SECOND_NIC_PRESENT 	0x80 	/* addons like MIO are present */
+
+/* klinfo->flags fields */
+
+#define KLINFO_ENABLE 		0x01    /* This component is enabled */
+#define KLINFO_FAILED   	0x02 	/* This component failed */
+#define KLINFO_DEVICE   	0x04 	/* This component is a device */
+#define KLINFO_VISITED  	0x08 	/* This component has been visited */
+#define KLINFO_CONTROLLER   	0x10 	/* This component is a device controller */
+#define KLINFO_INSTALL   	0x20  	/* Install a driver */
+#define	KLINFO_HEADLESS		0x40	/* Headless (or hubless) component */
+#define IS_CONSOLE_IOC3(i)	((((klinfo_t *)i)->flags) & KLINFO_INSTALL)
+
+#define GB2		0x80000000
+
+#define MAX_RSV_PTRS	32
+
+/* Structures to manage various data storage areas */
+/* The numbers must be contiguous since the array index i
+   is used in the code to allocate various areas. 
+*/
+
+#define BOARD_STRUCT 		0
+#define COMPONENT_STRUCT 	1
+#define ERRINFO_STRUCT 		2
+#define KLMALLOC_TYPE_MAX 	(ERRINFO_STRUCT + 1)
+#define DEVICE_STRUCT 		3
+
+
+typedef struct console_s {
+#if defined(CONFIG_IA64_SGI_IO)	/* FIXME */
+	__psunsigned_t 	uart_base;
+	__psunsigned_t 	config_base;
+	__psunsigned_t 	memory_base;
+#else
+	unsigned long 	uart_base;
+	unsigned long 	config_base;
+	unsigned long 	memory_base;
+#endif
+	short		baud;
+	short		flag;
+	int		type;
+	nasid_t		nasid;
+	char		wid;
+	char 		npci;
+	nic_t		baseio_nic;
+} console_t;
+
+typedef struct klc_malloc_hdr {
+        klconf_off_t km_base;
+        klconf_off_t km_limit;
+        klconf_off_t km_current;
+} klc_malloc_hdr_t;
+
+/* Functions/macros needed to use this structure */
+
+typedef struct kl_config_hdr {
+	u64		ch_magic;	/* set this to KLCFGINFO_MAGIC */
+	u32		ch_version;    /* structure version number */
+	klconf_off_t	ch_malloc_hdr_off; /* offset of ch_malloc_hdr */
+	klconf_off_t	ch_cons_off;       /* offset of ch_cons */
+	klconf_off_t	ch_board_info;	/* the link list of boards */
+	console_t	ch_cons_info;	/* address info of the console */
+	klc_malloc_hdr_t ch_malloc_hdr[KLMALLOC_TYPE_MAX];
+	confidence_t	ch_sw_belief;	/* confidence that software is bad*/
+	confidence_t	ch_sn0net_belief; /* confidence that sn0net is bad */
+} kl_config_hdr_t;
+
+
+#define KL_CONFIG_HDR(_nasid) 	((kl_config_hdr_t *)(KLCONFIG_ADDR(_nasid)))
+#define KL_CONFIG_INFO_OFFSET(_nasid)					\
+        (KL_CONFIG_HDR(_nasid)->ch_board_info)
+#define KL_CONFIG_INFO_SET_OFFSET(_nasid, _off)				\
+        (KL_CONFIG_HDR(_nasid)->ch_board_info = (_off))
+
+#if !defined(SIMULATED_KLGRAPH)
+#define KL_CONFIG_INFO(_nasid) 						\
+        (lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ?		\
+	 NODE_OFFSET_TO_K0((_nasid), KL_CONFIG_HDR(_nasid)->ch_board_info) : \
+	 0)
+#else
+/*
+ * For Fake klgraph info.
+ */
+extern kl_config_hdr_t *linux_klcfg;
+#define KL_CONFIG_INFO(_nasid) (lboard_t *)((ulong)linux_klcfg->ch_board_info | 0xe000000000000000)
+#endif	/* CONFIG_IA64_SGI_IO */
+
+#define KL_CONFIG_MAGIC(_nasid)		(KL_CONFIG_HDR(_nasid)->ch_magic)
+
+#define KL_CONFIG_CHECK_MAGIC(_nasid)					\
+        (KL_CONFIG_HDR(_nasid)->ch_magic == KLCFGINFO_MAGIC)
+
+#define KL_CONFIG_HDR_INIT_MAGIC(_nasid)	\
+                  (KL_CONFIG_HDR(_nasid)->ch_magic = KLCFGINFO_MAGIC)
+
+/* --- New Macros for the changed kl_config_hdr_t structure --- */
+
+#if defined(CONFIG_IA64_SGI_IO)
+#define PTR_CH_MALLOC_HDR(_k)   ((klc_malloc_hdr_t *)\
+			((__psunsigned_t)_k + (_k->ch_malloc_hdr_off)))
+#else
+#define PTR_CH_MALLOC_HDR(_k)   ((klc_malloc_hdr_t *)\
+			(unsigned long)_k + (_k->ch_malloc_hdr_off)))
+#endif
+
+#define KL_CONFIG_CH_MALLOC_HDR(_n)   PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
+
+#if defined(CONFIG_IA64_SGI_IO)
+#define PTR_CH_CONS_INFO(_k)	((console_t *)\
+			((__psunsigned_t)_k + (_k->ch_cons_off)))
+#else
+#define PTR_CH_CONS_INFO(_k)	((console_t *)\
+			((unsigned long)_k + (_k->ch_cons_off)))
+#endif
+
+#define KL_CONFIG_CH_CONS_INFO(_n)   PTR_CH_CONS_INFO(KL_CONFIG_HDR(_n))
+
+/* ------------------------------------------------------------- */
+
+#define KL_CONFIG_INFO_START(_nasid)	\
+        (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
+
+#define KL_CONFIG_BOARD_NASID(_brd)	((_brd)->brd_nasid)
+#define KL_CONFIG_BOARD_SET_NEXT(_brd, _off)	((_brd)->brd_next = (_off))
+
+#define KL_CONFIG_DUPLICATE_BOARD(_brd)	((_brd)->brd_flags & DUPLICATE_BOARD)
+
+#define XBOW_PORT_TYPE_HUB(_xbowp, _link) 	\
+               ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_HUB)
+#define XBOW_PORT_TYPE_IO(_xbowp, _link) 	\
+               ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_IO)
+
+#define XBOW_PORT_IS_ENABLED(_xbowp, _link) 	\
+               ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_ENABLE)
+#define XBOW_PORT_NASID(_xbowp, _link) 	\
+               ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_nasid)
+
+#define XBOW_PORT_IO     0x1
+#define XBOW_PORT_HUB    0x2
+#define XBOW_PORT_ENABLE 0x4
+
+#define	SN0_PORT_FENCE_SHFT	0
+#define	SN0_PORT_FENCE_MASK	(1 << SN0_PORT_FENCE_SHFT)
+
+/*
+ * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
+ * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to 
+ * the LOCAL/current NODE. REMOTE means it is attached to a different
+ * node.(TBD - Need a way to treat ROUTER boards.)
+ *
+ * There are 2 different structures to represent these boards -
+ * lboard - Local board, rboard - remote board. These 2 structures
+ * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
+ * Figure below). The first byte of the rboard or lboard structure
+ * is used to find out its type - no unions are used.
+ * If it is a lboard, then the config info of this board will be found
+ * on the local node. (LOCAL NODE BASE + offset value gives pointer to 
+ * the structure.
+ * If it is a rboard, the local structure contains the node number
+ * and the offset of the beginning of the LINKED LIST on the remote node.
+ * The details of the hardware on a remote node can be built locally,
+ * if required, by reading the LINKED LIST on the remote node and 
+ * ignoring all the rboards on that node.
+ *
+ * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the 
+ * First board info on the remote node. The remote node list is 
+ * traversed as the local list, using the REMOTE BASE ADDRESS and not
+ * the local base address and ignoring all rboard values.
+ *
+ * 
+ KLCONFIG
+
+ +------------+      +------------+      +------------+      +------------+
+ |  lboard    |  +-->|   lboard   |  +-->|   rboard   |  +-->|   lboard   |
+ +------------+  |   +------------+  |   +------------+  |   +------------+
+ | board info |  |   | board info |  |   |errinfo,bptr|  |   | board info |
+ +------------+  |   +------------+  |   +------------+  |   +------------+
+ | offset     |--+   |  offset    |--+   |  offset    |--+   |offset=NULL |
+ +------------+      +------------+      +------------+      +------------+
+
+
+ +------------+
+ | board info |
+ +------------+       +--------------------------------+
+ | compt 1    |------>| type, rev, diaginfo, size ...  |  (CPU)
+ +------------+       +--------------------------------+
+ | compt 2    |--+
+ +------------+  |    +--------------------------------+
+ |  ...       |  +--->| type, rev, diaginfo, size ...  |  (MEM_BANK)
+ +------------+       +--------------------------------+
+ | errinfo    |--+
+ +------------+  |    +--------------------------------+
+                 +--->|r/l brd errinfo,compt err flags |
+                      +--------------------------------+
+
+ *
+ * Each BOARD consists of COMPONENTs and the BOARD structure has 
+ * pointers (offsets) to its COMPONENT structure.
+ * The COMPONENT structure has version info, size and speed info, revision,
+ * error info and the NIC info. This structure can accommodate any
+ * BOARD with arbitrary COMPONENT composition.
+ *
+ * The ERRORINFO part of each BOARD has error information
+ * that describes errors about the BOARD itself. It also has flags to
+ * indicate the COMPONENT(s) on the board that have errors. The error 
+ * information specific to the COMPONENT is present in the respective 
+ * COMPONENT structure.
+ *
+ * The ERRORINFO structure is also treated like a COMPONENT, ie. the 
+ * BOARD has pointers(offset) to the ERRORINFO structure. The rboard
+ * structure also has a pointer to the ERRORINFO structure. This is 
+ * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
+ * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where 
+ * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
+ * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info 
+ * which is present on the REMOTE NODE.(TBD)
+ * REMOTE ERRINFO can be stored on any of the nearest nodes 
+ * or on all the nearest nodes.(TBD)
+ * Like BOARD structures, REMOTE ERRINFO structures can be built locally
+ * using the rboard errinfo pointer.
+ *
+ * In order to get useful information from this Data organization, a set of
+ * interface routines are provided (TBD). The important thing to remember while
+ * manipulating the structures, is that, the NODE number information should
+ * be used. If the NODE is non-zero (remote) then each offset should
+ * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR. 
+ * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
+ * 
+ * Note that these structures do not provide much info about connectivity.
+ * That info will be part of HWGRAPH, which is an extension of the cfg_t
+ * data structure. (ref IP27prom/cfg.h) It has to be extended to include
+ * the IO part of the Network(TBD).
+ *
+ * The data structures below define the above concepts.
+ */
+
+/*
+ * Values for CPU types
+ */
+#define KL_CPU_R4000		0x1	/* Standard R4000 */
+#define KL_CPU_TFP		0x2	/* TFP processor */
+#define	KL_CPU_R10000		0x3	/* R10000 (T5) */
+#define KL_CPU_NONE		(-1)	/* no cpu present in slot */
+
+/*
+ * IP27 BOARD classes
+ */
+
+#define KLCLASS_MASK	0xf0   
+#define KLCLASS_NONE	0x00
+#define KLCLASS_NODE	0x10             /* CPU, Memory and HUB board */
+#define KLCLASS_CPU	KLCLASS_NODE	
+#define KLCLASS_IO	0x20             /* BaseIO, 4 ch SCSI, ethernet, FDDI 
+					    and the non-graphics widget boards */
+#define KLCLASS_ROUTER	0x30             /* Router board */
+#define KLCLASS_MIDPLANE 0x40            /* We need to treat this as a board
+                                            so that we can record error info */
+#define KLCLASS_GFX	0x50		/* graphics boards */
+
+#define KLCLASS_PSEUDO_GFX	0x60	/* HDTV type cards that use a gfx
+					 * hw ifc to xtalk and are not gfx
+					 * class for sw purposes */
+
+#define KLCLASS_IOBRICK	0x70		/* IP35 iobrick */
+
+#define KLCLASS_MAX	7		/* Bump this if a new CLASS is added */
+#define KLTYPE_MAX	10		/* Bump this if a new CLASS is added */
+
+#define KLCLASS_UNKNOWN	0xf0
+
+#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
+
+/*
+ * IP27 board types
+ */
+
+#define KLTYPE_MASK	0x0f
+#define KLTYPE_NONE	0x00
+#define KLTYPE_EMPTY	0x00
+
+#define KLTYPE_WEIRDCPU (KLCLASS_CPU | 0x0)
+#define KLTYPE_IP27	(KLCLASS_CPU | 0x1) /* 2 CPUs(R10K) per board */
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define KLTYPE_IP35	KLTYPE_IP27
+#endif
+
+#define KLTYPE_WEIRDIO	(KLCLASS_IO  | 0x0)
+#define KLTYPE_BASEIO	(KLCLASS_IO  | 0x1) /* IOC3, SuperIO, Bridge, SCSI */
+#define KLTYPE_IO6	KLTYPE_BASEIO       /* Additional name */
+#define KLTYPE_4CHSCSI	(KLCLASS_IO  | 0x2)
+#define KLTYPE_MSCSI	KLTYPE_4CHSCSI      /* Additional name */
+#define KLTYPE_ETHERNET	(KLCLASS_IO  | 0x3)
+#define KLTYPE_MENET	KLTYPE_ETHERNET     /* Additional name */
+#define KLTYPE_FDDI  	(KLCLASS_IO  | 0x4)
+#define KLTYPE_UNUSED	(KLCLASS_IO  | 0x5) /* XXX UNUSED */
+#define KLTYPE_HAROLD   (KLCLASS_IO  | 0x6) /* PCI SHOE BOX */
+#define KLTYPE_PCI	KLTYPE_HAROLD
+#define KLTYPE_VME      (KLCLASS_IO  | 0x7) /* Any 3rd party VME card */
+#define KLTYPE_MIO   	(KLCLASS_IO  | 0x8)
+#define KLTYPE_FC    	(KLCLASS_IO  | 0x9)
+#define KLTYPE_LINC    	(KLCLASS_IO  | 0xA)
+#define KLTYPE_TPU    	(KLCLASS_IO  | 0xB) /* Tensor Processing Unit */
+#define KLTYPE_GSN_A   	(KLCLASS_IO  | 0xC) /* Main GSN board */
+#define KLTYPE_GSN_B   	(KLCLASS_IO  | 0xD) /* Auxiliary GSN board */
+
+#define KLTYPE_GFX	(KLCLASS_GFX | 0x0) /* unknown graphics type */
+#define KLTYPE_GFX_KONA (KLCLASS_GFX | 0x1) /* KONA graphics on IP27 */
+#define KLTYPE_GFX_MGRA (KLCLASS_GFX | 0x3) /* MGRAS graphics on IP27 */
+
+#define KLTYPE_WEIRDROUTER (KLCLASS_ROUTER | 0x0)
+#define KLTYPE_ROUTER     (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_ROUTER2    KLTYPE_ROUTER		/* Obsolete! */
+#define KLTYPE_NULL_ROUTER (KLCLASS_ROUTER | 0x2)
+#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
+
+#define KLTYPE_WEIRDMIDPLANE (KLCLASS_MIDPLANE | 0x0)
+#define KLTYPE_MIDPLANE8  (KLCLASS_MIDPLANE | 0x1) /* 8 slot backplane */
+#define KLTYPE_MIDPLANE    KLTYPE_MIDPLANE8
+#define KLTYPE_PBRICK_XBOW	(KLCLASS_MIDPLANE | 0x2)
+
+#define KLTYPE_IOBRICK		(KLCLASS_IOBRICK | 0x0)
+#define KLTYPE_IBRICK		(KLCLASS_IOBRICK | 0x1)
+#define KLTYPE_PBRICK		(KLCLASS_IOBRICK | 0x2)
+#define KLTYPE_XBRICK		(KLCLASS_IOBRICK | 0x3)
+
+#define KLTYPE_PBRICK_BRIDGE	KLTYPE_PBRICK
+
+/* The value of type should be more than 8 so that hinv prints
+ * out the board name from the NIC string. For values less than
+ * 8 the name of the board needs to be hard coded in a few places.
+ * When bringup started nic names had not standardized and so we
+ * had to hard code. (For people interested in history.) 
+ */
+#define KLTYPE_XTHD   	(KLCLASS_PSEUDO_GFX | 0x9)
+
+#define KLTYPE_UNKNOWN	(KLCLASS_UNKNOWN | 0xf)
+
+#define KLTYPE(_x) 	((_x) & KLTYPE_MASK)
+#define IS_MIO_PRESENT(l)	((l->brd_type == KLTYPE_BASEIO) && \
+				 (l->brd_flags & SECOND_NIC_PRESENT))
+#define IS_MIO_IOC3(l,n)	(IS_MIO_PRESENT(l) && (n > 2))
+
+/* 
+ * board structures
+ */
+
+#define MAX_COMPTS_PER_BRD 24
+
+#define LOCAL_BOARD 1
+#define REMOTE_BOARD 2
+
+#define LBOARD_STRUCT_VERSION 	2
+
+typedef struct lboard_s {
+	klconf_off_t 	brd_next;         /* Next BOARD */
+	unsigned char 	struct_type;      /* type of structure, local or remote */
+	unsigned char 	brd_type;         /* type+class */
+	unsigned char 	brd_sversion;     /* version of this structure */
+        unsigned char 	brd_brevision;    /* board revision */
+        unsigned char 	brd_promver;      /* board prom version, if any */
+ 	unsigned char 	brd_flags;        /* Enabled, Disabled etc */
+	unsigned char 	brd_slot;         /* slot number */
+	unsigned short	brd_debugsw;      /* Debug switches */
+	moduleid_t	brd_module;       /* module to which it belongs */
+	partid_t 	brd_partition;    /* Partition number */
+        unsigned short 	brd_diagval;      /* diagnostic value */
+        unsigned short 	brd_diagparm;     /* diagnostic parameter */
+        unsigned char 	brd_inventory;    /* inventory history */
+        unsigned char 	brd_numcompts;    /* Number of components */
+        nic_t         	brd_nic;          /* Number in CAN */
+	nasid_t		brd_nasid;        /* passed parameter */
+	klconf_off_t 	brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+	klconf_off_t 	brd_errinfo;      /* Board's error information */
+	struct lboard_s *brd_parent;	  /* Logical parent for this brd */
+	devfs_handle_t	brd_graph_link;   /* vertex hdl to connect extern compts */
+	confidence_t	brd_confidence;	  /* confidence that the board is bad */
+	nasid_t		brd_owner;        /* who owns this board */
+	unsigned char 	brd_nic_flags;    /* To handle 8 more NICs */
+	char		brd_name[32];
+} lboard_t;
+
+
+/*
+ *	Make sure we pass back the calias space address for local boards.
+ *	klconfig board traversal and error structure extraction defines.
+ */
+
+#define BOARD_SLOT(_brd)	((_brd)->brd_slot)
+
+#define KLCF_CLASS(_brd)	KLCLASS((_brd)->brd_type)
+#define KLCF_TYPE(_brd)		KLTYPE((_brd)->brd_type)
+#define KLCF_REMOTE(_brd)  	(((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
+#define KLCF_NUM_COMPS(_brd)	((_brd)->brd_numcompts)
+#define KLCF_MODULE_ID(_brd)	((_brd)->brd_module)
+
+#ifndef SIMULATED_KLGRAPH
+#define KLCF_NEXT(_brd) 		((_brd)->brd_next ? (lboard_t *)((_brd)->brd_next):  NULL)
+#define KLCF_COMP(_brd, _ndx)   \
+		(klinfo_t *)(NODE_OFFSET_TO_K0(NASID_GET(_brd), \
+						(_brd)->brd_compts[(_ndx)]))
+#define KLCF_COMP_ERROR(_brd, _comp)    \
+		(NODE_OFFSET_TO_K0(NASID_GET(_brd), (_comp)->errinfo))
+
+#else
+/*
+ * For fake klgraph info.
+ */
+#define KLCF_COMP(_brd, _ndx)           (klinfo_t *)((ulong) 0xe000000000000000 |((_brd)->brd_compts[(_ndx)]))
+#define KLCF_NEXT(_brd)	((_brd)->brd_next ? (lboard_t *)((ulong) 0xe000000000000000 | (_brd->brd_next)) : NULL)
+#define KLCF_COMP_ERROR(_brd, _comp)    (_brd = _brd , (_comp)->errinfo)
+
+#endif	/* SIMULATED_KLGRAPH */
+
+#define KLCF_COMP_TYPE(_comp)	((_comp)->struct_type)
+#define KLCF_BRIDGE_W_ID(_comp)	((_comp)->physid)	/* Widget ID */
+
+
+
+/*
+ * Generic info structure. This stores common info about a 
+ * component.
+ */
+ 
+typedef struct klinfo_s {                  /* Generic info */
+        unsigned char   struct_type;       /* type of this structure */
+        unsigned char   struct_version;    /* version of this structure */
+        unsigned char   flags;            /* Enabled, disabled etc */
+        unsigned char   revision;         /* component revision */
+        unsigned short  diagval;          /* result of diagnostics */
+        unsigned short  diagparm;         /* diagnostic parameter */
+        unsigned char   inventory;        /* previous inventory status */
+        unsigned short  partid;		   /* widget part number */
+	nic_t 		nic;              /* MUst be aligned properly */
+        unsigned char   physid;           /* physical id of component */
+        unsigned int    virtid;           /* virtual id as seen by system */
+	unsigned char	widid;	          /* Widget id - if applicable */
+	nasid_t		nasid;            /* node number - from parent */
+	char		pad1;		  /* pad out structure. */
+	char		pad2;		  /* pad out structure. */
+	COMPONENT	*arcs_compt;      /* ptr to the arcs struct for ease*/
+        klconf_off_t	errinfo;          /* component specific errors */
+        unsigned short  pad3;             /* pci fields have moved over to */
+        unsigned short  pad4;             /* klbri_t */
+} klinfo_t ;
+
+#define KLCONFIG_INFO_ENABLED(_i)	((_i)->flags & KLINFO_ENABLE)
+/*
+ * Component structures.
+ * Following are the currently identified components:
+ * 	CPU, HUB, MEM_BANK, 
+ * 	XBOW(consists of 16 WIDGETs, each of which can be HUB or GRAPHICS or BRIDGE)
+ * 	BRIDGE, IOC3, SuperIO, SCSI, FDDI 
+ * 	ROUTER
+ * 	GRAPHICS
+ */
+#define KLSTRUCT_UNKNOWN	0
+#define KLSTRUCT_CPU  		1
+#define KLSTRUCT_HUB  		2
+#define KLSTRUCT_MEMBNK 	3
+#define KLSTRUCT_XBOW 		4
+#define KLSTRUCT_BRI 		5
+#define KLSTRUCT_IOC3 		6
+#define KLSTRUCT_PCI 		7
+#define KLSTRUCT_VME 		8
+#define KLSTRUCT_ROU		9
+#define KLSTRUCT_GFX 		10
+#define KLSTRUCT_SCSI 		11
+#define KLSTRUCT_FDDI 		12
+#define KLSTRUCT_MIO 		13
+#define KLSTRUCT_DISK 		14
+#define KLSTRUCT_TAPE 		15
+#define KLSTRUCT_CDROM 		16
+#define KLSTRUCT_HUB_UART 	17
+#define KLSTRUCT_IOC3ENET 	18
+#define KLSTRUCT_IOC3UART 	19
+#define KLSTRUCT_UNUSED		20 /* XXX UNUSED */
+#define KLSTRUCT_IOC3PCKM       21
+#define KLSTRUCT_RAD        	22
+#define KLSTRUCT_HUB_TTY        23
+#define KLSTRUCT_IOC3_TTY 	24
+
+/* Early Access IO proms are compatible
+   only with KLSTRUCT values up to 24. */
+
+#define KLSTRUCT_FIBERCHANNEL 	25
+#define KLSTRUCT_MOD_SERIAL_NUM 26
+#define KLSTRUCT_IOC3MS         27
+#define KLSTRUCT_TPU            28
+#define KLSTRUCT_GSN_A          29
+#define KLSTRUCT_GSN_B          30
+#define KLSTRUCT_XTHD           31
+
+/*
+ * These are the indices of various components within a lboard structure.
+ */
+
+#define IP27_CPU0_INDEX 0
+#define IP27_CPU1_INDEX 1
+#define IP27_HUB_INDEX 2
+#define IP27_MEM_INDEX 3
+
+#define BASEIO_BRIDGE_INDEX 0
+#define BASEIO_IOC3_INDEX 1
+#define BASEIO_SCSI1_INDEX 2
+#define BASEIO_SCSI2_INDEX 3
+
+#define MIDPLANE_XBOW_INDEX 0
+#define ROUTER_COMPONENT_INDEX 0
+
+#define CH4SCSI_BRIDGE_INDEX 0
+
+/* Info holders for various hardware components */
+
+typedef u64 *pci_t;
+typedef u64 *vmeb_t;
+typedef u64 *vmed_t;
+typedef u64 *fddi_t;
+typedef u64 *scsi_t;
+typedef u64 *mio_t;
+typedef u64 *graphics_t;
+typedef u64 *router_t;
+
+/*
+ * The port info in ip27_cfg area translates to a lboard_t in the 
+ * KLCONFIG area. But since KLCONFIG does not use pointers, lboard_t
+ * is stored in terms of a nasid and an offset from start of KLCONFIG 
+ * area  on that nasid.
+ */
+typedef struct klport_s {
+	nasid_t		port_nasid;
+	unsigned char	port_flag;
+	klconf_off_t	port_offset;
+} klport_t;
+
+typedef struct klcpu_s {                          /* CPU */
+	klinfo_t 	cpu_info;
+	unsigned short 	cpu_prid;	/* Processor PRID value */
+	unsigned short 	cpu_fpirr;	/* FPU IRR value */
+    	unsigned short 	cpu_speed;	/* Speed in MHZ */
+    	unsigned short 	cpu_scachesz;	/* secondary cache size in MB */
+    	unsigned short 	cpu_scachespeed;/* secondary cache speed in MHz */
+} klcpu_t ;
+
+#define CPU_STRUCT_VERSION   2
+
+typedef struct klhub_s {			/* HUB */
+	klinfo_t 	hub_info;
+	uint 		hub_flags;		/* PCFG_HUB_xxx flags */
+	klport_t	hub_port;		/* hub is connected to this */
+	nic_t		hub_box_nic;		/* nic of containing box */
+	klconf_off_t	hub_mfg_nic;		/* MFG NIC string */
+	u64		hub_speed;		/* Speed of hub in HZ */
+} klhub_t ;
+
+typedef struct klhub_uart_s {			/* HUB */
+	klinfo_t 	hubuart_info;
+	uint 		hubuart_flags;		/* PCFG_HUB_xxx flags */
+	nic_t		hubuart_box_nic;	/* nic of containing box */
+} klhub_uart_t ;
+
+#define MEMORY_STRUCT_VERSION   2
+
+typedef struct klmembnk_s {			/* MEMORY BANK */
+	klinfo_t 	membnk_info;
+    	short 		membnk_memsz;		/* Total memory in megabytes */
+	short		membnk_dimm_select; /* bank to physical addr mapping*/
+	short		membnk_bnksz[MD_MEM_BANKS]; /* Memory bank sizes */
+	short		membnk_attr;
+} klmembnk_t ;
+
+#define KLCONFIG_MEMBNK_SIZE(_info, _bank)	\
+                            ((_info)->membnk_bnksz[(_bank)])
+
+
+#define MEMBNK_PREMIUM 1
+#define KLCONFIG_MEMBNK_PREMIUM(_info, _bank)	\
+                            ((_info)->membnk_attr & (MEMBNK_PREMIUM << (_bank)))
+
+#define MAX_SERIAL_NUM_SIZE 10
+
+typedef struct klmod_serial_num_s {
+      klinfo_t        snum_info;
+      union {
+              char snum_str[MAX_SERIAL_NUM_SIZE];
+              unsigned long long       snum_int;
+      } snum;
+} klmod_serial_num_t;
+
+/* Macros needed to access serial number structure in lboard_t.
+   Hard coded values are necessary since we cannot treat 
+   serial number struct as a component without losing compatibility
+   between prom versions. */
+
+#define GET_SNUM_COMP(_l) 	((klmod_serial_num_t *)\
+				KLCF_COMP(_l, _l->brd_numcompts))
+
+#define MAX_XBOW_LINKS 16
+
+typedef struct klxbow_s {                          /* XBOW */
+	klinfo_t 	xbow_info ;
+    	klport_t	xbow_port_info[MAX_XBOW_LINKS] ; /* Module number */
+        int		xbow_master_hub_link;
+        /* type of brd connected+component struct ptr+flags */
+} klxbow_t ;
+
+#define MAX_PCI_SLOTS 8
+
+typedef struct klpci_device_s {
+	s32	pci_device_id;	/* 32 bits of vendor/device ID. */
+	s32	pci_device_pad;	/* 32 bits of padding. */
+} klpci_device_t;
+
+#define BRIDGE_STRUCT_VERSION	2
+
+typedef struct klbri_s {                          /* BRIDGE */
+	klinfo_t 	bri_info ;
+    	unsigned char	bri_eprominfo ;    /* IO6prom connected to bridge */
+    	unsigned char	bri_bustype ;      /* PCI/VME BUS bridge/GIO */
+    	pci_t    	pci_specific  ;    /* PCI Board config info */
+	klpci_device_t	bri_devices[MAX_PCI_DEVS] ;	/* PCI IDs */
+	klconf_off_t	bri_mfg_nic ;
+} klbri_t ;
+
+#define MAX_IOC3_TTY	2
+
+typedef struct klioc3_s {                          /* IOC3 */
+	klinfo_t 	ioc3_info ;
+    	unsigned char	ioc3_ssram ;        /* Info about ssram */
+    	unsigned char	ioc3_nvram ;        /* Info about nvram */
+    	klinfo_t	ioc3_superio ;      /* Info about superio */
+	klconf_off_t	ioc3_tty_off ;
+	klinfo_t	ioc3_enet ;
+	klconf_off_t	ioc3_enet_off ;
+	klconf_off_t	ioc3_kbd_off ;
+} klioc3_t ;
+
+#define MAX_VME_SLOTS 8
+
+typedef struct klvmeb_s {                          /* VME BRIDGE - PCI CTLR */
+	klinfo_t 	vmeb_info ;
+	vmeb_t		vmeb_specific ;
+    	klconf_off_t   	vmeb_brdinfo[MAX_VME_SLOTS]   ;    /* VME Board config info */
+} klvmeb_t ;
+
+typedef struct klvmed_s {                          /* VME DEVICE - VME BOARD */
+	klinfo_t	vmed_info ;
+	vmed_t		vmed_specific ;
+    	klconf_off_t   	vmed_brdinfo[MAX_VME_SLOTS]   ;    /* VME Board config info */
+} klvmed_t ;
+
+#define ROUTER_VECTOR_VERS	2
+
+/* XXX - Don't we need the number of ports here?!? */
+typedef struct klrou_s {                          /* ROUTER */
+	klinfo_t 	rou_info ;
+	uint		rou_flags ;           /* PCFG_ROUTER_xxx flags */
+	nic_t		rou_box_nic ;         /* nic of the containing module */
+    	klport_t 	rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
+	klconf_off_t	rou_mfg_nic ;     /* MFG NIC string */
+	u64	rou_vector;	  /* vector from master node */
+} klrou_t ;
+
+/*
+ *  Graphics Controller/Device
+ *
+ *  (IP27/IO6) Prom versions 6.13 (and 6.5.1 kernels) and earlier
+ *  used a couple different structures to store graphics information.
+ *  For compatibility reasons, the newer data structure preserves some
+ *  of the layout so that fields that are used in the old versions remain
+ *  in the same place (with the same info).  Determination of what version
+ *  of this structure we have is done by checking the cookie field.
+ */
+#define KLGFX_COOKIE	0x0c0de000
+
+typedef struct klgfx_s {		/* GRAPHICS Device */
+	klinfo_t 	gfx_info;
+	klconf_off_t    old_gndevs;	/* for compatibility with older proms */
+	klconf_off_t    old_gdoff0;	/* for compatibility with older proms */
+	uint		cookie;		/* for compatibility with older proms */
+	uint		moduleslot;
+	struct klgfx_s	*gfx_next_pipe;
+	graphics_t	gfx_specific;
+	klconf_off_t    pad0;		/* for compatibility with older proms */
+	klconf_off_t    gfx_mfg_nic;
+} klgfx_t;
+
+typedef struct klxthd_s {   
+	klinfo_t 	xthd_info ;
+	klconf_off_t	xthd_mfg_nic ;        /* MFG NIC string */
+} klxthd_t ;
+
+typedef struct kltpu_s {                     /* TPU board */
+	klinfo_t 	tpu_info ;
+	klconf_off_t	tpu_mfg_nic ;        /* MFG NIC string */
+} kltpu_t ;
+
+typedef struct klgsn_s {                     /* GSN board */
+	klinfo_t 	gsn_info ;
+	klconf_off_t	gsn_mfg_nic ;        /* MFG NIC string */
+} klgsn_t ;
+
+#define MAX_SCSI_DEVS 16
+
+/*
+ * NOTE: THis is the max sized kl* structure and is used in klmalloc.c
+ * to allocate space of type COMPONENT. Make sure that if the size of
+ * any other component struct becomes more than this, then redefine
+ * that as the size to be klmalloced.
+ */
+
+typedef struct klscsi_s {                          /* SCSI Controller */
+	klinfo_t 	scsi_info ;
+    	scsi_t       	scsi_specific   ; 
+	unsigned char 	scsi_numdevs ;
+	klconf_off_t	scsi_devinfo[MAX_SCSI_DEVS] ; 
+} klscsi_t ;
+
+typedef struct klscdev_s {                          /* SCSI device */
+	klinfo_t 	scdev_info ;
+	struct scsidisk_data *scdev_cfg ; /* driver fills up this */
+} klscdev_t ;
+
+typedef struct klttydev_s {                          /* TTY device */
+	klinfo_t 	ttydev_info ;
+	struct terminal_data *ttydev_cfg ; /* driver fills up this */
+} klttydev_t ;
+
+typedef struct klenetdev_s {                          /* ENET device */
+	klinfo_t 	enetdev_info ;
+	struct net_data *enetdev_cfg ; /* driver fills up this */
+} klenetdev_t ;
+
+typedef struct klkbddev_s {                          /* KBD device */
+	klinfo_t 	kbddev_info ;
+	struct keyboard_data *kbddev_cfg ; /* driver fills up this */
+} klkbddev_t ;
+
+typedef struct klmsdev_s {                          /* mouse device */
+        klinfo_t        msdev_info ;
+        void 		*msdev_cfg ; 
+} klmsdev_t ;
+
+#define MAX_FDDI_DEVS 10 /* XXX Is this true */
+
+typedef struct klfddi_s {                          /* FDDI */
+	klinfo_t 	fddi_info ;
+    	fddi_t        	fddi_specific ;       
+	klconf_off_t	fddi_devinfo[MAX_FDDI_DEVS] ;
+} klfddi_t ;
+
+typedef struct klmio_s {                          /* MIO */
+	klinfo_t 	mio_info ;
+    	mio_t       	mio_specific   ; 
+} klmio_t ;
+
+
+typedef union klcomp_s {
+	klcpu_t		kc_cpu;
+	klhub_t		kc_hub;
+	klmembnk_t 	kc_mem;
+	klxbow_t  	kc_xbow;
+	klbri_t		kc_bri;
+	klioc3_t	kc_ioc3;
+	klvmeb_t	kc_vmeb;
+	klvmed_t	kc_vmed;
+	klrou_t		kc_rou;
+	klgfx_t		kc_gfx;
+	klscsi_t	kc_scsi;
+	klscdev_t	kc_scsi_dev;
+	klfddi_t	kc_fddi;
+	klmio_t		kc_mio;
+	klmod_serial_num_t kc_snum ;
+} klcomp_t;
+
+typedef union kldev_s {      /* for device structure allocation */
+	klscdev_t	kc_scsi_dev ;
+	klttydev_t	kc_tty_dev ;
+	klenetdev_t	kc_enet_dev ;
+	klkbddev_t 	kc_kbd_dev ;
+} kldev_t ;
+
+/* Data structure interface routines. TBD */
+
+/* Include launch info in this file itself? TBD */
+
+/*
+ * TBD - Can the ARCS and device driver related info also be included in the
+ * KLCONFIG area. On the IO4PROM, prom device driver info is part of cfgnode_t 
+ * structure, viz private to the IO4prom.
+ */
+
+/* 
+ * TBD - Allocation issues. 
+ *
+ * Do we need to mark off separate heaps for lboard_t, rboard_t, component, 
+ * errinfo and allocate from them, or have a single heap and allocate all 
+ * structures from it. Debug is easier in the former method since we can
+ * dump all similar structs in one command, but there will be lots of holes 
+ * in memory and max limits are needed for number of structures.
+ * Another way to make it organized is to have a union of all components
+ * and allocate an aligned chunk of memory greater than the biggest
+ * component.
+ */
+
+typedef union {
+	lboard_t *lbinfo ;
+} biptr_t ;
+
+
+#define BRI_PER_XBOW 6
+#define PCI_PER_BRI  8
+#define DEV_PER_PCI  16
+
+
+/* Virtual dipswitch values (starting from switch "7"): */
+
+#define VDS_NOGFX		0x8000	/* Don't enable gfx and autoboot */
+#define VDS_NOMP		0x100	/* Don't start slave processors */
+#define VDS_MANUMODE		0x80	/* Manufacturing mode */
+#define VDS_NOARB		0x40	/* No bootmaster arbitration */
+#define VDS_PODMODE		0x20	/* Go straight to POD mode */
+#define VDS_NO_DIAGS		0x10	/* Don't run any diags after BM arb */
+#define VDS_DEFAULTS		0x08	/* Use default environment values */
+#define VDS_NOMEMCLEAR		0x04	/* Don't run mem cfg code */
+#define VDS_2ND_IO4		0x02	/* Boot from the second IO4 */
+#define VDS_DEBUG_PROM		0x01	/* Print PROM debugging messages */
+
+/* external declarations of Linux kernel functions. */
+
+extern lboard_t *find_lboard(lboard_t *start, unsigned char type);
+extern klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char type);
+extern klinfo_t *find_first_component(lboard_t *brd, unsigned char type);
+extern klcpu_t *nasid_slice_to_cpuinfo(nasid_t, int);
+
+
+#if defined(CONFIG_IA64_SGI_IO)
+extern xwidgetnum_t nodevertex_widgetnum_get(devfs_handle_t node_vtx);
+extern devfs_handle_t nodevertex_xbow_peer_get(devfs_handle_t node_vtx);
+extern lboard_t *find_gfxpipe(int pipenum);
+extern void setup_gfxpipe_link(devfs_handle_t vhdl,int pipenum);
+extern lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_class);
+extern lboard_t *find_lboard_module_class(lboard_t *start, moduleid_t mod,
+                                               unsigned char brd_class);
+extern lboard_t *find_nic_lboard(lboard_t *, nic_t);
+extern lboard_t *find_nic_type_lboard(nasid_t, unsigned char, nic_t);
+extern lboard_t *find_lboard_modslot(lboard_t *start, moduleid_t mod, slotid_t slot);
+extern lboard_t *find_lboard_module(lboard_t *start, moduleid_t mod);
+extern lboard_t *get_board_name(nasid_t nasid, moduleid_t mod, slotid_t slot, char *name);
+extern int	config_find_nic_router(nasid_t, nic_t, lboard_t **, klrou_t**);
+extern int	config_find_nic_hub(nasid_t, nic_t, lboard_t **, klhub_t**);
+extern int	config_find_xbow(nasid_t, lboard_t **, klxbow_t**);
+extern klcpu_t *get_cpuinfo(cpuid_t cpu);
+extern int 	update_klcfg_cpuinfo(nasid_t, int);
+extern void 	board_to_path(lboard_t *brd, char *path);
+extern moduleid_t get_module_id(nasid_t nasid);
+extern void 	nic_name_convert(char *old_name, char *new_name);
+extern int 	module_brds(nasid_t nasid, lboard_t **module_brds, int n);
+extern lboard_t *brd_from_key(uint64_t key);
+extern void 	device_component_canonical_name_get(lboard_t *,klinfo_t *,
+						    char *);
+extern int	board_serial_number_get(lboard_t *,char *);
+extern int	is_master_baseio(nasid_t,moduleid_t,slotid_t);
+extern nasid_t	get_actual_nasid(lboard_t *brd) ;
+extern net_vec_t klcfg_discover_route(lboard_t *, lboard_t *, int);
+#else	/* CONFIG_IA64_SGI_IO */
+extern klcpu_t *sn_get_cpuinfo(cpuid_t cpu);
+#endif	/* CONFIG_IA64_SGI_IO */
+
+#endif /* _ASM_SN_KLCONFIG_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/kldir.h linux/include/asm-ia64/sn/kldir.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/kldir.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/kldir.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,245 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/kldir.h>, revision 1.21.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SN_KLDIR_H
+#define _ASM_SN_KLDIR_H
+
+#if defined(CONFIG_IA64_SGI_IO)
+#include <asm/sn/sgi.h>
+#endif
+
+/*
+ * The kldir memory area resides at a fixed place in each node's memory and
+ * provides pointers to most other IP27 memory areas.  This allows us to
+ * resize and/or relocate memory areas at a later time without breaking all
+ * firmware and kernels that use them.  Indices in the array are
+ * permanently dedicated to areas listed below.  Some memory areas (marked
+ * below) reside at a permanently fixed location, but are included in the
+ * directory for completeness.
+ */
+
+#define KLDIR_MAGIC		0x434d5f53505f5357
+
+/*
+ * The upper portion of the memory map applies during boot
+ * only and is overwritten by IRIX/SYMMON.
+ *
+ *                                    MEMORY MAP PER NODE
+ *
+ * 0x2000000 (32M)         +-----------------------------------------+
+ *                         |      IO6 BUFFERS FOR FLASH ENET IOC3    |
+ * 0x1F80000 (31.5M)       +-----------------------------------------+
+ *                         |      IO6 TEXT/DATA/BSS/stack            |
+ * 0x1C00000 (30M)         +-----------------------------------------+
+ *                         |      IO6 PROM DEBUG TEXT/DATA/BSS/stack |
+ * 0x0800000 (28M)         +-----------------------------------------+
+ *                         |      IP27 PROM TEXT/DATA/BSS/stack      |
+ * 0x1B00000 (27M)         +-----------------------------------------+
+ *                         |      IP27 CFG                           |
+ * 0x1A00000 (26M)         +-----------------------------------------+
+ *                         |      Graphics PROM                      |
+ * 0x1800000 (24M)         +-----------------------------------------+
+ *                         |      3rd Party PROM drivers             |
+ * 0x1600000 (22M)         +-----------------------------------------+
+ *                         |                                         |
+ *                         |      Free                               |
+ *                         |                                         |
+ *                         +-----------------------------------------+
+ *                         |      UNIX DEBUG Version                 |
+ * 0x190000 (2M--)         +-----------------------------------------+
+ *                         |      SYMMON                             |
+ *                         |      (For UNIX Debug only)              |
+ * 0x34000 (208K)          +-----------------------------------------+
+ *                         |      SYMMON STACK [NUM_CPU_PER_NODE]    |
+ *                         |      (For UNIX Debug only)              |
+ * 0x25000 (148K)          +-----------------------------------------+
+ *                         |      KLCONFIG - II (temp)               |
+ *                         |                                         |
+ *                         |    ----------------------------         |
+ *                         |                                         |
+ *                         |      UNIX NON-DEBUG Version             |
+ * 0x19000 (100K)          +-----------------------------------------+
+ *
+ *
+ * The lower portion of the memory map contains information that is
+ * permanent and is used by the IP27PROM, IO6PROM and IRIX.
+ *
+ * 0x19000 (100K)          +-----------------------------------------+
+ *                         |                                         |
+ *                         |      PI Error Spools (32K)              |
+ *                         |                                         |
+ * 0x12000 (72K)           +-----------------------------------------+
+ *                         |      Unused                             |
+ * 0x11c00 (71K)           +-----------------------------------------+
+ *                         |      CPU 1 NMI Eframe area       	     |
+ * 0x11a00 (70.5K)         +-----------------------------------------+
+ *                         |      CPU 0 NMI Eframe area       	     |
+ * 0x11800 (70K)           +-----------------------------------------+
+ *                         |      CPU 1 NMI Register save area       |
+ * 0x11600 (69.5K)         +-----------------------------------------+
+ *                         |      CPU 0 NMI Register save area       |
+ * 0x11400 (69K)           +-----------------------------------------+
+ *                         |      GDA (1k)                           |
+ * 0x11000 (68K)           +-----------------------------------------+
+ *                         |      Early cache Exception stack        |
+ *                         |             and/or                      |
+ *			   |      kernel/io6prom nmi registers	     |
+ * 0x10800  (66k)	   +-----------------------------------------+
+ *			   |      cache error eframe   	 	     |
+ * 0x10400 (65K)           +-----------------------------------------+
+ *                         |      Exception Handlers (UALIAS copy)   |
+ * 0x10000 (64K)           +-----------------------------------------+
+ *                         |                                         |
+ *                         |                                         |
+ *                         |      KLCONFIG - I (permanent) (48K)     |
+ *                         |                                         |
+ *                         |                                         |
+ *                         |                                         |
+ * 0x4000 (16K)            +-----------------------------------------+
+ *                         |      NMI Handler (Protected Page)       |
+ * 0x3000 (12K)            +-----------------------------------------+
+ *                         |      ARCS PVECTORS (master node only)   |
+ * 0x2c00 (11K)            +-----------------------------------------+
+ *                         |      ARCS TVECTORS (master node only)   |
+ * 0x2800 (10K)            +-----------------------------------------+
+ *                         |      LAUNCH [NUM_CPU]                   |
+ * 0x2400 (9K)             +-----------------------------------------+
+ *                         |      Low memory directory (KLDIR)       |
+ * 0x2000 (8K)             +-----------------------------------------+
+ *                         |      ARCS SPB (1K)                      |
+ * 0x1000 (4K)             +-----------------------------------------+
+ *                         |      Early cache Exception stack        |
+ *                         |             and/or                      |
+ *			   |      kernel/io6prom nmi registers	     |
+ * 0x800  (2k)	           +-----------------------------------------+
+ *			   |      cache error eframe   	 	     |
+ * 0x400 (1K)              +-----------------------------------------+
+ *                         |      Exception Handlers                 |
+ * 0x0   (0K)              +-----------------------------------------+
+ */
+
+#ifdef LANGUAGE_ASSEMBLY
+#define KLDIR_OFF_MAGIC			0x00
+#define KLDIR_OFF_OFFSET		0x08
+#define KLDIR_OFF_POINTER		0x10
+#define KLDIR_OFF_SIZE			0x18
+#define KLDIR_OFF_COUNT			0x20
+#define KLDIR_OFF_STRIDE		0x28
+#endif /* LANGUAGE_ASSEMBLY */
+
+#if !defined(CONFIG_IA64_SGI_IO)
+
+/*
+ * This is defined here because IP27_SYMMON_STK_SIZE must be at least what
+ * we define here.  Since it's set up in the prom, we can't redefine it later
+ * and expect more space to be allocated.  The way to find out the true size
+ * of the symmon stacks is to divide SYMMON_STK_SIZE by SYMMON_STK_STRIDE
+ * for a particular node.
+ */
+#define SYMMON_STACK_SIZE		0x8000
+
+#if defined (PROM) || defined (SABLE)
+
+/*
+ * These defines are prom version dependent.  No code other than the IP27
+ * prom should attempt to use these values.
+ */
+#define IP27_LAUNCH_OFFSET		0x2400
+#define IP27_LAUNCH_SIZE		0x400
+#define IP27_LAUNCH_COUNT		2
+#define IP27_LAUNCH_STRIDE		0x200
+
+#define IP27_KLCONFIG_OFFSET		0x4000
+#define IP27_KLCONFIG_SIZE		0xc000
+#define IP27_KLCONFIG_COUNT		1
+#define IP27_KLCONFIG_STRIDE		0
+
+#define IP27_NMI_OFFSET			0x3000
+#define IP27_NMI_SIZE			0x40
+#define IP27_NMI_COUNT			2
+#define IP27_NMI_STRIDE			0x40
+
+#define IP27_PI_ERROR_OFFSET		0x12000
+#define IP27_PI_ERROR_SIZE		0x4000
+#define IP27_PI_ERROR_COUNT		1
+#define IP27_PI_ERROR_STRIDE		0
+
+#define IP27_SYMMON_STK_OFFSET		0x25000
+#define IP27_SYMMON_STK_SIZE		0xe000
+#define IP27_SYMMON_STK_COUNT		2
+/* IP27_SYMMON_STK_STRIDE must be >= SYMMON_STACK_SIZE */
+#define IP27_SYMMON_STK_STRIDE		0x7000
+
+#define IP27_FREEMEM_OFFSET		0x19000
+#define IP27_FREEMEM_SIZE		-1
+#define IP27_FREEMEM_COUNT		1
+#define IP27_FREEMEM_STRIDE		0
+
+#endif /* PROM || SABLE*/
+/*
+ * There will be only one of these in a partition so the IO6 must set it up.
+ */
+#define IO6_GDA_OFFSET			0x11000
+#define IO6_GDA_SIZE			0x400
+#define IO6_GDA_COUNT			1
+#define IO6_GDA_STRIDE			0
+
+/*
+ * save area of kernel nmi regs in the prom format
+ */
+#define IP27_NMI_KREGS_OFFSET		0x11400
+#define IP27_NMI_KREGS_CPU_SIZE		0x200
+/*
+ * save area of kernel nmi regs in eframe format 
+ */
+#define IP27_NMI_EFRAME_OFFSET		0x11800
+#define IP27_NMI_EFRAME_SIZE		0x200
+
+#define KLDIR_ENT_SIZE			0x40
+#define KLDIR_MAX_ENTRIES		(0x400 / 0x40)
+
+#endif	/* !CONFIG_IA64_SGI_IO */
+
+#ifdef _LANGUAGE_C
+typedef struct kldir_ent_s {
+	u64		magic;		/* Indicates validity of entry      */
+	off_t		offset;		/* Offset from start of node space  */
+#if defined(CONFIG_IA64_SGI_IO)	/* FIXME */
+	__psunsigned_t	pointer;	/* Pointer to area in some cases    */
+#else
+	unsigned long	pointer;	/* Pointer to area in some cases    */
+#endif
+	size_t		size;		/* Size in bytes 		    */
+	u64		count;		/* Repeat count if array, 1 if not  */
+	size_t		stride;		/* Stride if array, 0 if not        */
+	char		rsvd[16];	/* Pad entry to 0x40 bytes          */
+	/* NOTE: These 16 bytes are used in the Partition KLDIR
+	   entry to store partition info. Refer to klpart.h for this. */
+} kldir_ent_t;
+#endif /* _LANGUAGE_C */
+
+#if defined(CONFIG_IA64_SGI_IO)
+
+#define KLDIR_ENT_SIZE			0x40
+#define KLDIR_MAX_ENTRIES		(0x400 / 0x40)
+
+/*
+ * The actual offsets of each memory area are machine-dependent
+ */
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/kldir.h>
+#else
+#error "kldir.h is currently defined for IP27 and IP35 platforms only"
+#endif
+
+#endif	/* CONFIG_IA64_SGI_IO */
+
+#endif /* _ASM_SN_KLDIR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/ksys/elsc.h linux/include/asm-ia64/sn/ksys/elsc.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/ksys/elsc.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/ksys/elsc.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,162 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_KSYS_ELSC_H
+#define _ASM_SN_KSYS_ELSC_H
+
+#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/ksys/l1.h>
+#endif
+
+// #include <asm/sn/ksys/i2c.h>
+
+#define ELSC_I2C_ADDR		0x08
+#define ELSC_I2C_HUB0		0x09
+#define ELSC_I2C_HUB1		0x0a
+#define ELSC_I2C_HUB2		0x0b
+#define ELSC_I2C_HUB3		0x0c
+
+#define ELSC_PACKET_MAX		96
+#define ELSC_ACP_MAX		86		/* 84+cr+lf */
+#define ELSC_LINE_MAX		(ELSC_ACP_MAX - 2)
+
+/*
+ * ELSC character queue type for I/O
+ */
+
+#define ELSC_QSIZE	128		/* Power of 2 is more efficient */
+
+typedef sc_cq_t elsc_cq_t;
+
+/*
+ * ELSC structure passed around as handle
+ */
+
+typedef l1sc_t elsc_t;
+
+void	elsc_init(elsc_t *e, nasid_t nasid);
+
+int	elsc_process(elsc_t *e);
+int	elsc_msg_check(elsc_t *e, char *msg, int msg_max);
+int	elsc_msg_callback(elsc_t *e,
+			  void (*callback)(void *callback_data, char *msg),
+			  void *callback_data);
+#if 0
+char   *elsc_errmsg(int code);
+
+int	elsc_nvram_write(elsc_t *e, int addr, char *buf, int len);
+int	elsc_nvram_read(elsc_t *e, int addr, char *buf, int len);
+int	elsc_nvram_magic(elsc_t *e);
+#endif
+
+int	elsc_command(elsc_t *e, int only_if_message);
+int	elsc_parse(elsc_t *e, char *p1, char *p2, char *p3);
+int	elsc_ust_write(elsc_t *e, uchar_t c);
+int 	elsc_ust_read(elsc_t *e, char *c);
+
+
+
+/*
+ * System controller commands
+ */
+
+int	elsc_version(elsc_t *e, char *result);
+#if 0
+int	elsc_debug_set(elsc_t *e, u_char byte1, u_char byte2);
+int	elsc_debug_get(elsc_t *e, u_char *byte1, u_char *byte2);
+#endif
+int	elsc_module_set(elsc_t *e, int module);
+int	elsc_module_get(elsc_t *e);
+int	elsc_partition_set(elsc_t *e, int partition);
+int	elsc_partition_get(elsc_t *e);
+int	elsc_domain_set(elsc_t *e, int domain);
+int	elsc_domain_get(elsc_t *e);
+int	elsc_cluster_set(elsc_t *e, int cluster);
+int	elsc_cluster_get(elsc_t *e);
+int	elsc_cell_set(elsc_t *e, int cell);
+int	elsc_cell_get(elsc_t *e);
+int	elsc_bist_set(elsc_t *e, char bist_status);
+char	elsc_bist_get(elsc_t *e);
+int	elsc_lock(elsc_t *e,
+		  int retry_interval_usec,
+		  int timeout_usec, u_char lock_val);
+int	elsc_unlock(elsc_t *e);
+int	elsc_display_char(elsc_t *e, int led, int chr);
+int	elsc_display_digit(elsc_t *e, int led, int num, int l_case);
+#if 0
+int	elsc_display_mesg(elsc_t *e, char *chr);	/* 8-char input */
+int	elsc_password_set(elsc_t *e, char *password);	/* 4-char input */
+int	elsc_password_get(elsc_t *e, char *password);	/* 4-char output */
+int	elsc_rpwr_query(elsc_t *e, int is_master);
+int	elsc_power_query(elsc_t *e);
+int	elsc_power_down(elsc_t *e, int sec);
+int	elsc_power_cycle(elsc_t *e);
+int	elsc_system_reset(elsc_t *e);
+int	elsc_dip_switches(elsc_t *e);
+int	elsc_nic_get(elsc_t *e, uint64_t *nic, int verbose);
+#endif
+
+int	_elsc_hbt(elsc_t *e, int ival, int rdly);
+
+#define	elsc_hbt_enable(e, ival, rdly)	_elsc_hbt(e, ival, rdly)
+#define	elsc_hbt_disable(e)		_elsc_hbt(e, 0, 0)
+#define	elsc_hbt_send(e)		_elsc_hbt(e, 0, 1)
+
+/*
+ * Routines for using the ELSC as a UART.  There's a version of each
+ * routine that takes a pointer to an elsc_t, and another version that
+ * gets the pointer by calling a user-supplied global routine "get_elsc".
+ * The latter version is useful when the elsc is employed for stdio.
+ */
+
+#define ELSCUART_FLASH		0x3c			/* LED pattern */
+
+elsc_t	       *get_elsc(void);
+
+int	elscuart_probe(void);
+void	elscuart_init(void *);
+int	elscuart_poll(void);
+int	elscuart_readc(void);
+int	elscuart_getc(void);
+int	elscuart_putc(int);
+int	elscuart_puts(char *);
+char   *elscuart_gets(char *, int);
+int	elscuart_flush(void);
+
+
+
+/*
+ * Error codes
+ *
+ *   The possible ELSC error codes are a superset of the I2C error codes,
+ *   so ELSC error codes begin at -100.
+ */
+
+#define ELSC_ERROR_NONE			0
+
+#define ELSC_ERROR_CMD_SEND	       -100	/* Error sending command    */
+#define ELSC_ERROR_CMD_CHECKSUM	       -101	/* Command checksum bad     */
+#define ELSC_ERROR_CMD_UNKNOWN	       -102	/* Unknown command          */
+#define ELSC_ERROR_CMD_ARGS	       -103	/* Invalid argument(s)      */
+#define ELSC_ERROR_CMD_PERM	       -104	/* Permission denied	    */
+#define ELSC_ERROR_CMD_STATE	       -105	/* not allowed in this state*/
+
+#define ELSC_ERROR_RESP_TIMEOUT	       -110	/* ELSC response timeout    */
+#define ELSC_ERROR_RESP_CHECKSUM       -111	/* Response checksum bad    */
+#define ELSC_ERROR_RESP_FORMAT	       -112	/* Response format error    */
+#define ELSC_ERROR_RESP_DIR	       -113	/* Response direction error */
+
+#define ELSC_ERROR_MSG_LOST	       -120	/* Queue full; msg. lost    */
+#define ELSC_ERROR_LOCK_TIMEOUT	       -121	/* ELSC response timeout    */
+#define ELSC_ERROR_DATA_SEND	       -122	/* Error sending data       */
+#define ELSC_ERROR_NIC		       -123	/* NIC processing error     */
+#define ELSC_ERROR_NVMAGIC	       -124	/* Bad magic no. in NVRAM   */
+#define ELSC_ERROR_MODULE	       -125	/* Moduleid processing err  */
+
+#endif /* _ASM_SN_KSYS_ELSC_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/ksys/i2c.h linux/include/asm-ia64/sn/ksys/i2c.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/ksys/i2c.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/ksys/i2c.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,77 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_KSYS_I2C_H
+#define _ASM_SN_KSYS_I2C_H
+
+#if _STANDALONE
+# include "rtc.h"
+#else
+# define rtc_time()	(GET_LOCAL_RTC * NSEC_PER_CYCLE / 1000)
+# define rtc_sleep	us_delay
+# define rtc_time_t	uint64_t
+#endif
+
+typedef u_char			i2c_addr_t;	/* 7-bit address            */
+
+int		i2c_init(nasid_t);
+
+int		i2c_probe(nasid_t nasid, rtc_time_t timeout);
+
+int		i2c_arb(nasid_t, rtc_time_t timeout, rtc_time_t *token_start);
+
+int		i2c_master_xmit(nasid_t,
+				i2c_addr_t addr,
+				u_char *buf,
+				int len_max,
+				int *len_ptr,
+				rtc_time_t timeout,
+				int only_if_message);
+
+int		i2c_master_recv(nasid_t,
+				i2c_addr_t addr,
+				u_char *buf,
+				int len_max,
+				int *len_ptr,
+				int emblen,
+				rtc_time_t timeout,
+				int only_if_message);
+
+int		i2c_master_xmit_recv(nasid_t,
+				     i2c_addr_t addr,
+				     u_char *xbuf,
+				     int xlen_max,
+				     int *xlen_ptr,
+				     u_char *rbuf,
+				     int rlen_max,
+				     int *rlen_ptr,
+				     int emblen,
+				     rtc_time_t timeout,
+				     int only_if_message);
+
+char	       *i2c_errmsg(int code);
+
+/*
+ * Error codes
+ */
+
+#define I2C_ERROR_NONE		 0
+#define I2C_ERROR_INIT		-1	/* Initialization error             */
+#define I2C_ERROR_STATE		-2	/* Unexpected chip state	    */
+#define I2C_ERROR_NAK		-3	/* Addressed slave not responding   */
+#define I2C_ERROR_TO_ARB	-4	/* Timeout waiting for sysctlr arb  */
+#define I2C_ERROR_TO_BUSY	-5	/* Timeout waiting for busy bus     */
+#define I2C_ERROR_TO_SENDA	-6	/* Timeout sending address byte     */
+#define I2C_ERROR_TO_SENDD	-7	/* Timeout sending data byte        */
+#define I2C_ERROR_TO_RECVA	-8	/* Timeout receiving address byte   */
+#define I2C_ERROR_TO_RECVD	-9	/* Timeout receiving data byte      */
+#define I2C_ERROR_NO_MESSAGE	-10	/* No message was waiting	    */
+#define I2C_ERROR_NO_ELSC	-11	/* ELSC is disabled for access 	    */ 	
+
+#endif /* _ASM_SN_KSYS_I2C_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/ksys/l1.h linux/include/asm-ia64/sn/ksys/l1.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/ksys/l1.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/ksys/l1.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,375 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef _ASM_SN_KSYS_L1_H
+#define _ASM_SN_KSYS_L1_H
+
+#include <asm/sn/vector.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn1/bedrock.h>
+
+#define BRL1_QSIZE	128	/* power of 2 is more efficient */
+#define BRL1_BUFSZ	264	/* needs to be large enough
+				 * to hold 2 flags, escaped
+				 * CRC, type/subchannel byte,
+				 * and escaped payload
+				 */
+
+#define BRL1_IQS          32
+#define BRL1_OQS          4
+
+
+typedef struct sc_cq_s {
+    u_char              buf[BRL1_QSIZE];
+    int                 ipos, opos, tent_next;
+} sc_cq_t;
+
+/* An l1sc_t struct can be associated with the local (C-brick) L1 or an L1
+ * on an R-brick.  In the R-brick case, the l1sc_t records a vector path
+ * to the R-brick's junk bus UART.  In the C-brick case, we just use the
+ * following flag to denote the local uart.
+ *
+ * This value can't be confused with a network vector because the least-
+ * significant nibble of a network vector cannot be greater than 8.
+ */
+#define BRL1_LOCALUART	((net_vec_t)0xf)
+
+/* L1<->Bedrock reserved subchannels */
+
+/* console channels */
+#define SC_CONS_CPU0    0x00
+#define SC_CONS_CPU1    0x01
+#define SC_CONS_CPU2    0x02
+#define SC_CONS_CPU3    0x03
+
+#define L1_ELSCUART_SUBCH(p)	(p)
+#define L1_ELSCUART_CPU(ch)	(ch)
+
+#define SC_CONS_SYSTEM  CPUS_PER_NODE
+
+/* mapping subchannels to queues */
+#define MAP_IQ(s)       (s)
+#define MAP_OQ(s)       (s)
+     
+#define BRL1_NUM_SUBCHANS 32
+#define BRL1_CMD_SUBCH	  16
+#define BRL1_EVENT_SUBCH  (BRL1_NUM_SUBCHANS - 1)
+#define BRL1_SUBCH_RSVD   0
+#define BRL1_SUBCH_FREE   (-1)
+
+/* constants for L1 hwgraph vertex info */
+#define CBRICK_L1	(__psint_t)1
+#define IOBRICK_L1	(__psint_t)2
+#define RBRICK_L1	(__psint_t)3
+
+
+struct l1sc_s;     
+typedef void (*brl1_notif_t)(struct l1sc_s *, int);
+typedef int  (*brl1_uartf_t)(struct l1sc_s *);
+
+/* structure for controlling a subchannel */
+typedef struct brl1_sch_s {
+    int		use;		/* if this subchannel is free,
+				 * use == BRL1_SUBCH_FREE */
+    uint	target;		/* type, rack and slot of component to
+				 * which this subchannel is directed */
+    int		packet_arrived; /* true if packet arrived on
+				 * this subchannel */
+    sc_cq_t *	iqp;		/* input queue for this subchannel */
+    sv_t	arrive_sv;	/* used to wait for a packet */
+    lock_t	data_lock;	/* synchronize access to input queues and
+				 * other fields of the brl1_sch_s struct */
+    brl1_notif_t tx_notify;     /* notify higher layer that transmission may 
+				 * continue */
+    brl1_notif_t rx_notify;	/* notify higher layer that a packet has been
+				 * received */
+} brl1_sch_t;
+
+/* br<->l1 protocol states */
+#define BRL1_IDLE	0
+#define BRL1_FLAG	1
+#define BRL1_HDR	2
+#define BRL1_BODY	3
+#define BRL1_ESC	4
+#define BRL1_RESET	7
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/*
+ * l1sc_t structure-- tracks protocol state, open subchannels, etc.
+ */
+typedef struct l1sc_s {
+    nasid_t	 nasid;		/* nasid with which this instance
+				 * of the structure is associated */
+    moduleid_t	 modid;         /* module id of this brick */
+    u_char	 verbose;	/* non-zero if elscuart routines should
+				 * prefix output */
+    net_vec_t    uart;		/* vector path to UART, or BRL1_LOCALUART */
+    int		 sent;		/* number of characters sent */
+    int		 send_len;	/* number of characters in send buf */
+    brl1_uartf_t putc_f;	/* pointer to UART putc function */
+    brl1_uartf_t getc_f;	/* pointer to UART getc function */
+
+    lock_t	 send_lock;	/* arbitrates send synchronization */
+    lock_t	 recv_lock;	/* arbitrates uart receive access */
+    lock_t	 subch_lock;	/* arbitrates subchannel allocation */
+    cpuid_t	 intr_cpu;	/* cpu that receives L1 interrupts */
+
+    u_char	 send_in_use;	/* non-zero if send buffer contains an
+				 * unsent or partially-sent  packet */
+    u_char	 fifo_space;	/* current depth of UART send FIFO */
+
+    u_char	 brl1_state;	/* current state of the receive side */
+    u_char	 brl1_last_hdr;	/* last header byte received */
+
+    char	 send[BRL1_BUFSZ]; /* send buffer */
+
+    int		 sol;		/* "start of line" (see elscuart routines) */
+    int		 cons_listen;	/* non-zero if the elscuart interface should
+				 * also check the system console subchannel */
+    brl1_sch_t	 subch[BRL1_NUM_SUBCHANS];
+    				/* subchannels provided by link */
+
+    sc_cq_t	 garbage_q;	/* a place to put unsolicited packets */
+    sc_cq_t	 oq[BRL1_OQS];	/* elscuart output queues */
+
+} l1sc_t;
+
+
+/* error codes */
+#define BRL1_VALID	  0
+#define BRL1_FULL_Q	(-1)
+#define BRL1_CRC	(-2)
+#define BRL1_PROTOCOL	(-3)
+#define BRL1_NO_MESSAGE	(-4)
+#define BRL1_LINK	(-5)
+#define BRL1_BUSY	(-6)
+
+#define SC_SUCCESS      BRL1_VALID
+#define SC_NMSG         BRL1_NO_MESSAGE
+#define SC_BUSY         BRL1_BUSY
+#define SC_NOPEN        (-7)
+#define SC_BADSUBCH     (-8)
+#define SC_TIMEDOUT	(-9)
+#define SC_NSUBCH	(-10)
+
+
+/* L1 Target Addresses */
+/*
+ * L1 commands and responses use source/target addresses that are
+ * 32 bits long.  These are broken up into multiple bitfields that
+ * specify the type of the target controller (could actually be L2
+ * L3, not just L1), the rack and bay of the target, and the task
+ * id (L1 functionality is divided into several independent "tasks"
+ * that can each receive command requests and transmit responses)
+ */
+#define L1_ADDR_TYPE_SHFT	28
+#define L1_ADDR_TYPE_MASK	0xF0000000
+#define L1_ADDR_TYPE_L1		0x00	/* L1 system controller */
+#define L1_ADDR_TYPE_L2		0x01	/* L2 system controller */
+#define L1_ADDR_TYPE_L3		0x02	/* L3 system controller */
+#define L1_ADDR_TYPE_CBRICK	0x03	/* attached C brick	*/
+#define L1_ADDR_TYPE_IOBRICK	0x04	/* attached I/O brick	*/
+
+#define L1_ADDR_RACK_SHFT	18
+#define L1_ADDR_RACK_MASK	0x0FFC0000
+#define	L1_ADDR_RACK_LOCAL	0x3ff	/* local brick's rack	*/
+
+#define L1_ADDR_BAY_SHFT	12
+#define L1_ADDR_BAY_MASK	0x0003F000
+#define	L1_ADDR_BAY_LOCAL	0x3f	/* local brick's bay	*/
+
+#define L1_ADDR_TASK_SHFT	0
+#define L1_ADDR_TASK_MASK	0x0000001F
+#define L1_ADDR_TASK_INVALID	0x00	/* invalid task 	*/
+#define	L1_ADDR_TASK_IROUTER	0x01	/* iRouter		*/
+#define L1_ADDR_TASK_SYS_MGMT	0x02	/* system management port */
+#define L1_ADDR_TASK_CMD	0x03	/* command interpreter	*/
+#define L1_ADDR_TASK_ENV	0x04	/* environmental monitor */
+#define L1_ADDR_TASK_BEDROCK	0x05	/* bedrock		*/
+#define L1_ADDR_TASK_GENERAL	0x06	/* general requests	*/
+
+#define L1_ADDR_LOCAL				\
+    (L1_ADDR_TYPE_L1 << L1_ADDR_TYPE_SHFT) |	\
+    (L1_ADDR_RACK_LOCAL << L1_ADDR_RACK_SHFT) |	\
+    (L1_ADDR_BAY_LOCAL << L1_ADDR_BAY_SHFT)
+
+#define L1_ADDR_LOCALIO					\
+    (L1_ADDR_TYPE_IOBRICK << L1_ADDR_TYPE_SHFT) |	\
+    (L1_ADDR_RACK_LOCAL << L1_ADDR_RACK_SHFT) |		\
+    (L1_ADDR_BAY_LOCAL << L1_ADDR_BAY_SHFT)
+
+#define L1_ADDR_LOCAL_SHFT	L1_ADDR_BAY_SHFT
+
+/* response argument types */
+#define L1_ARG_INT		0x00	/* 4-byte integer (big-endian)	*/
+#define L1_ARG_ASCII		0x01	/* null-terminated ASCII string */
+#define L1_ARG_UNKNOWN		0x80	/* unknown data type.  The low
+					 * 7 bits will contain the data
+					 * length.			*/
+
+/* response codes */
+#define L1_RESP_OK	    0	/* no problems encountered      */
+#define L1_RESP_IROUTER	(-  1)	/* iRouter error	        */
+#define L1_RESP_ARGC	(-100)	/* arg count mismatch	        */
+#define L1_RESP_REQC	(-101)	/* bad request code	        */
+#define L1_RESP_NAVAIL	(-104)	/* requested data not available */
+#define L1_RESP_ARGVAL	(-105)  /* arg value out of range       */
+
+/* L1 general requests */
+
+/* request codes */
+#define	L1_REQ_RDBG		0x0001	/* read debug switches	*/
+#define L1_REQ_RRACK		0x0002	/* read brick rack & bay */
+#define L1_REQ_RRBT		0x0003  /* read brick rack, bay & type */
+#define L1_REQ_SER_NUM		0x0004  /* read brick serial number */
+#define L1_REQ_FW_REV		0x0005  /* read L1 firmware revision */
+#define L1_REQ_EEPROM		0x0006  /* read EEPROM info */
+#define L1_REQ_EEPROM_FMT	0x0007  /* get EEPROM data format & size */
+#define L1_REQ_SYS_SERIAL	0x0008	/* read system serial number */
+#define L1_REQ_PARTITION_GET	0x0009	/* read partition id */
+#define L1_REQ_PORTSPEED	0x000a	/* get ioport speed */
+
+#define L1_REQ_CONS_SUBCH	0x1002  /* select this node's console 
+					 * subchannel */
+#define L1_REQ_CONS_NODE	0x1003  /* volunteer to be the master 
+					 * (console-hosting) node */
+#define L1_REQ_DISP1		0x1004  /* write line 1 of L1 display */
+#define L1_REQ_DISP2		0x1005  /* write line 2 of L1 display */
+#define L1_REQ_PARTITION_SET	0x1006	/* set partition id */
+#define L1_REQ_EVENT_SUBCH	0x1007	/* set the subchannel for system
+					   controller event transmission */
+
+#define L1_REQ_RESET		0x2001	/* request a full system reset */
+
+/* L1 command interpreter requests */
+
+/* request codes */
+#define L1_REQ_EXEC_CMD		0x0000	/* interpret and execute an ASCII
+					   command string */
+
+
+/* brick type response codes */
+#define L1_BRICKTYPE_C	0x43
+#define L1_BRICKTYPE_I	0x49
+#define L1_BRICKTYPE_P	0x50
+#define L1_BRICKTYPE_R  0x52
+#define L1_BRICKTYPE_X  0x58
+
+/* EEPROM codes (for the "read EEPROM" request) */
+/* c brick */
+#define L1_EEP_NODE		0x00	/* node board */
+#define L1_EEP_PIMM0		0x01
+#define L1_EEP_PIMM(x)		(L1_EEP_PIMM0+(x))
+#define L1_EEP_DIMM0		0x03
+#define L1_EEP_DIMM(x)		(L1_EEP_DIMM0+(x))
+
+/* other brick types */
+#define L1_EEP_POWER		0x00	/* power board */
+#define L1_EEP_LOGIC		0x01	/* logic board */
+
+/* info area types */
+#define L1_EEP_CHASSIS		1	/* chassis info area */
+#define L1_EEP_BOARD		2	/* board info area */
+#define L1_EEP_IUSE		3	/* internal use area */
+#define L1_EEP_SPD		4	/* serial presence detect record */
+
+typedef uint32_t l1addr_t;
+
+#define L1_BUILD_ADDR(addr,at,r,s,t)					\
+    (*(l1addr_t *)(addr) = ((l1addr_t)(at) << L1_ADDR_TYPE_SHFT) |	\
+			     ((l1addr_t)(r)  << L1_ADDR_RACK_SHFT) |	\
+			     ((l1addr_t)(s)  << L1_ADDR_BAY_SHFT) |	\
+			     ((l1addr_t)(t)  << L1_ADDR_TASK_SHFT))
+
+#define L1_ADDRESS_TO_TASK(addr,trb,tsk)				\
+    (*(l1addr_t *)(addr) = (l1addr_t)(trb) |				\
+    			     ((l1addr_t)(tsk) << L1_ADDR_TASK_SHFT))
+
+
+#define L1_DISPLAY_LINE_LENGTH	12	/* L1 display characters/line */
+
+#ifdef L1_DISP_2LINES
+#define L1_DISPLAY_LINES	2	/* number of L1 display lines */
+#else
+#define L1_DISPLAY_LINES	1	/* number of L1 display lines available
+					 * to system software */
+#endif
+
+#define SC_EVENT_CLASS_MASK ((unsigned short)0xff00)
+
+#define bzero(d, n)	memset((d), 0, (n))
+
+/* public interfaces to L1 system controller */
+
+int	sc_open( l1sc_t *sc, uint target );
+int	sc_close( l1sc_t *sc, int ch );
+int	sc_construct_msg( l1sc_t *sc, int ch, 
+			  char *msg, int msg_len,
+			  uint addr_task, short req_code,
+			  int req_nargs, ... );
+int	sc_interpret_resp( char *resp, int resp_nargs, ... );
+int	sc_send( l1sc_t *sc, int ch, char *msg, int len, int wait );
+int	sc_recv( l1sc_t *sc, int ch, char *msg, int *len, uint64_t block );
+int	sc_command( l1sc_t *sc, int ch, char *cmd, char *resp, int *len );
+int	sc_command_kern( l1sc_t *sc, int ch, char *cmd, char *resp, int *len );
+int	sc_poll( l1sc_t *sc, int ch );
+void	sc_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart );
+void	sc_intr_enable( l1sc_t *sc );
+
+#if 0
+int	sc_portspeed_get( l1sc_t *sc );
+#endif
+
+int	l1_cons_poll( l1sc_t *sc );
+int	l1_cons_getc( l1sc_t *sc );
+void	l1_cons_init( l1sc_t *sc );
+int	l1_cons_read( l1sc_t *sc, char *buf, int avail );
+int	l1_cons_write( l1sc_t *sc, char *msg, int len, int wait );
+void	l1_cons_tx_notif( l1sc_t *sc, brl1_notif_t func );
+void	l1_cons_rx_notif( l1sc_t *sc, brl1_notif_t func );
+
+int	_elscuart_putc( l1sc_t *sc, int c );
+int	_elscuart_getc( l1sc_t *sc );
+int	_elscuart_poll( l1sc_t *sc );
+int	_elscuart_readc( l1sc_t *sc );
+int	_elscuart_flush( l1sc_t *sc );
+int	_elscuart_probe( l1sc_t *sc );
+void	_elscuart_init( l1sc_t *sc );
+void	elscuart_syscon_listen( l1sc_t *sc );
+
+int	elsc_rack_bay_get(l1sc_t *e, uint *rack, uint *bay);
+int	elsc_rack_bay_type_get(l1sc_t *e, uint *rack, 
+			       uint *bay, uint *brick_type);
+int	elsc_cons_subch(l1sc_t *e, uint ch);
+int	elsc_cons_node(l1sc_t *e);
+int	elsc_display_line(l1sc_t *e, char *line, int lnum);
+
+extern l1sc_t *get_elsc( void );
+extern void    set_elsc( l1sc_t *e );
+
+#define get_l1sc	get_elsc
+#define set_l1sc(e)	set_elsc(e)
+
+#define get_master_l1sc get_l1sc
+
+int	router_module_get( nasid_t nasid, net_vec_t path );
+
+int	iobrick_rack_bay_type_get( l1sc_t *sc, uint *rack,
+				   uint *bay, uint *brick_type );
+int	iobrick_module_get( l1sc_t *sc );
+int	iobrick_pci_slot_pwr( l1sc_t *sc, int bus, int slot, int up );
+int	iobrick_pci_bus_pwr( l1sc_t *sc, int bus, int up );
+int	iobrick_sc_version( l1sc_t *sc, char *result );
+
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+#endif /* _ASM_SN_KSYS_L1_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/labelcl.h linux/include/asm-ia64/sn/labelcl.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/labelcl.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/labelcl.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,93 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_LABELCL_H
+#define _ASM_SN_LABELCL_H
+
+#define LABELCL_MAGIC 0x4857434c	/* 'HWLC' */
+#define LABEL_LENGTH_MAX 256		/* Includes NULL char */
+#define INFO_DESC_PRIVATE -1      	/* default */
+#define INFO_DESC_EXPORT  0       	/* export info itself */
+
+/*
+ * Internal Error codes.
+ */
+typedef enum labelcl_error_e {  LABELCL_SUCCESS,          /* 0 */
+                                LABELCL_DUP,              /* 1 */
+                                LABELCL_NOT_FOUND,        /* 2 */
+                                LABELCL_BAD_PARAM,        /* 3 */
+                                LABELCL_HIT_LIMIT,        /* 4 */
+                                LABELCL_CANNOT_ALLOC,     /* 5 */
+                                LABELCL_ILLEGAL_REQUEST,  /* 6 */
+                                LABELCL_IN_USE            /* 7 */
+                                } labelcl_error_t;
+
+
+/*
+ * Description of a label entry.
+ */
+typedef struct label_info_s {
+        char			*name;
+        arb_info_desc_t		desc;
+        arbitrary_info_t	info;
+} label_info_t;
+
+/*
+ * Definition of the data structure that provides the link to 
+ * the hwgraph fastinfo and the label entries associated with a 
+ * particular devfs entry.
+ */
+typedef struct labelcl_info_s {
+	unsigned long	hwcl_magic;
+	unsigned long	num_labels;
+	void		*label_list;
+	arbitrary_info_t IDX_list[HWGRAPH_NUM_INDEX_INFO];
+} labelcl_info_t;
+
+/*
+ * Definitions for the string table that holds the actual names 
+ * of the labels.
+ */
+struct string_table_item {
+        struct string_table_item        *next;
+        char                            string[1];
+};
+
+struct string_table {
+        struct string_table_item        *string_table_head;
+        long                            string_table_generation;
+};
+
+
+#define STRTBL_BASIC_SIZE ((size_t)(((struct string_table_item *)0)->string))
+#define STRTBL_ITEM_SIZE(str_length) (STRTBL_BASIC_SIZE + (str_length) + 1)
+
+#define STRTBL_ALLOC(str_length) \
+        ((struct string_table_item *)kmalloc(STRTBL_ITEM_SIZE(str_length), GFP_KERNEL))
+
+#define STRTBL_FREE(ptr) kfree(ptr)
+
+
+extern labelcl_info_t *labelcl_info_create(void);
+extern int labelcl_info_destroy(labelcl_info_t *);
+extern int labelcl_info_add_LBL(struct devfs_entry *, char *, arb_info_desc_t, arbitrary_info_t);
+extern int labelcl_info_remove_LBL(struct devfs_entry *, char *, arb_info_desc_t *, arbitrary_info_t *);
+extern int labelcl_info_replace_LBL(struct devfs_entry *, char *, arb_info_desc_t,
+                        arbitrary_info_t, arb_info_desc_t *, arbitrary_info_t *);
+extern int labelcl_info_get_LBL(struct devfs_entry *, char *, arb_info_desc_t *,
+                      arbitrary_info_t *);
+extern int labelcl_info_get_next_LBL(struct devfs_entry *, char *, arb_info_desc_t *,
+                           arbitrary_info_t *, labelcl_info_place_t *);
+extern int labelcl_info_replace_IDX(struct devfs_entry *, int, arbitrary_info_t, 
+			arbitrary_info_t *);
+extern int labelcl_info_connectpt_set(struct devfs_entry *, struct devfs_entry *);
+extern int labelcl_info_get_IDX(struct devfs_entry *, int, arbitrary_info_t *);
+extern struct devfs_entry *device_info_connectpt_get(struct devfs_entry *);
+
+#endif /* _ASM_SN_LABELCL_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/mem_refcnt.h linux/include/asm-ia64/sn/mem_refcnt.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/mem_refcnt.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/mem_refcnt.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,26 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_MEM_REFCNT_H
+#define _ASM_SN_MEM_REFCNT_H
+
+extern int mem_refcnt_attach(devfs_handle_t hub);
+extern int mem_refcnt_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp);
+extern int mem_refcnt_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp);
+extern int mem_refcnt_mmap(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot);
+extern int mem_refcnt_unmap(devfs_handle_t dev, vhandl_t *vt);
+extern int mem_refcnt_ioctl(devfs_handle_t dev,
+                 int cmd,
+                 void *arg,
+                 int mode,
+                 cred_t *cred_p,
+                 int *rvalp);
+        
+
+#endif /* _ASM_SN_MEM_REFCNT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/mmzone.h linux/include/asm-ia64/sn/mmzone.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/mmzone.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/mmzone.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,111 @@
+/*
+ * Written by Kanoj Sarcar (kanoj@sgi.com) Jan 2000
+ * Copyright, 2000, Silicon Graphics, sprasad@engr.sgi.com
+ */
+#ifndef _LINUX_ASM_SN_MMZONE_H
+#define _LINUX_ASM_SN_MMZONE_H
+
+#include <asm/sn/mmzone_sn1.h>
+#include <asm/sn/sn_cpuid.h>
+
+/*
+ * Memory is conceptually divided into chunks. A chunk is either
+ * completely present, or else the kernel assumes it is completely
+ * absent. Each node consists of a number of contiguous chunks.
+ */
+
+#define CHUNKMASK       	(~(CHUNKSZ - 1))
+#define CHUNKNUM(vaddr)        	(__pa(vaddr) >> CHUNKSHIFT)
+#define PCHUNKNUM(paddr)        ((paddr) >> CHUNKSHIFT)
+
+#define MAXCHUNKS      		(MAXNODES * MAX_CHUNKS_PER_NODE)
+
+extern int chunktonid[];
+#define CHUNKTONID(cnum)       (chunktonid[cnum])
+
+typedef struct plat_pglist_data {
+       pg_data_t       gendata;		/* try to keep this first. */
+       unsigned long   virtstart;
+       unsigned long   size;
+} plat_pg_data_t;
+
+extern plat_pg_data_t plat_node_data[];
+
+extern int numa_debug(void);
+
+/*
+ * The following two will move into linux/mmzone.h RSN.
+ */
+#define NODE_START(n)  plat_node_data[(n)].virtstart
+#define NODE_SIZE(n)   plat_node_data[(n)].size
+
+#define KVADDR_TO_NID(kaddr) \
+       ((CHUNKTONID(CHUNKNUM((kaddr))) != -1) ? (CHUNKTONID(CHUNKNUM((kaddr)))) : \
+       (printk("DISCONTIGBUG: %s line %d addr 0x%lx", __FILE__, __LINE__, \
+       (unsigned long)(kaddr)), numa_debug()))
+#if 0
+#define KVADDR_TO_NID(kaddr) CHUNKTONID(CHUNKNUM((kaddr)))
+#endif
+
+/* These 2 macros should never be used if KVADDR_TO_NID(kaddr) is -1 */
+/*
+ * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
+ * and returns the mem_map of that node.
+ */
+#define ADDR_TO_MAPBASE(kaddr) \
+                       NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
+
+/*
+ * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
+ * and returns the kaddr corresponding to first physical page in the
+ * node's mem_map.
+ */
+#define LOCAL_BASE_ADDR(kaddr) NODE_START(KVADDR_TO_NID(kaddr))
+
+#ifdef CONFIG_DISCONTIGMEM
+
+/*
+ * Return a pointer to the node data for node n.
+ * Assume that n is the compact node id.
+ */
+#define NODE_DATA(n)   (&((plat_node_data + (n))->gendata))
+
+/*
+ * NODE_MEM_MAP gives the kaddr for the mem_map of the node.
+ */
+#define NODE_MEM_MAP(nid)      (NODE_DATA((nid))->node_mem_map)
+
+/* This macro should never be used if KVADDR_TO_NID(kaddr) is -1 */
+#define LOCAL_MAP_NR(kvaddr) \
+        (((unsigned long)(kvaddr)-LOCAL_BASE_ADDR((kvaddr))) >> PAGE_SHIFT)
+#define MAP_NR_SN1(kaddr)   (LOCAL_MAP_NR((kaddr)) + \
+                (((unsigned long)ADDR_TO_MAPBASE((kaddr)) - PAGE_OFFSET) / \
+                sizeof(mem_map_t)))
+#if 0
+#define MAP_NR_VALID(kaddr)   (LOCAL_MAP_NR((kaddr)) + \
+                (((unsigned long)ADDR_TO_MAPBASE((kaddr)) - PAGE_OFFSET) / \
+                sizeof(mem_map_t)))
+#define MAP_NR_SN1(kaddr)	((KVADDR_TO_NID(kaddr) == -1) ? (max_mapnr + 1) :\
+				MAP_NR_VALID(kaddr))
+#endif
+
+/* FIXME */
+#define sn1_pte_pagenr(x)		MAP_NR_SN1(PAGE_OFFSET + (unsigned long)((pte_val(x)&_PFN_MASK) & PAGE_MASK))
+#define pte_page(pte)			(mem_map + sn1_pte_pagenr(pte))
+/* FIXME */
+
+#define kern_addr_valid(addr)   ((KVADDR_TO_NID((unsigned long)addr) >= \
+        numnodes) ? 0 : (test_bit(LOCAL_MAP_NR((addr)), \
+        NODE_DATA(KVADDR_TO_NID((unsigned long)addr))->valid_addr_bitmap)))
+
+#define virt_to_page(kaddr)	(mem_map + MAP_NR_SN1(kaddr))
+
+#else /* CONFIG_DISCONTIGMEM */
+
+#define MAP_NR_SN1(addr)	(((unsigned long) (addr) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+#define numa_node_id()		cpuid_to_cnodeid(smp_processor_id())
+
+#endif /* !_LINUX_ASM_SN_MMZONE_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/mmzone_default.h linux/include/asm-ia64/sn/mmzone_default.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/mmzone_default.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/mmzone_default.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,15 @@
+/*
+ * Copyright, 2000, Silicon Graphics, sprasad@engr.sgi.com
+ */
+
+#define MAXNODES                16
+#define MAXNASIDS               16
+
+#define CHUNKSZ                (8*1024*1024)
+#define CHUNKSHIFT              23      /* 2 ^^ CHUNKSHIFT == CHUNKSZ */
+
+#define CNODEID_TO_NASID(n)	n
+#define NASID_TO_CNODEID(n)     n
+
+#define MAX_CHUNKS_PER_NODE     8
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/mmzone_sn1.h linux/include/asm-ia64/sn/mmzone_sn1.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/mmzone_sn1.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/mmzone_sn1.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,104 @@
+#ifndef _ASM_IA64_MMZONE_SN1_H
+#define _ASM_IA64_MMZONE_SN1_H
+
+/*
+ * Copyright, 2000, Silicon Graphics, sprasad@engr.sgi.com
+ */
+
+/* Maximum configuration supported by SNIA hardware. There are other
+ * restrictions that may limit us to a smaller max configuration.
+ */
+#define MAXNODES                128
+#define MAXNASIDS		128
+
+#define CHUNKSZ                (64*1024*1024)
+#define CHUNKSHIFT              26      /* 2 ^^ CHUNKSHIFT == CHUNKSZ */
+
+extern int 	cnodeid_map[] ;
+extern int	nasid_map[] ;
+
+#define CNODEID_TO_NASID(n)	(cnodeid_map[(n)])
+#define NASID_TO_CNODEID(n)     (nasid_map[(n)])
+
+#define MAX_CHUNKS_PER_NODE     128
+
+
+/*
+ * These are a bunch of sn1 hw specific defines. For now, keep it 
+ * in this file. If it gets too diverse we may want to create a 
+ * mmhwdefs_sn1.h
+ */
+
+/*
+ * Structure of the mem config of the node as a SN1 MI reg
+ * Medusa supports this reg config.
+ */
+
+typedef struct node_memmap_s
+{
+        unsigned int    b0      :1,     /* 0 bank 0 present */
+                        b1      :1,     /* 1 bank 1 present */
+                        r01     :2,     /* 2-3 reserved */
+                        b01size :4,     /* 4-7 Size of bank 0 and 1 */
+                        b2      :1,     /* 8 bank 2 present */
+                        b3      :1,     /* 9 bank 3 present */
+                        r23     :2,     /* 10-11 reserved */
+                        b23size :4,     /* 12-15 Size of bank 2 and 3 */
+                        b4      :1,     /* 16 bank 4 present */
+                        b5      :1,     /* 17 bank 5 present */
+                        r45     :2,     /* 18-19 reserved */
+                        b45size :4,     /* 20-23 Size of bank 4 and 5 */
+                        b6      :1,     /* 24 bank 6 present */
+                        b7      :1,     /* 25 bank 7 present */
+                        r67     :2,     /* 26-27 reserved */
+                        b67size :4;     /* 28-31 Size of bank 6 and 7 */
+} node_memmap_t ;
+
+#define GBSHIFT                 30
+#define MBSHIFT                 20
+
+/*
+ * SN1 Arch defined values
+ */
+#define SN1_MAX_BANK_PER_NODE   8
+#define SN1_BANK_PER_NODE_SHIFT 3       /* derived from SN1_MAX_BANK_PER_NODE */
+#define SN1_NODE_ADDR_SHIFT     (GBSHIFT+3)             /* 8GB */
+#define SN1_BANK_ADDR_SHIFT     (SN1_NODE_ADDR_SHIFT-SN1_BANK_PER_NODE_SHIFT)
+
+#define SN1_BANK_SIZE_SHIFT     (MBSHIFT+6)     /* 64 MB */
+#define SN1_MIN_BANK_SIZE_SHIFT SN1_BANK_SIZE_SHIFT
+
+/*
+ * BankSize nibble to bank size mapping
+ *
+ *      1 - 64 MB
+ *      2 - 128 MB
+ *      3 - 256 MB
+ *      4 - 512 MB
+ *      5 - 1024 MB (1GB)
+ */
+
+/* fixme - this macro breaks for bsize 6-8 and 0 */
+
+#ifdef CONFIG_IA64_SGI_SN1_SIM
+/* Support the medusa hack for 8M/16M/32M nodes */
+#define BankSizeBytes(bsize)            ((bsize<6) ? (1<<((bsize-1)+SN1_BANK_SIZE_SHIFT)) :\
+					 (1<<((bsize-9)+MBSHIFT)))
+#else
+#define BankSizeBytes(bsize)            (1<<((bsize-1)+SN1_BANK_SIZE_SHIFT))
+#endif
+
+#define BankSizeToEFIPages(bsize)       ((BankSizeBytes(bsize)) >> 12)
+
+#define GetPhysAddr(n,b)                (((u64)n<<SN1_NODE_ADDR_SHIFT) | \
+                                                ((u64)b<<SN1_BANK_ADDR_SHIFT))
+
+#define GetNasId(paddr)			((u64)(paddr) >> SN1_NODE_ADDR_SHIFT)
+
+#define GetBankId(paddr)						\
+				(((u64)(paddr) >> SN1_BANK_ADDR_SHIFT) & 7)
+
+#define SN1_MAX_BANK_SIZE		((u64)BankSizeBytes(5))
+#define SN1_BANK_SIZE_MASK		(~(SN1_MAX_BANK_SIZE-1))
+
+#endif /* _ASM_IA64_MMZONE_SN1_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/module.h linux/include/asm-ia64/sn/module.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/module.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/module.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,204 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_MODULE_H
+#define _ASM_SN_MODULE_H
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <asm/sn/systeminfo.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ksys/elsc.h>
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#ifdef BRINGUP /* max. number of modules?  Should be about 300.*/
+#define MODULE_MAX			56
+#endif /* BRINGUP */
+#define MODULE_MAX_NODES		1
+#endif /* CONFIG_SGI_IP35 */
+#define MODULE_HIST_CNT			16
+#define MAX_MODULE_LEN			16
+
+/* Well-known module IDs */
+#define MODULE_UNKNOWN		(-2) /* initial value of klconfig brd_module */
+/* #define INVALID_MODULE	(-1) ** generic invalid moduleid_t (arch.h) */
+#define MODULE_NOT_SET		0    /* module ID not set in sys ctlrs. */
+
+/* parameter for format_module_id() */
+#define MODULE_FORMAT_BRIEF	1
+#define MODULE_FORMAT_LONG	2
+
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+
+/*
+ *	Module id format
+ *
+ *	  15-12 Brick type (enumerated)
+ *	   11-6	Rack ID	(encoded class, group, number)
+ *	    5-0 Brick position in rack (0-63)
+ */
+/*
+ * Macros for getting the brick type
+ */
+#define MODULE_BTYPE_MASK	0xf000
+#define MODULE_BTYPE_SHFT	12
+#define MODULE_GET_BTYPE(_m)	(((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
+#define MODULE_BT_TO_CHAR(_b)	(brick_types[(_b)])
+#define MODULE_GET_BTCHAR(_m)	(MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
+
+/*
+ * Macros for getting the rack ID.
+ */
+#define MODULE_RACK_MASK	0x0fc0
+#define MODULE_RACK_SHFT	6
+#define MODULE_GET_RACK(_m)	(((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
+
+/*
+ * Macros for getting the brick position
+ */
+#define MODULE_BPOS_MASK	0x003f
+#define MODULE_BPOS_SHFT	0
+#define MODULE_GET_BPOS(_m)	(((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
+
+/*
+ * Macros for constructing moduleid_t's
+ */
+#define RBT_TO_MODULE(_r, _b, _t) ((_r) << MODULE_RACK_SHFT | \
+				   (_b) << MODULE_BPOS_SHFT | \
+				   (_t) << MODULE_BTYPE_SHFT)
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ *   class	1 bit, 0==CPU/mixed, 1==I/O
+ *   group	2 bits for CPU/mixed, 3 bits for I/O
+ *   number	3 bits for CPU/mixed, 2 bits for I/O (1 based)
+ */
+#define RACK_GROUP_BITS(_r)	(RACK_GET_CLASS(_r) ? 3 : 2)
+#define RACK_NUM_BITS(_r)	(RACK_GET_CLASS(_r) ? 2 : 3)
+
+#define RACK_CLASS_MASK(_r)	0x20
+#define RACK_CLASS_SHFT(_r)	5
+#define RACK_GET_CLASS(_r)	\
+	(((_r) & RACK_CLASS_MASK(_r)) >> RACK_CLASS_SHFT(_r))
+#define RACK_ADD_CLASS(_r, _c)	\
+	((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
+
+#define RACK_GROUP_SHFT(_r)	RACK_NUM_BITS(_r)
+#define RACK_GROUP_MASK(_r)	\
+	( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
+#define RACK_GET_GROUP(_r)	\
+	(((_r) & RACK_GROUP_MASK(_r)) >> RACK_GROUP_SHFT(_r))
+#define RACK_ADD_GROUP(_r, _g)	\
+	((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
+
+#define RACK_NUM_SHFT(_r)	0
+#define RACK_NUM_MASK(_r)	\
+	( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
+#define RACK_GET_NUM(_r)	\
+	( (((_r) & RACK_NUM_MASK(_r)) >> RACK_NUM_SHFT(_r)) + 1 )
+#define RACK_ADD_NUM(_r, _n)	\
+	((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
+
+/*
+ * Brick type definitions
+ */
+#define MAX_BRICK_TYPES		16 /* 1 << (MODULE_RACK_SHFT - MODULE_BTYPE_SHFT) */
+
+extern char brick_types[];
+
+#define MODULE_CBRICK		0
+#define MODULE_RBRICK		1
+#define MODULE_IBRICK		2
+#define MODULE_KBRICK		3
+#define MODULE_XBRICK		4
+#define MODULE_DBRICK		5
+#define MODULE_PBRICK		6
+
+/*
+ * Moduleid_t comparison macros
+ */
+/* Don't compare the brick type:  only the position is significant */
+#define MODULE_CMP(_m1, _m2)	(((_m1)&(MODULE_RACK_MASK|MODULE_BPOS_MASK)) -\
+				 ((_m2)&(MODULE_RACK_MASK|MODULE_BPOS_MASK)))
+#define MODULE_MATCH(_m1, _m2)	(MODULE_CMP((_m1),(_m2)) == 0)
+
+#else
+
+/*
+ * Some code that uses this macro will not be conditionally compiled.
+ */
+#define MODULE_GET_BTCHAR(_m)	('?')
+#define MODULE_CMP(_m1, _m2)	((_m1) - (_m2))
+#define MODULE_MATCH(_m1, _m2)	(MODULE_CMP((_m1),(_m2)) == 0)
+
+#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+
+typedef struct module_s module_t;
+
+struct module_s {
+    moduleid_t		id;		/* Module ID of this module        */
+
+    spinlock_t		lock;		/* Lock for this structure	   */
+
+    /* List of nodes in this module */
+    cnodeid_t		nodes[MODULE_MAX_NODES];
+    int			nodecnt;	/* Number of nodes in array        */
+
+    /* Fields for Module System Controller */
+    int			mesgpend;	/* Message pending                 */
+    int			shutdown;	/* Shutdown in progress            */
+    struct semaphore	thdcnt;		/* Threads finished counter        */
+
+    elsc_t		elsc;
+    spinlock_t		elsclock;
+
+    time_t		intrhist[MODULE_HIST_CNT];
+    int			histptr;
+
+    int			hbt_active;	/* MSC heartbeat monitor active    */
+    uint64_t		hbt_last;	/* RTC when last heartbeat sent    */
+
+    /* Module serial number info */
+    union {
+	char		snum_str[MAX_SERIAL_NUM_SIZE];	 /* used by CONFIG_SGI_IP27    */
+	uint64_t	snum_int;			 /* used by speedo */
+    } snum;
+    int			snum_valid;
+
+    int			disable_alert;
+    int			count_down;
+};
+
+/* module.c */
+extern module_t	       *modules[MODULE_MAX];	/* Indexed by cmoduleid_t   */
+extern int		nummodules;
+
+#ifndef CONFIG_IA64_SGI_IO
+/* Clashes with LINUX stuff */
+extern void		module_init(void);
+#endif
+extern module_t	       *module_lookup(moduleid_t id);
+
+extern elsc_t	       *get_elsc(void);
+
+extern int		get_kmod_info(cmoduleid_t cmod,
+				      module_info_t *mod_info);
+
+extern void		format_module_id(char *buffer, moduleid_t m, int fmt);
+extern int		parse_module_id(char *buffer);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif /* _ASM_SN_MODULE_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/nic.h linux/include/asm-ia64/sn/nic.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/nic.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/nic.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,128 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_NIC_H
+#define _ASM_SN_NIC_H
+
+#include <asm/types.h>
+
+#define MCR_DATA(x)			((int) ((x) & 1))
+#define MCR_DONE(x)			((x) & 2)
+#define MCR_PACK(pulse, sample)		((pulse) << 10 | (sample) << 2)
+
+typedef __psunsigned_t	nic_data_t;
+
+typedef int		
+nic_access_f(nic_data_t data,
+	     int pulse, int sample, int delay);
+
+typedef nic_access_f   *nic_access_t;
+
+typedef struct nic_vmce_s      *nic_vmce_t;
+typedef void			nic_vmc_func(devfs_handle_t v);
+
+/*
+ * PRIVATE data for Dallas NIC
+ */
+
+typedef struct nic_state_t {
+    nic_access_t	access;
+    nic_data_t		data;
+    int			last_disc;
+    int			done;
+    int			bit_index;
+    int			disc_marker;
+    uchar_t		bits[64];
+} nic_state_t;
+
+/*
+ * Public interface for Dallas NIC
+ *
+ *
+ *   Access Routine
+ *
+ *   nic_setup requires an access routine that pulses the NIC line for a
+ *   specified duration, samples the NIC line after a specified duration,
+ *   then delays for a third specified duration (for precharge).
+ *
+ *   This general scheme allows us to access NICs through any medium
+ *   (e.g. hub regs, bridge regs, vector writes, system ctlr commands).
+ *
+ *   The access routine should return the sample value 0 or 1, or if an
+ *   error occurs, return a negative error code.  Negative error codes from
+ *   the access routine will abort the NIC operation and be propagated
+ *   through out of the top-level NIC call.
+ */
+
+#define NIC_OK			0
+#define NIC_DONE		1
+#define NIC_FAIL		2
+#define NIC_BAD_CRC		3
+#define NIC_NOT_PRESENT		4
+#define NIC_REDIR_LOOP		5
+#define NIC_PARAM		6
+#define NIC_NOMEM		7
+
+uint64_t nic_get_phase_bits(void);
+
+extern int nic_setup(nic_state_t *ns,
+		     nic_access_t access,
+		     nic_data_t data);
+
+extern int nic_next(nic_state_t *ns,
+		    char *serial,
+		    char *family,
+		    char *crc);
+
+extern int nic_read_one_page(nic_state_t *ns,
+			     char *family,
+			     char *serial,
+			     char *crc,
+			     int start,
+			     uchar_t *redirect,
+			     uchar_t *byte);
+
+extern int nic_read_mfg(nic_state_t *ns,
+			char *family,
+			char *serial,
+			char *crc,
+			uchar_t *pageA,
+			uchar_t *pageB);
+
+extern int nic_info_get(nic_access_t access,
+			nic_data_t data,
+			char *info);
+
+extern int nic_item_info_get(char *buf, char *item, char **item_info);
+
+nic_access_f	nic_access_mcr32;
+
+extern char *nic_vertex_info_get(devfs_handle_t v);
+
+extern char *nic_vertex_info_set(nic_access_t access,
+				 nic_data_t data, 
+				 devfs_handle_t v);
+
+extern int nic_vertex_info_match(devfs_handle_t vertex,
+				 char *name);
+
+extern char *nic_bridge_vertex_info(devfs_handle_t vertex,
+				    nic_data_t	data);
+extern char *nic_hq4_vertex_info(devfs_handle_t vertex,
+				 nic_data_t data);
+extern char *nic_ioc3_vertex_info(devfs_handle_t vertex,
+				    nic_data_t	data,
+				    int32_t *gpcr_s);
+
+extern char *nic_hub_vertex_info(devfs_handle_t vertex);
+
+extern nic_vmce_t	nic_vmc_add(char *, nic_vmc_func *);
+extern void		nic_vmc_del(nic_vmce_t);
+
+#endif /* _ASM_SN_NIC_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/nodemask.h linux/include/asm-ia64/sn/nodemask.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/nodemask.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/nodemask.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,328 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_NODEMASK_H
+#define _ASM_SN_NODEMASK_H
+
+#if defined(__KERNEL__) || defined(_KMEMUSER)
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+#include <asm/sn/sn1/arch.h>    /* needed for MAX_COMPACT_NODES */
+#endif
+
+#define CNODEMASK_BOOTED_MASK		boot_cnodemask
+#define CNODEMASK_BIPW    64
+
+#if !defined(SN0XXL) && !defined(CONFIG_SGI_IP35) && !defined(CONFIG_IA64_SGI_SN1) && !defined(CONFIG_IA64_GENERIC)
+			/* MAXCPUS 128p (64 nodes) or less */
+
+#define CNODEMASK_SIZE    1
+typedef uint64_t cnodemask_t;
+
+#define CNODEMASK_WORD(p,w)     (p)
+#define CNODEMASK_SET_WORD(p,w,val)     (p) = val
+#define CNODEMASK_CLRALL(p)     (p) = 0
+#define CNODEMASK_SETALL(p)     (p) = ~((cnodemask_t)0)
+#define CNODEMASK_IS_ZERO(p)	((p) == 0)
+#define CNODEMASK_IS_NONZERO(p)	((p) != 0)
+#define CNODEMASK_NOTEQ(p, q)	((p) != (q))
+#define CNODEMASK_EQ(p, q)      ((p) == (q))
+#define CNODEMASK_LSB_ISONE(p)  ((p) & 0x1ULL)
+
+#define CNODEMASK_ZERO()        ((cnodemask_t)0)
+#define CNODEMASK_CVTB(bit)     (1ULL << (bit))
+#define CNODEMASK_SETB(p, bit)	((p) |= 1ULL << (bit))
+#define CNODEMASK_CLRB(p, bit)	((p) &= ~(1ULL << (bit)))
+#define CNODEMASK_TSTB(p, bit)	((p) & (1ULL << (bit)))
+
+#define CNODEMASK_SETM(p, q)	((p) |= (q))
+#define CNODEMASK_CLRM(p, q)	((p) &= ~(q))
+#define CNODEMASK_ANDM(p, q)	((p) &= (q))
+#define CNODEMASK_TSTM(p, q)	((p) & (q))
+
+#define CNODEMASK_CPYNOTM(p, q)	((p) = ~(q))
+#define CNODEMASK_CPY(p, q)     ((p) = (q))
+#define CNODEMASK_ORNOTM(p, q)	((p) |= ~(q))
+#define CNODEMASK_SHIFTL(p)     ((p) <<= 1)
+#define CNODEMASK_SHIFTR(p)     ((p) >>= 1)
+#define CNODEMASK_SHIFTL_PTR(p)     (*(p) <<= 1)
+#define CNODEMASK_SHIFTR_PTR(p)     (*(p) >>= 1)
+
+/* Atomically set or clear a particular bit */
+#define CNODEMASK_ATOMSET_BIT(p, bit) atomicSetUlong((cnodemask_t *)&(p), (1ULL<<(bit))) 
+#define CNODEMASK_ATOMCLR_BIT(p, bit) atomicClearUlong((cnodemask_t *)&(p), (1ULL<<(bit)))
+
+/* Atomically set or clear a collection of bits */
+#define CNODEMASK_ATOMSET(p, q)  atomicSetUlong((cnodemask_t *)&(p), q)
+#define CNODEMASK_ATOMCLR(p, q)  atomicClearUlong((cnodemask_t *)&(p), q)
+
+/* Atomically set or clear a collection of bits, returning the old value */
+#define CNODEMASK_ATOMSET_MASK(__old, p, q)	{ \
+		(__old) = atomicSetUlong((cnodemask_t *)&(p), q); \
+}
+#define CNODEMASK_ATOMCLR_MASK(__old, p, q)	{ \
+		(__old) = atomicClearUlong((cnodemask_t *)&(p),q); \
+}
+
+#define CNODEMASK_FROM_NUMNODES(n)	((~(cnodemask_t)0)>>(CNODEMASK_BIPW-(n)))
+
+#else  /* SN0XXL || SN1 - MAXCPUS > 128 */
+
+#define CNODEMASK_SIZE    (MAX_COMPACT_NODES / CNODEMASK_BIPW)
+
+typedef struct {
+        uint64_t _bits[CNODEMASK_SIZE];
+} cnodemask_t;
+
+#define CNODEMASK_WORD(p,w)  \
+	((w >= 0 && w < CNODEMASK_SIZE) ? (p)._bits[(w)] : 0)
+#define CNODEMASK_SET_WORD(p,w,val)  { 				\
+	if (w >= 0 && w < CNODEMASK_SIZE) 			\
+		(p)._bits[(w)] = val;				\
+}
+
+#define CNODEMASK_CLRALL(p)       {                             \
+        int i;                                                  \
+                                                                \
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)                  \
+                (p)._bits[i] = 0;                               \
+}
+
+#define CNODEMASK_SETALL(p)       {                             \
+        int i;                                                  \
+                                                                \
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)                  \
+                (p)._bits[i] = ~(0);                            \
+}
+
+#define CNODEMASK_LSB_ISONE(p)  ((p)._bits[0] & 0x1ULL)
+
+
+#define CNODEMASK_SETM(p,q)       {                             \
+        int i;                                                  \
+                                                                \
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)                  \
+                (p)._bits[i] |= ((q)._bits[i]);                 \
+}
+
+#define CNODEMASK_CLRM(p,q)       {                             \
+        int i;                                                  \
+                                                                \
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)                  \
+                (p)._bits[i] &= ~((q)._bits[i]);                \
+}
+
+#define CNODEMASK_ANDM(p,q)       {                             \
+        int i;                                                  \
+                                                                \
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)                  \
+                (p)._bits[i] &= ((q)._bits[i]);                 \
+}
+
+#define CNODEMASK_CPY(p, q)  {					\
+        int i;                                                  \
+                                                                \
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)                  \
+                (p)._bits[i] = (q)._bits[i];	                \
+}
+
+#define CNODEMASK_CPYNOTM(p,q)    {                             \
+        int i;                                                  \
+                                                                \
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)                  \
+                (p)._bits[i] = ~((q)._bits[i]);                 \
+}
+
+#define CNODEMASK_ORNOTM(p,q)     {                             \
+        int i;                                                  \
+                                                                \
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)                  \
+                (p)._bits[i] |= ~((q)._bits[i]);                \
+}
+
+#define CNODEMASK_INDEX(bit)      ((bit) >> 6)
+#define CNODEMASK_SHFT(bit)       ((bit) & 0x3f)
+
+
+#define CNODEMASK_SETB(p, bit)	 				\
+	(p)._bits[CNODEMASK_INDEX(bit)] |= (1ULL << CNODEMASK_SHFT(bit))
+
+
+#define CNODEMASK_CLRB(p, bit)					\
+	(p)._bits[CNODEMASK_INDEX(bit)] &= ~(1ULL << CNODEMASK_SHFT(bit)) 
+
+
+#define CNODEMASK_TSTB(p, bit)		\
+	((p)._bits[CNODEMASK_INDEX(bit)] & (1ULL << CNODEMASK_SHFT(bit))) 
+
+/** Probably should add atomic update for entire cnodemask_t struct **/
+
+/* Atomically set or clear a particular bit */
+#define CNODEMASK_ATOMSET_BIT(p, bit) \
+        (atomicSetUlong((unsigned long *)&(p)._bits[CNODEMASK_INDEX(bit)], (1ULL << CNODEMASK_SHFT(bit))));
+#define CNODEMASK_ATOMCLR_BIT(__old, p, bit) \
+        (atomicClearUlong((unsigned long *)&(p)._bits[CNODEMASK_INDEX(bit)], (1ULL << CNODEMASK_SHFT(bit))));
+
+/* Atomically set or clear a collection of bits */
+#define CNODEMASK_ATOMSET(p, q) { \
+        int i;				\
+					\
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++) { \
+	      atomicSetUlong((unsigned long *)&(p)._bits[i], (q)._bits[i]);  \
+        }				\
+}
+#define CNODEMASK_ATOMCLR(p, q) { \
+        int i;				\
+                        		\
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++) {	\
+	      atomicClearUlong((unsigned long *)&(p)._bits[i], (q)._bits[i]); \
+        }				\
+}
+
+/* Atomically set or clear a collection of bits, returning the old value */
+#define CNODEMASK_ATOMSET_MASK(__old, p, q)  { \
+        int i;				\
+					\
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++) { \
+           (__old)._bits[i] =	 \
+	      atomicSetUlong((unsigned long *)&(p)._bits[i], (q)._bits[i]);  \
+        }				\
+}
+#define CNODEMASK_ATOMCLR_MASK(__old, p, q) {					\
+        int i;				\
+                        		\
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++) {	\
+           (__old)._bits[i] =				\
+	      atomicClearUlong((unsigned long *)&(p)._bits[i], (q)._bits[i]); \
+        }				\
+}
+
+__inline static cnodemask_t CNODEMASK_CVTB(int bit) 
+{
+	cnodemask_t __tmp;
+	CNODEMASK_CLRALL(__tmp);
+	CNODEMASK_SETB(__tmp,bit);
+	return(__tmp);
+}
+
+
+__inline static cnodemask_t CNODEMASK_ZERO(void)
+{
+	cnodemask_t __tmp;
+	CNODEMASK_CLRALL(__tmp);
+	return(__tmp);
+}
+
+__inline static int CNODEMASK_IS_ZERO (cnodemask_t p)
+{
+        int i;
+
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)
+                if (p._bits[i] != 0)
+                        return 0;
+        return 1;
+}
+
+__inline static int CNODEMASK_IS_NONZERO (cnodemask_t p)
+{
+        int i;
+
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)
+                if (p._bits[i] != 0)
+                        return 1;
+        return 0;
+}
+
+__inline static int CNODEMASK_NOTEQ (cnodemask_t p, cnodemask_t q)
+{
+        int i;
+
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)
+                if (p._bits[i] != q._bits[i])
+                        return 1;
+        return 0;
+}
+
+__inline static int CNODEMASK_EQ (cnodemask_t p, cnodemask_t q)
+{
+        int i;
+
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)
+                if (p._bits[i] != q._bits[i])
+                        return 0;
+        return 1;
+}
+
+
+__inline static int CNODEMASK_TSTM (cnodemask_t p, cnodemask_t q)
+{
+        int i;
+
+        for (i = 0 ; i < CNODEMASK_SIZE ; i++)
+                if (p._bits[i] & q._bits[i])
+                        return 1;
+        return 0;
+}
+
+__inline static void CNODEMASK_SHIFTL_PTR (cnodemask_t *p)
+{
+        int i;
+        uint64_t upper;
+
+        /*
+         * shift words starting with the last word
+         * of the vector and work backward to the first
+         * word updating the low order bits with the
+         * high order bit of the prev word.
+         */
+        for (i=(CNODEMASK_SIZE-1); i > 0; --i) {
+	   upper = (p->_bits[i-1] & (1ULL<<(CNODEMASK_BIPW-1))) ? 1 : 0;
+           p->_bits[i] <<= 1;
+           p->_bits[i] |= upper;
+        }
+        p->_bits[i] <<= 1;
+}
+
+__inline static void CNODEMASK_SHIFTR_PTR (cnodemask_t *p)
+{
+        int i;
+        uint64_t lower;
+
+        /*
+         * shift words starting with the first word
+         * of the vector and work forward to the last
+         * word updating the high order bit with the
+         * low order bit of the next word.
+         */
+        for (i=0; i < (CNODEMASK_SIZE-2); ++i) {
+	   lower = (p->_bits[i+1] & (0x1)) ? 1 : 0;
+           p->_bits[i] >>= 1;
+           p->_bits[i] |= (lower<<((CNODEMASK_BIPW-1)));
+        }
+        p->_bits[i] >>= 1;
+}
+
+__inline static cnodemask_t CNODEMASK_FROM_NUMNODES(int n)
+{
+	cnodemask_t __tmp;
+	int i;
+	CNODEMASK_CLRALL(__tmp);
+	for (i=0; i<n; i++) {
+		CNODEMASK_SETB(__tmp, i);
+	}
+	return(__tmp);
+}
+
+#endif /* SN0XXL || SN1 */
+
+extern cnodemask_t boot_cnodemask;
+
+#endif /* __KERNEL__ || _KMEMUSER */
+
+#endif /* _ASM_SN_NODEMASK_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/nodepda.h linux/include/asm-ia64/sn/nodepda.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/nodepda.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/nodepda.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,444 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_NODEPDA_H
+#define _ASM_SN_NODEPDA_H
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <asm/sn/agent.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/router.h>
+/* #include <SN/klkernvars.h> */
+#ifdef IRIX
+typedef struct module_s module_t;       /* Avoids sys/SN/module.h */
+#else
+#include <asm/sn/module.h>
+#endif
+/* #include <SN/slotnum.h> */
+
+/*
+ * NUMA Node-Specific Data structures are defined in this file.
+ * In particular, this is the location of the node PDA.
+ * A pointer to the right node PDA is saved in each CPU PDA.
+ */
+
+/*
+ * Subnode PDA structures. Each node needs a few data structures that 
+ * correspond to the PIs on the HUB chip that supports the node.
+ *
+ * WARNING!!!! 6.5.x compatibility requirements prevent us from
+ * changing or reordering fields in the following structure for IP27.
+ * It is essential that the data mappings not change for IP27 platforms.
+ * It is OK to add fields that are IP35 specific if they are under #ifdef IP35.
+ */
+struct subnodepda_s {
+	intr_vecblk_t	intr_dispatch0;
+	intr_vecblk_t	intr_dispatch1;
+	uint64_t	next_prof_timeout;
+	int		prof_count;
+};
+
+
+typedef struct subnodepda_s subnode_pda_t;
+
+
+struct ptpool_s;
+
+
+/*
+ * Node-specific data structure.
+ *
+ * One of these structures is allocated on each node of a NUMA system.
+ * Non-NUMA systems are considered to be systems with one node, and
+ * hence there will be one of this structure for the entire system.
+ *
+ * This structure provides a convenient way of keeping together 
+ * all per-node data structures. 
+ */
+
+
+#ifndef CONFIG_IA64_SGI_IO
+/*
+ * The following structure is contained in the nodepda & contains
+ * a lock & queue-head for sanon pages that belong to the node.
+ * See the anon manager for more details.
+ */
+typedef struct {
+	lock_t  sal_lock;
+	plist_t sal_listhead;
+} sanon_list_head_t;
+#endif
+
+
+
+struct nodepda_s {
+
+#ifdef	NUMA_BASE
+
+	/* 
+	 * Pointer to this node's copy of Nodepdaindr 
+	 */
+	struct nodepda_s	**pernode_pdaindr; 
+
+	/*
+         * Data used for migration control
+         */
+	struct migr_control_data_s *mcd; 
+
+	/*
+         * Data used for replication control
+         */
+	struct repl_control_data_s *rcd;
+
+        /*
+         * Numa statistics
+         */
+	struct numa_stats_s *numa_stats;
+
+        /*
+         * Load distribution
+         */
+        uint memfit_assign;
+
+        /*
+         * New extended memory reference counters
+         */
+        void *migr_refcnt_counterbase;
+        void *migr_refcnt_counterbuffer;
+        size_t migr_refcnt_cbsize;
+        int  migr_refcnt_numsets;
+
+        /*
+         * mem_tick quiescing lock
+         */
+        uint mem_tick_lock;
+
+        /*
+         * Migration candidate set
+         * by migration prologue intr handler
+         */
+        uint64_t migr_candidate;
+
+	/*
+	 * Each node gets its own syswait counter to remove contention
+	 * on the global one.
+	 */
+#ifndef CONFIG_IA64_SGI_IO
+	struct syswait syswait;
+#endif
+
+#endif	/* NUMA_BASE */
+	/*
+	 * Node-specific Zone structures.
+	 */
+#ifndef CONFIG_IA64_SGI_IO
+	zoneset_element_t	node_zones;
+	pg_data_t	node_pg_data;	/* VM page data structures */ 
+	plist_t	error_discard_plist;
+#endif
+	uint		error_discard_count;
+	uint		error_page_count;
+	uint		error_cleaned_count;
+	spinlock_t	error_discard_lock;
+
+	/* Information needed for SN Hub chip interrupt handling. */
+	subnode_pda_t	snpda[NUM_SUBNODES];
+	/* Distributed kernel support */
+#ifndef CONFIG_IA64_SGI_IO
+	kern_vars_t	kern_vars;
+#endif
+	/* Vector operation support */
+	/* Change this to a sleep lock? */
+	spinlock_t	vector_lock;
+	/* State of the vector unit for this node */
+	char		vector_unit_busy;
+	cpuid_t         node_first_cpu; /* Starting cpu number for node */
+	ushort          node_num_cpus;  /* Number of cpus present       */
+
+	/* node utlbmiss info */
+  	spinlock_t		node_utlbswitchlock;
+	volatile cpumask_t	node_utlbmiss_flush;
+	volatile signed char	node_need_utlbmiss_patch;
+	volatile char		node_utlbmiss_patched;
+	nodepda_router_info_t	*npda_rip_first;
+	nodepda_router_info_t	**npda_rip_last;
+	int		dependent_routers;
+	devfs_handle_t 	xbow_vhdl;
+	nasid_t		xbow_peer;	/* NASID of our peer hub on xbow */
+	struct semaphore xbow_sema;	/* Sema for xbow synchronization */
+	slotid_t	slotdesc;
+	moduleid_t	module_id;	/* Module ID (redundant local copy) */
+	module_t	*module;	/* Pointer to containing module */
+	int		hub_chip_rev;	/* Rev of my Hub chip */
+	char		nasid_mask[NASID_MASK_BYTES];
+					/* Need a copy of the nasid mask
+					 * on every node */
+	xwidgetnum_t 	basew_id;
+	devfs_handle_t 	basew_xc;
+	spinlock_t	fprom_lock;
+	char		ni_error_print; /* For printing ni error state
+					 * only once during system panic
+					 */
+#ifndef CONFIG_IA64_SGI_IO
+	md_perf_monitor_t node_md_perfmon;
+	hubstat_t	hubstats;
+	int		hubticks;
+	int		huberror_ticks;
+	sbe_info_t	*sbe_info;	/* ECC single-bit error statistics */
+#endif	/* !CONFIG_IA64_SGI_IO */
+
+	router_queue_t  *visited_router_q;
+	router_queue_t	*bfs_router_q; 
+					/* Used for router traversal */
+#if defined (CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+	router_map_ent_t router_map[MAX_RTR_BREADTH];
+#endif
+	int		num_routers;	/* Total routers in the system */
+
+        char		membank_flavor;
+	                                /* Indicates what sort of memory 
+					 * banks are present on this node
+					 */
+	
+	char		*hwg_node_name;	/* hwgraph node name */
+
+	struct widget_info_t *widget_info;	/* Node as xtalk widget */
+	devfs_handle_t	node_vertex;	/* Hwgraph vertex for this node */
+
+	void 		*pdinfo;	/* Platform-dependent per-node info */
+	uint64_t	*dump_stack;	/* Dump stack during nmi handling */
+	int		dump_count;	/* To allow only one cpu-per-node */
+#if defined BRINGUP
+#ifndef CONFIG_IA64_SGI_IO
+	io_perf_monitor_t node_io_perfmon;
+#endif
+#endif
+
+	/*
+	 * Each node gets its own pdcount counter to remove contention
+	 * on the global one.
+	 */
+
+	int pdcount;			/* count of pdinserted pages */
+
+#ifdef	NUMA_BASE
+	void		*cached_global_pool;	/* pointer to cached vmpool */
+#endif /* NUMA_BASE */
+
+#ifndef CONFIG_IA64_SGI_IO
+	sanon_list_head_t sanon_list_head;	/* head for sanon pages */	
+#endif
+#ifdef	NUMA_BASE
+	struct ptpool_s	*ptpool;	/* ptpool for this node */
+#endif /* NUMA_BASE */
+
+	/*
+	 * The BTEs on this node are shared by the local cpus
+	 */
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#ifndef CONFIG_IA64_SGI_IO
+	bteinfo_t	*node_bte_info[BTES_PER_NODE];
+#endif
+#endif
+};
+
+typedef struct nodepda_s nodepda_t;
+
+
+#define NODE_MODULEID(_node)	(NODEPDA(_node)->module_id)
+#define NODE_SLOTID(_node)	(NODEPDA(_node)->slotdesc)
+
+#ifdef	NUMA_BASE
+/*
+ * Access Functions for node PDA.
+ * Since there is one nodepda for each node, we need a convenient mechanism
+ * to access these nodepdas without cluttering code with #ifdefs.
+ * The next set of definitions provides this.
+ * Routines are expected to use 
+ *
+ *	nodepda		-> to access PDA for the node on which code is running
+ *	subnodepda	-> to access subnode PDA for the node on which code is running
+ *
+ *	NODEPDA(x)	-> to access node PDA for cnodeid 'x'
+ *	SUBNODEPDA(x,s)	-> to access subnode PDA for cnodeid/slice 'x'
+ */
+
+#ifndef CONFIG_IA64_SGI_IO
+#define	nodepda		private.p_nodepda	/* Ptr to this node's PDA */
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+#define subnodepda	private.p_subnodepda	/* Ptr to this node's subnode PDA */
+#endif
+
+#else
+/*
+ * Until we have a shared node local area defined, do it this way ..
+ * like in Caliase space.  See above.
+ */
+extern nodepda_t        *nodepda;
+extern subnode_pda_t	*subnodepda;
+#endif
+
+/* 
+ * Nodepdaindr[]
+ * This is a private data structure for use only in early initialization.
+ * All users of nodepda should use the macro NODEPDA(nodenum) to get
+ * the suitable nodepda structure.
+ * This macro has the advantage of not requiring #ifdefs for NUMA and
+ * non-NUMA code.
+ */
+extern nodepda_t	*Nodepdaindr[]; 
+/*
+ * NODEPDA_GLOBAL(x) macro should ONLY be used during early initialization.
+ * Once meminit is complete, NODEPDA(x) is ready to use.
+ * During early init, the system fills up Nodepdaindr.  By the time we
+ * are in meminit(), all nodepdas are initialized, and hence
+ * we can fill up the node_pdaindr array in each nodepda structure.
+ */
+#define	NODEPDA_GLOBAL(x)	Nodepdaindr[x]
+
+/*
+ * Returns a pointer to a given node's nodepda.
+ */
+#define	NODEPDA(x)		(nodepda->pernode_pdaindr[x])
+
+/*
+ * Returns a pointer to a given node/slice's subnodepda.
+ *	SUBNODEPDA(cnode, subnode) - uses cnode as first arg
+ *	SNPDA(npda, subnode)	   - uses pointer to nodepda as first arg
+ */
+#define	SUBNODEPDA(x,sn)	(&nodepda->pernode_pdaindr[x]->snpda[sn])
+#define	SNPDA(npda,sn)		(&(npda)->snpda[sn])
+
+#define NODEPDA_ERROR_FOOTPRINT(node, cpu) \
+                   (&(NODEPDA(node)->error_stamp[cpu]))
+#define NODEPDA_MDP_MON(node)	(&(NODEPDA(node)->node_md_perfmon))
+#define NODEPDA_IOP_MON(node)	(&(NODEPDA(node)->node_io_perfmon))
+
+/*
+ * Macros to access data structures inside nodepda 
+ */
+#if NUMA_MIGR_CONTROL
+#define NODEPDA_MCD(node) (NODEPDA(node)->mcd)
+#endif /* NUMA_MIGR_CONTROL */
+
+#if NUMA_REPL_CONTROL
+#define NODEPDA_RCD(node) (NODEPDA(node)->rcd)
+#endif /* NUMA_REPL_CONTROL */
+
+#if (NUMA_MIGR_CONTROL || NUMA_REPL_CONTROL)
+#define NODEPDA_LRS(node) (NODEPDA(node)->lrs)
+#endif /* (NUMA_MIGR_CONTROL || NUMA_REPL_CONTROL) */
+
+/* 
+ * Exported functions
+ */
+extern nodepda_t *nodepda_alloc(void);
+
+#else	/* !NUMA_BASE */
+/*
+ * For a single-node system we will just have one global nodepda pointer
+ * allocated at startup.  The global nodepda will point to this nodepda 
+ * structure.
+ */
+extern nodepda_t	*Nodepdaindr; 
+
+/*
+ * On non-NUMA systems, NODEPDA_GLOBAL and NODEPDA macros collapse to
+ * be the same.
+ */
+#define	NODEPDA_GLOBAL(x)	Nodepdaindr
+
+/*
+ * Returns a pointer to a given node's nodepda.
+ */
+#define	NODEPDA(x)	Nodepdaindr
+
+/*
+ * nodepda can also be defined as private.p_nodepda.
+ * But on non-NUMA systems, there is only one nodepda, and there is
+ * no reason to go through the PDA to access this pointer.
+ * Hence nodepda aliases to the global nodepda directly.
+ *
+ * Routines should use nodepda to access the local node's PDA.
+ */
+#define	nodepda		(Nodepdaindr)
+
+#endif	/* NUMA_BASE */
+
+/* Quickly convert a compact node ID into a hwgraph vertex */
+#define cnodeid_to_vertex(cnodeid) (NODEPDA(cnodeid)->node_vertex)
+
+
+/* Check if given a compact node id the corresponding node has all the
+ * cpus disabled. 
+ */
+#define is_headless_node(_cnode)	((_cnode == CNODEID_NONE) || \
+					 (CNODE_NUM_CPUS(_cnode) == 0))
+/* Check if given a node vertex handle the corresponding node has all the
+ * cpus disabled. 
+ */
+#define is_headless_node_vertex(_nodevhdl) \
+			is_headless_node(nodevertex_to_cnodeid(_nodevhdl))
+
+#ifdef	__cplusplus
+}
+#endif
+
+#ifdef NUMA_BASE
+/*
+ * To remove contention on the global syswait counter each node will have
+ * its own.  Each clock tick the clock cpu will re-calculate the global
+ * syswait counter by summing from each of the nodes.  The other cpus will
+ * continue to read the global one during their clock ticks.   This does 
+ * present a problem when a thread increments the count on one node and wakes
+ * up on a different node and decrements it there.  Eventually the count could
+ * overflow if this happens continually for a long period.  To prevent this
+ * second_thread() periodically preserves the current syswait state and
+ * resets the counters.
+ */
+#define ADD_SYSWAIT(_field)	atomicAddInt(&nodepda->syswait._field, 1)
+#define SUB_SYSWAIT(_field)	atomicAddInt(&nodepda->syswait._field, -1)
+#else
+#define ADD_SYSWAIT(_field)				\
+{							\
+	ASSERT(syswait._field >= 0);			\
+	atomicAddInt(&syswait._field, 1);		\
+}
+#define SUB_SYSWAIT(_field)				\
+{							\
+	ASSERT(syswait._field > 0);			\
+	atomicAddInt(&syswait._field, -1);		\
+}
+#endif /* NUMA_BASE */
+
+#ifdef NUMA_BASE
+/*
+ * Another global variable to remove contention from: pdcount.
+ * See above comments for SYSWAIT.
+ */
+#define ADD_PDCOUNT(_n)					\
+{							\
+	atomicAddInt(&nodepda->pdcount, _n);		\
+	if (_n > 0 && !pdflag)				\
+		pdflag = 1;				\
+}
+#else
+#define ADD_PDCOUNT(_n)					\
+{							\
+	ASSERT(&pdcount >= 0);				\
+	atomicAddInt(&pdcount, _n);			\
+	if (_n > 0 && !pdflag)				\
+		pdflag = 1;				\
+}
+#endif /* NUMA_BASE */
+
+#endif /* _ASM_SN_NODEPDA_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/bridge.h linux/include/asm-ia64/sn/pci/bridge.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/bridge.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/pci/bridge.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1729 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PCI_BRIDGE_H
+#define _ASM_SN_PCI_BRIDGE_H
+
+
+/*
+ * bridge.h - header file for bridge chip and bridge portion of xbridge chip
+ */
+
+#include <asm/sn/xtalk/xwidget.h>
+
+/* I/O page size */
+
+#if _PAGESZ == 4096
+#define IOPFNSHIFT		12	/* 4K per mapped page */
+#else
+#define IOPFNSHIFT		14	/* 16K per mapped page */
+#endif				/* _PAGESZ */
+
+#define IOPGSIZE		(1 << IOPFNSHIFT)
+#define IOPG(x)			((x) >> IOPFNSHIFT)
+#define IOPGOFF(x)		((x) & (IOPGSIZE-1))
+
+/* Bridge RAM sizes */
+
+#define BRIDGE_INTERNAL_ATES	128
+#define XBRIDGE_INTERNAL_ATES	1024
+
+#define BRIDGE_ATE_RAM_SIZE     (BRIDGE_INTERNAL_ATES<<3)	/* 1kB ATE */
+#define XBRIDGE_ATE_RAM_SIZE    (XBRIDGE_INTERNAL_ATES<<3)	/* 8kB ATE */
+
+#define BRIDGE_CONFIG_BASE	0x20000		/* start of bridge's */
+						/* map to each device's */
+						/* config space */
+#define BRIDGE_CONFIG1_BASE	0x28000		/* type 1 device config space */
+#define BRIDGE_CONFIG_END	0x30000
+#define BRIDGE_CONFIG_SLOT_SIZE 0x1000		/* each map == 4k */
+
+#define BRIDGE_SSRAM_512K	0x00080000	/* 512kB */
+#define BRIDGE_SSRAM_128K	0x00020000	/* 128kB */
+#define BRIDGE_SSRAM_64K	0x00010000	/* 64kB */
+#define BRIDGE_SSRAM_0K		0x00000000	/* 0kB */
+
+/* ========================================================================
+ *    Bridge address map
+ */
+
+#if defined(_LANGUAGE_C) || defined(_LANGUAGE_C_PLUS_PLUS)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * All accesses to bridge hardware registers must be done
+ * using 32-bit loads and stores.
+ */
+typedef uint32_t	bridgereg_t;
+
+typedef uint64_t	bridge_ate_t;
+
+/* pointers to bridge ATEs
+ * are always "pointer to volatile"
+ */
+typedef volatile bridge_ate_t  *bridge_ate_p;
+
+/*
+ * It is generally preferred that hardware registers on the bridge
+ * are located from C code via this structure.
+ *
+ * Generated from Bridge spec dated 04oct95
+ */
+
+#ifdef LITTLE_ENDIAN
+
+typedef volatile struct bridge_s {
+
+    /* Local Registers				       0x000000-0x00FFFF */
+
+    /* standard widget configuration		       0x000000-0x000057 */
+    widget_cfg_t	    b_widget;		    /* 0x000000 */
+
+    /* helper fieldnames for accessing bridge widget */
+
+#define b_wid_id			b_widget.w_id
+#define b_wid_stat			b_widget.w_status
+#define b_wid_err_upper			b_widget.w_err_upper_addr
+#define b_wid_err_lower			b_widget.w_err_lower_addr
+#define b_wid_control			b_widget.w_control
+#define b_wid_req_timeout		b_widget.w_req_timeout
+#define b_wid_int_upper			b_widget.w_intdest_upper_addr
+#define b_wid_int_lower			b_widget.w_intdest_lower_addr
+#define b_wid_err_cmdword		b_widget.w_err_cmd_word
+#define b_wid_llp			b_widget.w_llp_cfg
+#define b_wid_tflush			b_widget.w_tflush
+
+    /*
+     * we access these through synergy unswizzled space, so the address
+     * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
+     * That's why we put the register first and filler second.
+     */
+    /* bridge-specific widget configuration	       0x000058-0x00007F */
+    bridgereg_t             b_wid_aux_err;          /* 0x00005C */
+    bridgereg_t		    _pad_000058;
+
+    bridgereg_t             b_wid_resp_upper;       /* 0x000064 */
+    bridgereg_t             _pad_000060;
+
+    bridgereg_t             b_wid_resp_lower;       /* 0x00006C */
+    bridgereg_t             _pad_000068;
+
+    bridgereg_t             b_wid_tst_pin_ctrl;     /* 0x000074 */
+    bridgereg_t             _pad_000070;
+
+    bridgereg_t		    _pad_000078[2];
+
+    /* PMU & Map				       0x000080-0x00008F */
+    bridgereg_t             b_dir_map;              /* 0x000084 */
+    bridgereg_t             _pad_000080;
+    bridgereg_t		    _pad_000088[2];
+
+    /* SSRAM					       0x000090-0x00009F */
+    bridgereg_t             b_ram_perr_or_map_fault;/* 0x000094 */
+    bridgereg_t             _pad_000090;
+#define b_ram_perr  b_ram_perr_or_map_fault	/* Bridge */
+#define b_map_fault b_ram_perr_or_map_fault	/* Xbridge */
+    bridgereg_t		    _pad_000098[2];
+
+    /* Arbitration				       0x0000A0-0x0000AF */
+    bridgereg_t             b_arb;                  /* 0x0000A4 */
+    bridgereg_t             _pad_0000A0;
+    bridgereg_t		    _pad_0000A8[2];
+
+    /* Number In A Can				       0x0000B0-0x0000BF */
+    bridgereg_t             b_nic;                  /* 0x0000B4 */
+    bridgereg_t             _pad_0000B0;
+    bridgereg_t		    _pad_0000B8[2];
+
+    /* PCI/GIO					       0x0000C0-0x0000FF */
+    bridgereg_t             b_bus_timeout;          /* 0x0000C4 */
+    bridgereg_t             _pad_0000C0;
+#define b_pci_bus_timeout b_bus_timeout
+
+    bridgereg_t             b_pci_cfg;              /* 0x0000CC */
+    bridgereg_t             _pad_0000C8;
+
+    bridgereg_t             b_pci_err_upper;        /* 0x0000D4 */
+    bridgereg_t             _pad_0000D0;
+
+    bridgereg_t             b_pci_err_lower;        /* 0x0000DC */
+    bridgereg_t             _pad_0000D8;
+    bridgereg_t		    _pad_0000E0[8];
+#define b_gio_err_lower b_pci_err_lower
+#define b_gio_err_upper b_pci_err_upper
+
+    /* Interrupt				       0x000100-0x0001FF */
+    bridgereg_t             b_int_status;           /* 0x000104 */
+    bridgereg_t             _pad_000100;
+
+    bridgereg_t             b_int_enable;           /* 0x00010C */
+    bridgereg_t             _pad_000108;
+
+    bridgereg_t             b_int_rst_stat;         /* 0x000114 */
+    bridgereg_t             _pad_000110;
+
+    bridgereg_t             b_int_mode;             /* 0x00011C */
+    bridgereg_t             _pad_000118;
+
+    bridgereg_t             b_int_device;           /* 0x000124 */
+    bridgereg_t             _pad_000120;
+
+    bridgereg_t             b_int_host_err;         /* 0x00012C */
+    bridgereg_t             _pad_000128;
+
+    struct {
+        bridgereg_t             addr;               /* 0x0001{34,,,6C} */
+        bridgereg_t             __pad;              /* 0x0001{30,,,68} */
+    } b_int_addr[8];				    /* 0x000130 */
+
+    bridgereg_t             b_err_int_view;         /* 0x000174 */
+    bridgereg_t             _pad_000170;
+
+    bridgereg_t             b_mult_int;             /* 0x00017c */
+    bridgereg_t             _pad_000178;
+
+    struct {
+        bridgereg_t             intr;               /* 0x0001{84,,,BC} */
+        bridgereg_t             __pad;              /* 0x0001{80,,,B8} */
+    } b_force_always[8];			    /* 0x000180 */
+
+    struct {
+        bridgereg_t             intr;               /* 0x0001{C4,,,FC} */
+        bridgereg_t             __pad;              /* 0x0001{C0,,,F8} */
+    } b_force_pin[8];			    	    /* 0x0001C0 */
+
+    /* Device					       0x000200-0x0003FF */
+    struct {
+        bridgereg_t             reg;                /* 0x0002{04,,,3C} */
+        bridgereg_t             __pad;              /* 0x0002{00,,,38} */
+    } b_device[8];				    /* 0x000200 */
+
+    struct {
+        bridgereg_t             reg;                /* 0x0002{44,,,7C} */
+        bridgereg_t             __pad;              /* 0x0002{40,,,78} */
+    } b_wr_req_buf[8];				    /* 0x000240 */
+
+    struct {
+        bridgereg_t             reg;                /* 0x0002{84,,,8C} */
+        bridgereg_t             __pad;              /* 0x0002{80,,,88} */
+    } b_rrb_map[2];				    /* 0x000280 */
+#define	b_even_resp	b_rrb_map[0].reg	    /* 0x000284 */
+#define	b_odd_resp	b_rrb_map[1].reg	    /* 0x00028C */
+
+    bridgereg_t             b_resp_status;          /* 0x000294 */
+    bridgereg_t             _pad_000290;
+
+    bridgereg_t             b_resp_clear;           /* 0x00029C */
+    bridgereg_t             _pad_000298;
+
+    bridgereg_t		    _pad_0002A0[24];
+
+    /* Xbridge only */
+    struct {
+	bridgereg_t	        upper;              /* 0x0003{04,,,F4} */
+	bridgereg_t             __pad1;		    /* 0x0003{00,,,F0} */
+	bridgereg_t             lower;              /* 0x0003{0C,,,FC} */
+	bridgereg_t             __pad2;             /* 0x0003{08,,,F8} */
+    } b_buf_addr_match[16];
+
+    /* Performance Monitor Registers (even only) */
+    struct {
+        bridgereg_t             flush_w_touch;      /* 0x000404,,,5C4 */
+        bridgereg_t             __pad1;             /* 0x000400,,,5C0 */
+
+        bridgereg_t             flush_wo_touch;     /* 0x00040C,,,5CC */
+        bridgereg_t             __pad2;             /* 0x000408,,,5C8 */
+
+        bridgereg_t             inflight;           /* 0x000414,,,5D4 */
+        bridgereg_t             __pad3;             /* 0x000410,,,5D0 */
+
+        bridgereg_t             prefetch;           /* 0x00041C,,,5DC */
+        bridgereg_t             __pad4;             /* 0x000418,,,5D8 */
+
+        bridgereg_t             total_pci_retry;    /* 0x000424,,,5E4 */
+        bridgereg_t             __pad5;             /* 0x000420,,,5E0 */
+
+        bridgereg_t             max_pci_retry;      /* 0x00042C,,,5EC */
+        bridgereg_t             __pad6;             /* 0x000428,,,5E8 */
+
+        bridgereg_t             max_latency;        /* 0x000434,,,5F4 */
+        bridgereg_t             __pad7;             /* 0x000430,,,5F0 */
+
+        bridgereg_t             clear_all;          /* 0x00043C,,,5FC */
+        bridgereg_t             __pad8;             /* 0x000438,,,5F8 */
+    } b_buf_count[8];
+
+    char                    _pad_000600[0x010000 - 0x000600];
+
+    /*
+     * The Xbridge has 1024 internal ATE's and the Bridge has 128.
+     * Make enough room for the Xbridge ATE's and depend on runtime
+     * checks to limit access to bridge ATE's.
+     */
+
+    /* Internal Address Translation Entry RAM	       0x010000-0x011fff */
+    union {
+	bridge_ate_t		wr;		/* write-only */
+	struct {
+	    bridgereg_t             rd;         /* read-only */
+            bridgereg_t             _p_pad;
+	}			hi;
+    }			    b_int_ate_ram[XBRIDGE_INTERNAL_ATES];
+
+#define b_int_ate_ram_lo(idx) b_int_ate_ram[idx+512].hi.rd
+
+    /* the xbridge read path for internal ates starts at 0x12000.
+     * I don't believe we ever try to read the ates.
+     */
+    /* Internal Address Translation Entry RAM LOW       0x012000-0x013fff */
+    struct {
+	bridgereg_t             rd; 
+        bridgereg_t             _p_pad;
+    }			    xb_int_ate_ram_lo[XBRIDGE_INTERNAL_ATES];
+
+    char		    _pad_014000[0x20000 - 0x014000];
+
+    /* PCI Device Configuration Spaces		       0x020000-0x027FFF */
+    union {				/* make all access sizes available. */
+	uchar_t			c[0x1000 / 1];
+	uint16_t		s[0x1000 / 2];
+	uint32_t		l[0x1000 / 4];
+	uint64_t		d[0x1000 / 8];
+	union {
+	    uchar_t		c[0x100 / 1];
+	    uint16_t		s[0x100 / 2];
+	    uint32_t		l[0x100 / 4];
+	    uint64_t		d[0x100 / 8];
+	}			f[8];
+    } b_type0_cfg_dev[8];			    /* 0x020000 */
+
+    /* PCI Type 1 Configuration Space		       0x028000-0x028FFF */
+    union {				/* make all access sizes available. */
+	uchar_t			c[0x1000 / 1];
+	uint16_t		s[0x1000 / 2];
+	uint32_t		l[0x1000 / 4];
+	uint64_t		d[0x1000 / 8];
+    } b_type1_cfg;				    /* 0x028000-0x029000 */
+
+    char		    _pad_029000[0x007000];  /* 0x029000-0x030000 */
+
+    /* PCI Interrupt Acknowledge Cycle		       0x030000 */
+    union {
+	uchar_t			c[8 / 1];
+	uint16_t		s[8 / 2];
+	uint32_t		l[8 / 4];
+	uint64_t		d[8 / 8];
+    } b_pci_iack;				    /* 0x030000 */
+
+    uchar_t		    _pad_030007[0x04fff8];  /* 0x030008-0x07FFFF */
+
+    /* External Address Translation Entry RAM	       0x080000-0x0FFFFF */
+    bridge_ate_t	    b_ext_ate_ram[0x10000];
+
+    /* Reserved					       0x100000-0x1FFFFF */
+    char		    _pad_100000[0x200000-0x100000];
+
+    /* PCI/GIO Device Spaces			       0x200000-0xBFFFFF */
+    union {				/* make all access sizes available. */
+	uchar_t			c[0x100000 / 1];
+	uint16_t		s[0x100000 / 2];
+	uint32_t		l[0x100000 / 4];
+	uint64_t		d[0x100000 / 8];
+    } b_devio_raw[10];			/* 0x200000 */
+
+    /* b_devio macro is a bit strange; it reflects the
+     * fact that the Bridge ASIC provides 2M for the
+     * first two DevIO windows and 1M for the other six.
+     */
+#define b_devio(n)	b_devio_raw[((n)<2)?(n*2):(n+2)]
+
+    /* External Flash Proms 1,0			       0xC00000-0xFFFFFF */
+    union {				/* make all access sizes available. */
+	uchar_t			c[0x400000 / 1];	/* read-only */
+	uint16_t		s[0x400000 / 2];	/* read-write */
+	uint32_t		l[0x400000 / 4];	/* read-only */
+	uint64_t		d[0x400000 / 8];	/* read-only */
+    } b_external_flash;			/* 0xC00000 */
+} bridge_t;
+
+#else
+
+/*
+ * Field formats for Error Command Word and Auxiliary Error Command Word
+ * of bridge.
+ */
+typedef struct bridge_err_cmdword_s {
+    union {
+	uint32_t		cmd_word;
+	struct {
+	    uint32_t		    didn:4,	/* Destination ID */
+				    sidn:4,	/* Source ID	  */
+				    pactyp:4,	/* Packet type	  */
+				    tnum:5,	/* Trans Number	  */
+				    coh:1,	/* Coh Transacti  */
+				    ds:2,	/* Data size	  */
+				    gbr:1,	/* GBR enable	  */
+				    vbpm:1,	/* VBPM message	  */
+				    error:1,	/* Error occurred */
+				    barr:1,	/* Barrier op	  */
+				    rsvd:8;
+	} berr_st;
+    } berr_un;
+} bridge_err_cmdword_t;
+
+typedef volatile struct bridge_s {
+
+    /* Local Registers                                 0x000000-0x00FFFF */
+
+    /* standard widget configuration                   0x000000-0x000057 */
+    widget_cfg_t            b_widget;               /* 0x000000 */
+
+    /* helper fieldnames for accessing bridge widget */
+
+#define b_wid_id                        b_widget.w_id
+#define b_wid_stat                      b_widget.w_status
+#define b_wid_err_upper                 b_widget.w_err_upper_addr
+#define b_wid_err_lower                 b_widget.w_err_lower_addr
+#define b_wid_control                   b_widget.w_control
+#define b_wid_req_timeout               b_widget.w_req_timeout
+#define b_wid_int_upper                 b_widget.w_intdest_upper_addr
+#define b_wid_int_lower                 b_widget.w_intdest_lower_addr
+#define b_wid_err_cmdword               b_widget.w_err_cmd_word
+#define b_wid_llp                       b_widget.w_llp_cfg
+#define b_wid_tflush                    b_widget.w_tflush
+
+    /* bridge-specific widget configuration            0x000058-0x00007F */
+    bridgereg_t             _pad_000058;
+    bridgereg_t             b_wid_aux_err;          /* 0x00005C */
+    bridgereg_t             _pad_000060;
+    bridgereg_t             b_wid_resp_upper;       /* 0x000064 */
+    bridgereg_t             _pad_000068;
+    bridgereg_t             b_wid_resp_lower;       /* 0x00006C */
+    bridgereg_t             _pad_000070;
+    bridgereg_t             b_wid_tst_pin_ctrl;     /* 0x000074 */
+    bridgereg_t             _pad_000078[2];
+
+    /* PMU & Map                                       0x000080-0x00008F */
+    bridgereg_t             _pad_000080;
+    bridgereg_t             b_dir_map;              /* 0x000084 */
+    bridgereg_t             _pad_000088[2];
+
+    /* SSRAM                                           0x000090-0x00009F */
+    bridgereg_t             _pad_000090;
+    bridgereg_t             b_ram_perr_or_map_fault;/* 0x000094 */
+#define b_ram_perr  b_ram_perr_or_map_fault     /* Bridge */
+#define b_map_fault b_ram_perr_or_map_fault     /* Xbridge */
+    bridgereg_t             _pad_000098[2];
+
+    /* Arbitration                                     0x0000A0-0x0000AF */
+    bridgereg_t             _pad_0000A0;
+    bridgereg_t             b_arb;                  /* 0x0000A4 */
+    bridgereg_t             _pad_0000A8[2];
+
+    /* Number In A Can                                 0x0000B0-0x0000BF */
+    bridgereg_t             _pad_0000B0;
+    bridgereg_t             b_nic;                  /* 0x0000B4 */
+    bridgereg_t             _pad_0000B8[2];
+
+    /* PCI/GIO                                         0x0000C0-0x0000FF */
+    bridgereg_t             _pad_0000C0;
+    bridgereg_t             b_bus_timeout;          /* 0x0000C4 */
+#define b_pci_bus_timeout b_bus_timeout
+
+    bridgereg_t             _pad_0000C8;
+    bridgereg_t             b_pci_cfg;              /* 0x0000CC */
+    bridgereg_t             _pad_0000D0;
+    bridgereg_t             b_pci_err_upper;        /* 0x0000D4 */
+    bridgereg_t             _pad_0000D8;
+    bridgereg_t             b_pci_err_lower;        /* 0x0000DC */
+    bridgereg_t             _pad_0000E0[8];
+#define b_gio_err_lower b_pci_err_lower
+#define b_gio_err_upper b_pci_err_upper
+
+    /* Interrupt                                       0x000100-0x0001FF */
+    bridgereg_t             _pad_000100;
+    bridgereg_t             b_int_status;           /* 0x000104 */
+    bridgereg_t             _pad_000108;
+    bridgereg_t             b_int_enable;           /* 0x00010C */
+    bridgereg_t             _pad_000110;
+    bridgereg_t             b_int_rst_stat;         /* 0x000114 */
+    bridgereg_t             _pad_000118;
+    bridgereg_t             b_int_mode;             /* 0x00011C */
+    bridgereg_t             _pad_000120;
+    bridgereg_t             b_int_device;           /* 0x000124 */
+    bridgereg_t             _pad_000128;
+    bridgereg_t             b_int_host_err;         /* 0x00012C */
+
+    struct {
+        bridgereg_t             __pad;              /* 0x0001{30,,,68} */
+        bridgereg_t             addr;               /* 0x0001{34,,,6C} */
+    } b_int_addr[8];                                /* 0x000130 */
+
+    bridgereg_t             _pad_000170;
+    bridgereg_t             b_err_int_view;         /* 0x000174 */
+    bridgereg_t             _pad_000178;
+    bridgereg_t             b_mult_int;             /* 0x00017c */
+
+    struct {
+        bridgereg_t             __pad;              /* 0x0001{80,,,B8} */
+        bridgereg_t             intr;               /* 0x0001{84,,,BC} */
+    } b_force_always[8];                            /* 0x000180 */
+
+    struct {
+        bridgereg_t             __pad;              /* 0x0001{C0,,,F8} */
+        bridgereg_t             intr;               /* 0x0001{C4,,,FC} */
+    } b_force_pin[8];                               /* 0x0001C0 */
+
+    /* Device                                          0x000200-0x0003FF */
+    struct {
+        bridgereg_t             __pad;              /* 0x0002{00,,,38} */
+        bridgereg_t             reg;                /* 0x0002{04,,,3C} */
+    } b_device[8];                                  /* 0x000200 */
+
+    struct {
+        bridgereg_t             __pad;              /* 0x0002{40,,,78} */
+        bridgereg_t             reg;                /* 0x0002{44,,,7C} */
+    } b_wr_req_buf[8];                              /* 0x000240 */
+
+    struct {
+        bridgereg_t             __pad;              /* 0x0002{80,,,88} */
+        bridgereg_t             reg;                /* 0x0002{84,,,8C} */
+    } b_rrb_map[2];                                 /* 0x000280 */
+#define b_even_resp     b_rrb_map[0].reg            /* 0x000284 */
+#define b_odd_resp      b_rrb_map[1].reg            /* 0x00028C */
+
+    bridgereg_t             _pad_000290;
+    bridgereg_t             b_resp_status;          /* 0x000294 */
+    bridgereg_t             _pad_000298;
+    bridgereg_t             b_resp_clear;           /* 0x00029C */
+
+    bridgereg_t             _pad_0002A0[24];
+
+    /* Xbridge only */
+    struct {
+        bridgereg_t             __pad1;             /* 0x0003{00,,,F0} */
+        bridgereg_t             upper;              /* 0x0003{04,,,F4} */
+        bridgereg_t             __pad2;             /* 0x0003{08,,,F8} */
+        bridgereg_t             lower;              /* 0x0003{0C,,,FC} */
+    } b_buf_addr_match[16];
+
+    /* Performance Monitor Registers (even only) */
+    struct {
+        bridgereg_t             __pad1;             /* 0x000400,,,5C0 */
+        bridgereg_t             flush_w_touch;      /* 0x000404,,,5C4 */
+        bridgereg_t             __pad2;             /* 0x000408,,,5C8 */
+        bridgereg_t             flush_wo_touch;     /* 0x00040C,,,5CC */
+        bridgereg_t             __pad3;             /* 0x000410,,,5D0 */
+        bridgereg_t             inflight;           /* 0x000414,,,5D4 */
+        bridgereg_t             __pad4;             /* 0x000418,,,5D8 */
+        bridgereg_t             prefetch;           /* 0x00041C,,,5DC */
+        bridgereg_t             __pad5;             /* 0x000420,,,5E0 */
+        bridgereg_t             total_pci_retry;    /* 0x000424,,,5E4 */
+        bridgereg_t             __pad6;             /* 0x000428,,,5E8 */
+        bridgereg_t             max_pci_retry;      /* 0x00042C,,,5EC */
+        bridgereg_t             __pad7;             /* 0x000430,,,5F0 */
+        bridgereg_t             max_latency;        /* 0x000434,,,5F4 */
+        bridgereg_t             __pad8;             /* 0x000438,,,5F8 */
+        bridgereg_t             clear_all;          /* 0x00043C,,,5FC */
+    } b_buf_count[8];
+
+    char                    _pad_000600[0x010000 - 0x000600];
+
+    /*
+     * The Xbridge has 1024 internal ATE's and the Bridge has 128.
+     * Make enough room for the Xbridge ATE's and depend on runtime
+     * checks to limit access to bridge ATE's.
+     */
+
+    /* Internal Address Translation Entry RAM          0x010000-0x011fff */
+    union {
+        bridge_ate_t            wr;             /* write-only */
+        struct {
+            bridgereg_t             _p_pad;
+            bridgereg_t             rd;         /* read-only */
+        }                       hi;
+    }                       b_int_ate_ram[XBRIDGE_INTERNAL_ATES];
+
+#define b_int_ate_ram_lo(idx) b_int_ate_ram[idx+512].hi.rd
+
+    /* the xbridge read path for internal ates starts at 0x12000.
+     * I don't believe we ever try to read the ates.
+     */
+    /* Internal Address Translation Entry RAM LOW       0x012000-0x013fff */
+    struct {
+        bridgereg_t             _p_pad;
+        bridgereg_t             rd;             /* read-only */
+    }                       xb_int_ate_ram_lo[XBRIDGE_INTERNAL_ATES];
+
+    char                    _pad_014000[0x20000 - 0x014000];
+
+    /* PCI Device Configuration Spaces                 0x020000-0x027FFF */
+    union {                             /* make all access sizes available. */
+        uchar_t                 c[0x1000 / 1];
+        uint16_t                s[0x1000 / 2];
+        uint32_t              l[0x1000 / 4];
+        uint64_t              d[0x1000 / 8];
+        union {
+            uchar_t             c[0x100 / 1];
+            uint16_t            s[0x100 / 2];
+            uint32_t          l[0x100 / 4];
+            uint64_t          d[0x100 / 8];
+        }                       f[8];
+    } b_type0_cfg_dev[8];                           /* 0x020000 */
+
+
+    /* PCI Type 1 Configuration Space                  0x028000-0x028FFF */
+    union {                             /* make all access sizes available. */
+        uchar_t                 c[0x1000 / 1];
+        uint16_t                s[0x1000 / 2];
+        uint32_t              l[0x1000 / 4];
+        uint64_t              d[0x1000 / 8];
+    } b_type1_cfg;                                  /* 0x028000-0x029000 */
+
+    char                    _pad_029000[0x007000];  /* 0x029000-0x030000 */
+
+    /* PCI Interrupt Acknowledge Cycle                 0x030000 */
+    union {
+        uchar_t                 c[8 / 1];
+        uint16_t                s[8 / 2];
+        uint32_t              l[8 / 4];
+        uint64_t              d[8 / 8];
+    } b_pci_iack;                                   /* 0x030000 */
+
+    uchar_t                 _pad_030007[0x04fff8];  /* 0x030008-0x07FFFF */
+
+    /* External Address Translation Entry RAM          0x080000-0x0FFFFF */
+    bridge_ate_t            b_ext_ate_ram[0x10000];
+
+    /* Reserved                                        0x100000-0x1FFFFF */
+    char                    _pad_100000[0x200000-0x100000];
+
+    /* PCI/GIO Device Spaces                           0x200000-0xBFFFFF */
+    union {                             /* make all access sizes available. */
+        uchar_t                 c[0x100000 / 1];
+        uint16_t                s[0x100000 / 2];
+        uint32_t              l[0x100000 / 4];
+        uint64_t              d[0x100000 / 8];
+    } b_devio_raw[10];                  /* 0x200000 */
+
+    /* b_devio macro is a bit strange; it reflects the
+     * fact that the Bridge ASIC provides 2M for the
+     * first two DevIO windows and 1M for the other six.
+     */
+#define b_devio(n)      b_devio_raw[((n)<2)?(n*2):(n+2)]
+
+    /* External Flash Proms 1,0                        0xC00000-0xFFFFFF */
+    union {                             /* make all access sizes available. */
+        uchar_t                 c[0x400000 / 1];        /* read-only */
+        uint16_t                s[0x400000 / 2];        /* read-write */
+        uint32_t              l[0x400000 / 4];        /* read-only */
+        uint64_t              d[0x400000 / 8];        /* read-only */
+    } b_external_flash;                 /* 0xC00000 */
+} bridge_t;
+
+#endif
+
+
+
+
+
+
+#define berr_field	berr_un.berr_st
+#endif				/* LANGUAGE_C */
+
+/*
+ * The values of these macros can and should be crosschecked
+ * regularly against the offsets of the like-named fields
+ * within the "bridge_t" structure above.
+ */
+
+/* Byte offset macros for Bridge internal registers */
+
+#define BRIDGE_WID_ID		WIDGET_ID
+#define BRIDGE_WID_STAT		WIDGET_STATUS
+#define BRIDGE_WID_ERR_UPPER	WIDGET_ERR_UPPER_ADDR
+#define BRIDGE_WID_ERR_LOWER	WIDGET_ERR_LOWER_ADDR
+#define BRIDGE_WID_CONTROL	WIDGET_CONTROL
+#define BRIDGE_WID_REQ_TIMEOUT	WIDGET_REQ_TIMEOUT
+#define BRIDGE_WID_INT_UPPER	WIDGET_INTDEST_UPPER_ADDR
+#define BRIDGE_WID_INT_LOWER	WIDGET_INTDEST_LOWER_ADDR
+#define BRIDGE_WID_ERR_CMDWORD	WIDGET_ERR_CMD_WORD
+#define BRIDGE_WID_LLP		WIDGET_LLP_CFG
+#define BRIDGE_WID_TFLUSH	WIDGET_TFLUSH
+
+#define BRIDGE_WID_AUX_ERR	0x00005C	/* Aux Error Command Word */
+#define BRIDGE_WID_RESP_UPPER	0x000064	/* Response Buf Upper Addr */
+#define BRIDGE_WID_RESP_LOWER	0x00006C	/* Response Buf Lower Addr */
+#define BRIDGE_WID_TST_PIN_CTRL 0x000074	/* Test pin control */
+
+#define BRIDGE_DIR_MAP		0x000084	/* Direct Map reg */
+
+/* Bridge has SSRAM Parity Error and Xbridge has Map Fault here */
+#define BRIDGE_RAM_PERR 	0x000094	/* SSRAM Parity Error */
+#define BRIDGE_MAP_FAULT	0x000094	/* Map Fault */
+
+#define BRIDGE_ARB		0x0000A4	/* Arbitration Priority reg */
+
+#define BRIDGE_NIC		0x0000B4	/* Number In A Can */
+
+#define BRIDGE_BUS_TIMEOUT	0x0000C4	/* Bus Timeout Register */
+#define BRIDGE_PCI_BUS_TIMEOUT	BRIDGE_BUS_TIMEOUT
+#define BRIDGE_PCI_CFG		0x0000CC	/* PCI Type 1 Config reg */
+#define BRIDGE_PCI_ERR_UPPER	0x0000D4	/* PCI error Upper Addr */
+#define BRIDGE_PCI_ERR_LOWER	0x0000DC	/* PCI error Lower Addr */
+
+#define BRIDGE_INT_STATUS	0x000104	/* Interrupt Status */
+#define BRIDGE_INT_ENABLE	0x00010C	/* Interrupt Enables */
+#define BRIDGE_INT_RST_STAT	0x000114	/* Reset Intr Status */
+#define BRIDGE_INT_MODE		0x00011C	/* Interrupt Mode */
+#define BRIDGE_INT_DEVICE	0x000124	/* Interrupt Device */
+#define BRIDGE_INT_HOST_ERR	0x00012C	/* Host Error Field */
+
+#define BRIDGE_INT_ADDR0	0x000134	/* Host Address Reg */
+#define BRIDGE_INT_ADDR_OFF	0x000008	/* Host Addr offset (1..7) */
+#define BRIDGE_INT_ADDR(x)	(BRIDGE_INT_ADDR0+(x)*BRIDGE_INT_ADDR_OFF)
+
+#define BRIDGE_INT_VIEW		0x000174	/* Interrupt view */
+#define BRIDGE_MULTIPLE_INT	0x00017c	/* Multiple interrupt occurred */
+
+#define BRIDGE_FORCE_ALWAYS0	0x000184	/* Force an interrupt (always)*/
+#define BRIDGE_FORCE_ALWAYS_OFF 0x000008	/* Force Always offset */
+#define BRIDGE_FORCE_ALWAYS(x)  (BRIDGE_FORCE_ALWAYS0+(x)*BRIDGE_FORCE_ALWAYS_OFF)
+
+#define BRIDGE_FORCE_PIN0	0x0001c4	/* Force an interrupt */
+#define BRIDGE_FORCE_PIN_OFF 	0x000008	/* Force Pin offset */
+#define BRIDGE_FORCE_PIN(x)  (BRIDGE_FORCE_PIN0+(x)*BRIDGE_FORCE_PIN_OFF)
+
+#define BRIDGE_DEVICE0		0x000204	/* Device 0 */
+#define BRIDGE_DEVICE_OFF	0x000008	/* Device offset (1..7) */
+#define BRIDGE_DEVICE(x)	(BRIDGE_DEVICE0+(x)*BRIDGE_DEVICE_OFF)
+
+#define BRIDGE_WR_REQ_BUF0	0x000244	/* Write Request Buffer 0 */
+#define BRIDGE_WR_REQ_BUF_OFF	0x000008	/* Buffer Offset (1..7) */
+#define BRIDGE_WR_REQ_BUF(x)	(BRIDGE_WR_REQ_BUF0+(x)*BRIDGE_WR_REQ_BUF_OFF)
+
+#define BRIDGE_EVEN_RESP	0x000284	/* Even Device Response Buf */
+#define BRIDGE_ODD_RESP		0x00028C	/* Odd Device Response Buf */
+
+#define BRIDGE_RESP_STATUS	0x000294	/* Read Response Status reg */
+#define BRIDGE_RESP_CLEAR	0x00029C	/* Read Response Clear reg */
+
+#define BRIDGE_BUF_ADDR_UPPER0	0x000304
+#define BRIDGE_BUF_ADDR_UPPER_OFF 0x000010	/* PCI Buffer Upper Offset */
+#define BRIDGE_BUF_ADDR_UPPER(x) (BRIDGE_BUF_ADDR_UPPER0+(x)*BRIDGE_BUF_ADDR_UPPER_OFF)
+
+#define BRIDGE_BUF_ADDR_LOWER0	0x00030c
+#define BRIDGE_BUF_ADDR_LOWER_OFF 0x000010	/* PCI Buffer Lower Offset */
+#define BRIDGE_BUF_ADDR_LOWER(x) (BRIDGE_BUF_ADDR_LOWER0+(x)*BRIDGE_BUF_ADDR_LOWER_OFF)
+
+/* 
+ * Performance Monitor Registers.
+ *
+ * The Performance registers are those registers which are associated with
+ * monitoring the performance of PCI generated reads to the host
+ * environment. Because of the size of the register file only the even
+ * registers were instrumented.
+ */
+
+#define BRIDGE_BUF_OFF 0x40
+#define BRIDGE_BUF_NEXT(base, off) (base+((off)*BRIDGE_BUF_OFF))
+
+/*
+ * Buffer (x) Flush Count with Data Touch Register.
+ *
+ * This counter is incremented each time the corresponding response buffer
+ * is flushed after at least a single data element in the buffer is used.
+ * A word write to this address clears the count.
+ */
+
+#define BRIDGE_BUF_0_FLUSH_TOUCH  0x000404
+#define BRIDGE_BUF_2_FLUSH_TOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_TOUCH, 1)
+#define BRIDGE_BUF_4_FLUSH_TOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_TOUCH, 2)
+#define BRIDGE_BUF_6_FLUSH_TOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_TOUCH, 3)
+#define BRIDGE_BUF_8_FLUSH_TOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_TOUCH, 4)
+#define BRIDGE_BUF_10_FLUSH_TOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_TOUCH, 5)
+#define BRIDGE_BUF_12_FLUSH_TOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_TOUCH, 6)
+#define BRIDGE_BUF_14_FLUSH_TOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_TOUCH, 7)
+
+/*
+ * Buffer (x) Flush Count w/o Data Touch Register
+ *
+ * This counter is incremented each time the corresponding response buffer
+ * is flushed without any data element in the buffer being used. A word
+ * write to this address clears the count.
+ */
+
+
+#define BRIDGE_BUF_0_FLUSH_NOTOUCH  0x00040c
+#define BRIDGE_BUF_2_FLUSH_NOTOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_NOTOUCH, 1)
+#define BRIDGE_BUF_4_FLUSH_NOTOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_NOTOUCH, 2)
+#define BRIDGE_BUF_6_FLUSH_NOTOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_NOTOUCH, 3)
+#define BRIDGE_BUF_8_FLUSH_NOTOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_NOTOUCH, 4)
+#define BRIDGE_BUF_10_FLUSH_NOTOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_NOTOUCH, 5)
+#define BRIDGE_BUF_12_FLUSH_NOTOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_NOTOUCH, 6)
+#define BRIDGE_BUF_14_FLUSH_NOTOUCH  BRIDGE_BUF_NEXT(BRIDGE_BUF_0_FLUSH_NOTOUCH, 7)
+
+/*
+ * Buffer (x) Request in Flight Count Register
+ *
+ * This counter is incremented on each bus clock while the request is in
+ * flight. A word write to this address clears the count.
+ */
+
+#define BRIDGE_BUF_0_INFLIGHT	 0x000414
+#define BRIDGE_BUF_2_INFLIGHT    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_INFLIGHT, 1)
+#define BRIDGE_BUF_4_INFLIGHT    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_INFLIGHT, 2)
+#define BRIDGE_BUF_6_INFLIGHT    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_INFLIGHT, 3)
+#define BRIDGE_BUF_8_INFLIGHT    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_INFLIGHT, 4)
+#define BRIDGE_BUF_10_INFLIGHT   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_INFLIGHT, 5)
+#define BRIDGE_BUF_12_INFLIGHT   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_INFLIGHT, 6)
+#define BRIDGE_BUF_14_INFLIGHT   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_INFLIGHT, 7)
+
+/*
+ * Buffer (x) Prefetch Request Count Register
+ *
+ * This counter is incremented each time the request using this buffer was
+ * generated from the prefetcher. A word write to this address clears the
+ * count.
+ */
+
+#define BRIDGE_BUF_0_PREFETCH	 0x00041C
+#define BRIDGE_BUF_2_PREFETCH    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PREFETCH, 1)
+#define BRIDGE_BUF_4_PREFETCH    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PREFETCH, 2)
+#define BRIDGE_BUF_6_PREFETCH    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PREFETCH, 3)
+#define BRIDGE_BUF_8_PREFETCH    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PREFETCH, 4)
+#define BRIDGE_BUF_10_PREFETCH   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PREFETCH, 5)
+#define BRIDGE_BUF_12_PREFETCH   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PREFETCH, 6)
+#define BRIDGE_BUF_14_PREFETCH   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PREFETCH, 7)
+
+/*
+ * Buffer (x) Total PCI Retry Count Register
+ *
+ * This counter is incremented each time a PCI bus retry occurs and the
+ * address matches the tag for the selected buffer. The buffer must also
+ * have this request in-flight. A word write to this address clears the count.
+ */
+
+#define BRIDGE_BUF_0_PCI_RETRY	 0x000424
+#define BRIDGE_BUF_2_PCI_RETRY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PCI_RETRY, 1)
+#define BRIDGE_BUF_4_PCI_RETRY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PCI_RETRY, 2)
+#define BRIDGE_BUF_6_PCI_RETRY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PCI_RETRY, 3)
+#define BRIDGE_BUF_8_PCI_RETRY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PCI_RETRY, 4)
+#define BRIDGE_BUF_10_PCI_RETRY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PCI_RETRY, 5)
+#define BRIDGE_BUF_12_PCI_RETRY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PCI_RETRY, 6)
+#define BRIDGE_BUF_14_PCI_RETRY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_PCI_RETRY, 7)
+
+/*
+ * Buffer (x) Max PCI Retry Count Register
+ *
+ * This counter contains the maximum retry count for a single request
+ * which was in-flight for this buffer. A word write to this address
+ * clears the count.
+ */
+
+#define BRIDGE_BUF_0_MAX_PCI_RETRY	 0x00042C
+#define BRIDGE_BUF_2_MAX_PCI_RETRY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_PCI_RETRY, 1)
+#define BRIDGE_BUF_4_MAX_PCI_RETRY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_PCI_RETRY, 2)
+#define BRIDGE_BUF_6_MAX_PCI_RETRY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_PCI_RETRY, 3)
+#define BRIDGE_BUF_8_MAX_PCI_RETRY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_PCI_RETRY, 4)
+#define BRIDGE_BUF_10_MAX_PCI_RETRY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_PCI_RETRY, 5)
+#define BRIDGE_BUF_12_MAX_PCI_RETRY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_PCI_RETRY, 6)
+#define BRIDGE_BUF_14_MAX_PCI_RETRY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_PCI_RETRY, 7)
+
+/*
+ * Buffer (x) Max Latency Count Register
+ *
+ * This counter contains the maximum count (in bus clocks) for a single
+ * request which was in-flight for this buffer. A word write to this
+ * address clears the count.
+ */
+
+#define BRIDGE_BUF_0_MAX_LATENCY	 0x000434
+#define BRIDGE_BUF_2_MAX_LATENCY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_LATENCY, 1)
+#define BRIDGE_BUF_4_MAX_LATENCY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_LATENCY, 2)
+#define BRIDGE_BUF_6_MAX_LATENCY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_LATENCY, 3)
+#define BRIDGE_BUF_8_MAX_LATENCY    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_LATENCY, 4)
+#define BRIDGE_BUF_10_MAX_LATENCY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_LATENCY, 5)
+#define BRIDGE_BUF_12_MAX_LATENCY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_LATENCY, 6)
+#define BRIDGE_BUF_14_MAX_LATENCY   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_MAX_LATENCY, 7)
+
+/*
+ * Buffer (x) Clear All Register
+ *
+ * Any access to this register clears all the count values for the (x)
+ * registers.
+ */
+
+#define BRIDGE_BUF_0_CLEAR_ALL	 0x00043C
+#define BRIDGE_BUF_2_CLEAR_ALL    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_CLEAR_ALL, 1)
+#define BRIDGE_BUF_4_CLEAR_ALL    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_CLEAR_ALL, 2)
+#define BRIDGE_BUF_6_CLEAR_ALL    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_CLEAR_ALL, 3)
+#define BRIDGE_BUF_8_CLEAR_ALL    BRIDGE_BUF_NEXT(BRIDGE_BUF_0_CLEAR_ALL, 4)
+#define BRIDGE_BUF_10_CLEAR_ALL   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_CLEAR_ALL, 5)
+#define BRIDGE_BUF_12_CLEAR_ALL   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_CLEAR_ALL, 6)
+#define BRIDGE_BUF_14_CLEAR_ALL   BRIDGE_BUF_NEXT(BRIDGE_BUF_0_CLEAR_ALL, 7)
+
+/* end of Performance Monitor Registers */
+
+/* Byte offset macros for Bridge I/O space */
+
+#define BRIDGE_ATE_RAM		0x00010000	/* Internal Addr Xlat Ram */
+
+#define BRIDGE_TYPE0_CFG_DEV0	0x00020000	/* Type 0 Cfg, Device 0 */
+#define BRIDGE_TYPE0_CFG_SLOT_OFF	0x00001000	/* Type 0 Cfg Slot Offset (1..7) */
+#define BRIDGE_TYPE0_CFG_FUNC_OFF	0x00000100	/* Type 0 Cfg Func Offset (1..7) */
+#define BRIDGE_TYPE0_CFG_DEV(s)		(BRIDGE_TYPE0_CFG_DEV0+\
+					 (s)*BRIDGE_TYPE0_CFG_SLOT_OFF)
+#define BRIDGE_TYPE0_CFG_DEVF(s,f)	(BRIDGE_TYPE0_CFG_DEV0+\
+					 (s)*BRIDGE_TYPE0_CFG_SLOT_OFF+\
+					 (f)*BRIDGE_TYPE0_CFG_FUNC_OFF)
+
+#define BRIDGE_TYPE1_CFG	0x00028000	/* Type 1 Cfg space */
+
+#define BRIDGE_PCI_IACK		0x00030000	/* PCI Interrupt Ack */
+#define BRIDGE_EXT_SSRAM	0x00080000	/* Extern SSRAM (ATE) */
+
+/* Byte offset macros for Bridge device IO spaces */
+
+#define BRIDGE_DEV_CNT		8	/* Up to 8 devices per bridge */
+#define BRIDGE_DEVIO0		0x00200000	/* Device IO 0 Addr */
+#define BRIDGE_DEVIO1		0x00400000	/* Device IO 1 Addr */
+#define BRIDGE_DEVIO2		0x00600000	/* Device IO 2 Addr */
+#define BRIDGE_DEVIO_OFF	0x00100000	/* Device IO Offset (3..7) */
+
+#define BRIDGE_DEVIO_2MB	0x00200000	/* Device IO Offset (0..1) */
+#define BRIDGE_DEVIO_1MB	0x00100000	/* Device IO Offset (2..7) */
+
+#if	LANGUAGE_C
+
+#define BRIDGE_DEVIO(x)		((x)<=1 ? BRIDGE_DEVIO0+(x)*BRIDGE_DEVIO_2MB : BRIDGE_DEVIO2+((x)-2)*BRIDGE_DEVIO_1MB)
+#endif				/* LANGUAGE_C */
+
+#define BRIDGE_EXTERNAL_FLASH	0x00C00000	/* External Flash PROMS */
+
+/* ========================================================================
+ *    Bridge register bit field definitions
+ */
+
+/* Widget part number of bridge */
+#define BRIDGE_WIDGET_PART_NUM		0xc002
+#define XBRIDGE_WIDGET_PART_NUM		0xd002
+
+/* Manufacturer of bridge */
+#define BRIDGE_WIDGET_MFGR_NUM		0x036
+#define XBRIDGE_WIDGET_MFGR_NUM		0x024
+
+/* Revision numbers for known [X]Bridge revisions */
+#define BRIDGE_REV_A			0x1
+#define BRIDGE_REV_B			0x2
+#define BRIDGE_REV_C			0x3
+#define	BRIDGE_REV_D			0x4
+#define XBRIDGE_REV_A			0x1
+#define XBRIDGE_REV_B			0x2
+
+/* Part + Rev numbers allow distinction and ascending sequence */
+#define BRIDGE_PART_REV_A	(BRIDGE_WIDGET_PART_NUM << 4 | BRIDGE_REV_A)
+#define BRIDGE_PART_REV_B	(BRIDGE_WIDGET_PART_NUM << 4 | BRIDGE_REV_B)
+#define BRIDGE_PART_REV_C	(BRIDGE_WIDGET_PART_NUM << 4 | BRIDGE_REV_C)
+#define	BRIDGE_PART_REV_D	(BRIDGE_WIDGET_PART_NUM << 4 | BRIDGE_REV_D)
+#define XBRIDGE_PART_REV_A	(XBRIDGE_WIDGET_PART_NUM << 4 | XBRIDGE_REV_A)
+#define XBRIDGE_PART_REV_B	(XBRIDGE_WIDGET_PART_NUM << 4 | XBRIDGE_REV_B)
+
+/* Bridge widget status register bits definition */
+
+#define BRIDGE_STAT_LLP_REC_CNT		(0xFFu << 24)
+#define BRIDGE_STAT_LLP_TX_CNT		(0xFF << 16)
+#define BRIDGE_STAT_FLASH_SELECT	(0x1 << 6)
+#define BRIDGE_STAT_PCI_GIO_N		(0x1 << 5)
+#define BRIDGE_STAT_PENDING		(0x1F << 0)
+
+/* Bridge widget control register bits definition */
+#define BRIDGE_CTRL_FLASH_WR_EN		(0x1ul << 31)
+#define BRIDGE_CTRL_EN_CLK50		(0x1 << 30)
+#define BRIDGE_CTRL_EN_CLK40		(0x1 << 29)
+#define BRIDGE_CTRL_EN_CLK33		(0x1 << 28)
+#define BRIDGE_CTRL_RST(n)		((n) << 24)
+#define BRIDGE_CTRL_RST_MASK		(BRIDGE_CTRL_RST(0xF))
+#define BRIDGE_CTRL_RST_PIN(x)		(BRIDGE_CTRL_RST(0x1 << (x)))
+#define BRIDGE_CTRL_IO_SWAP		(0x1 << 23)
+#define BRIDGE_CTRL_MEM_SWAP		(0x1 << 22)
+#define BRIDGE_CTRL_PAGE_SIZE		(0x1 << 21)
+#define BRIDGE_CTRL_SS_PAR_BAD		(0x1 << 20)
+#define BRIDGE_CTRL_SS_PAR_EN		(0x1 << 19)
+#define BRIDGE_CTRL_SSRAM_SIZE(n)	((n) << 17)
+#define BRIDGE_CTRL_SSRAM_SIZE_MASK	(BRIDGE_CTRL_SSRAM_SIZE(0x3))
+#define BRIDGE_CTRL_SSRAM_512K		(BRIDGE_CTRL_SSRAM_SIZE(0x3))
+#define BRIDGE_CTRL_SSRAM_128K		(BRIDGE_CTRL_SSRAM_SIZE(0x2))
+#define BRIDGE_CTRL_SSRAM_64K		(BRIDGE_CTRL_SSRAM_SIZE(0x1))
+#define BRIDGE_CTRL_SSRAM_1K		(BRIDGE_CTRL_SSRAM_SIZE(0x0))
+#define BRIDGE_CTRL_F_BAD_PKT		(0x1 << 16)
+#define BRIDGE_CTRL_LLP_XBAR_CRD(n)	((n) << 12)
+#define BRIDGE_CTRL_LLP_XBAR_CRD_MASK	(BRIDGE_CTRL_LLP_XBAR_CRD(0xf))
+#define BRIDGE_CTRL_CLR_RLLP_CNT	(0x1 << 11)
+#define BRIDGE_CTRL_CLR_TLLP_CNT	(0x1 << 10)
+#define BRIDGE_CTRL_SYS_END		(0x1 << 9)
+#define BRIDGE_CTRL_MAX_TRANS(n)	((n) << 4)
+#define BRIDGE_CTRL_MAX_TRANS_MASK	(BRIDGE_CTRL_MAX_TRANS(0x1f))
+#define BRIDGE_CTRL_WIDGET_ID(n)	((n) << 0)
+#define BRIDGE_CTRL_WIDGET_ID_MASK	(BRIDGE_CTRL_WIDGET_ID(0xf))
+
+/* Bridge Response buffer Error Upper Register bit fields definition */
+#define BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT (20)
+#define BRIDGE_RESP_ERRUPPR_DEVNUM_MASK (0x7 << BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT)
+#define BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT (16)
+#define BRIDGE_RESP_ERRUPPR_BUFNUM_MASK (0xF << BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT)
+#define BRIDGE_RESP_ERRRUPPR_BUFMASK	(0xFFFF)
+
+#define BRIDGE_RESP_ERRUPPR_BUFNUM(x)	\
+			(((x) & BRIDGE_RESP_ERRUPPR_BUFNUM_MASK) >> \
+				BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT)
+
+#define BRIDGE_RESP_ERRUPPR_DEVICE(x)	\
+			(((x) &	 BRIDGE_RESP_ERRUPPR_DEVNUM_MASK) >> \
+				 BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT)
+
+/* Bridge direct mapping register bits definition */
+#define BRIDGE_DIRMAP_W_ID_SHFT		20
+#define BRIDGE_DIRMAP_W_ID		(0xf << BRIDGE_DIRMAP_W_ID_SHFT)
+#define BRIDGE_DIRMAP_RMF_64		(0x1 << 18)
+#define BRIDGE_DIRMAP_ADD512		(0x1 << 17)
+#define BRIDGE_DIRMAP_OFF		(0x1ffff << 0)
+#define BRIDGE_DIRMAP_OFF_ADDRSHFT	(31)	/* lsbit of DIRMAP_OFF is xtalk address bit 31 */
+
+/* Bridge Arbitration register bits definition */
+#define BRIDGE_ARB_REQ_WAIT_TICK(x)	((x) << 16)
+#define BRIDGE_ARB_REQ_WAIT_TICK_MASK	BRIDGE_ARB_REQ_WAIT_TICK(0x3)
+#define BRIDGE_ARB_REQ_WAIT_EN(x)	((x) << 8)
+#define BRIDGE_ARB_REQ_WAIT_EN_MASK	BRIDGE_ARB_REQ_WAIT_EN(0xff)
+#define BRIDGE_ARB_FREEZE_GNT		(1 << 6)
+#define BRIDGE_ARB_HPRI_RING_B2		(1 << 5)
+#define BRIDGE_ARB_HPRI_RING_B1		(1 << 4)
+#define BRIDGE_ARB_HPRI_RING_B0		(1 << 3)
+#define BRIDGE_ARB_LPRI_RING_B2		(1 << 2)
+#define BRIDGE_ARB_LPRI_RING_B1		(1 << 1)
+#define BRIDGE_ARB_LPRI_RING_B0		(1 << 0)
+
+/* Bridge Bus time-out register bits definition */
+#define BRIDGE_BUS_PCI_RETRY_HLD(x)	((x) << 16)
+#define BRIDGE_BUS_PCI_RETRY_HLD_MASK	BRIDGE_BUS_PCI_RETRY_HLD(0x1f)
+#define BRIDGE_BUS_GIO_TIMEOUT		(1 << 12)
+#define BRIDGE_BUS_PCI_RETRY_CNT(x)	((x) << 0)
+#define BRIDGE_BUS_PCI_RETRY_MASK	BRIDGE_BUS_PCI_RETRY_CNT(0x3ff)
+
+/* Bridge interrupt status register bits definition */
+#define BRIDGE_ISR_MULTI_ERR		(0x1u << 31)	/* bridge only */
+#define BRIDGE_ISR_PMU_ESIZE_FAULT	(0x1 << 30)	/* bridge only */
+#define BRIDGE_ISR_PAGE_FAULT		(0x1 << 30)	/* xbridge only */
+#define BRIDGE_ISR_UNEXP_RESP		(0x1 << 29)
+#define BRIDGE_ISR_BAD_XRESP_PKT	(0x1 << 28)
+#define BRIDGE_ISR_BAD_XREQ_PKT		(0x1 << 27)
+#define BRIDGE_ISR_RESP_XTLK_ERR	(0x1 << 26)
+#define BRIDGE_ISR_REQ_XTLK_ERR		(0x1 << 25)
+#define BRIDGE_ISR_INVLD_ADDR		(0x1 << 24)
+#define BRIDGE_ISR_UNSUPPORTED_XOP	(0x1 << 23)
+#define BRIDGE_ISR_XREQ_FIFO_OFLOW	(0x1 << 22)
+#define BRIDGE_ISR_LLP_REC_SNERR	(0x1 << 21)
+#define BRIDGE_ISR_LLP_REC_CBERR	(0x1 << 20)
+#define BRIDGE_ISR_LLP_RCTY		(0x1 << 19)
+#define BRIDGE_ISR_LLP_TX_RETRY		(0x1 << 18)
+#define BRIDGE_ISR_LLP_TCTY		(0x1 << 17)
+#define BRIDGE_ISR_SSRAM_PERR		(0x1 << 16)
+#define BRIDGE_ISR_PCI_ABORT		(0x1 << 15)
+#define BRIDGE_ISR_PCI_PARITY		(0x1 << 14)
+#define BRIDGE_ISR_PCI_SERR		(0x1 << 13)
+#define BRIDGE_ISR_PCI_PERR		(0x1 << 12)
+#define BRIDGE_ISR_PCI_MST_TIMEOUT	(0x1 << 11)
+#define BRIDGE_ISR_GIO_MST_TIMEOUT	BRIDGE_ISR_PCI_MST_TIMEOUT
+#define BRIDGE_ISR_PCI_RETRY_CNT	(0x1 << 10)
+#define BRIDGE_ISR_XREAD_REQ_TIMEOUT	(0x1 << 9)
+#define BRIDGE_ISR_GIO_B_ENBL_ERR	(0x1 << 8)
+#define BRIDGE_ISR_INT_MSK		(0xff << 0)
+#define BRIDGE_ISR_INT(x)		(0x1 << (x))
+
+#define BRIDGE_ISR_LINK_ERROR		\
+		(BRIDGE_ISR_LLP_REC_SNERR|BRIDGE_ISR_LLP_REC_CBERR|	\
+		 BRIDGE_ISR_LLP_RCTY|BRIDGE_ISR_LLP_TX_RETRY|		\
+		 BRIDGE_ISR_LLP_TCTY)
+
+#define BRIDGE_ISR_PCIBUS_PIOERR	\
+		(BRIDGE_ISR_PCI_MST_TIMEOUT|BRIDGE_ISR_PCI_ABORT)
+
+#define BRIDGE_ISR_PCIBUS_ERROR		\
+		(BRIDGE_ISR_PCIBUS_PIOERR|BRIDGE_ISR_PCI_PERR|		\
+		 BRIDGE_ISR_PCI_SERR|BRIDGE_ISR_PCI_RETRY_CNT|		\
+		 BRIDGE_ISR_PCI_PARITY)
+
+#define BRIDGE_ISR_XTALK_ERROR		\
+		(BRIDGE_ISR_XREAD_REQ_TIMEOUT|BRIDGE_ISR_XREQ_FIFO_OFLOW|\
+		 BRIDGE_ISR_UNSUPPORTED_XOP|BRIDGE_ISR_INVLD_ADDR|	\
+		 BRIDGE_ISR_REQ_XTLK_ERR|BRIDGE_ISR_RESP_XTLK_ERR|	\
+		 BRIDGE_ISR_BAD_XREQ_PKT|BRIDGE_ISR_BAD_XRESP_PKT|	\
+		 BRIDGE_ISR_UNEXP_RESP)
+
+#define BRIDGE_ISR_ERRORS		\
+		(BRIDGE_ISR_LINK_ERROR|BRIDGE_ISR_PCIBUS_ERROR|		\
+		 BRIDGE_ISR_XTALK_ERROR|BRIDGE_ISR_SSRAM_PERR|		\
+		 BRIDGE_ISR_PMU_ESIZE_FAULT)
+
+/*
+ * List of Errors which are fatal and kill the system
+ */
+#define BRIDGE_ISR_ERROR_FATAL		\
+		((BRIDGE_ISR_XTALK_ERROR & ~BRIDGE_ISR_XREAD_REQ_TIMEOUT)|\
+		 BRIDGE_ISR_PCI_SERR|BRIDGE_ISR_PCI_PARITY )
+
+#define BRIDGE_ISR_ERROR_DUMP		\
+		(BRIDGE_ISR_PCIBUS_ERROR|BRIDGE_ISR_PMU_ESIZE_FAULT|	\
+		 BRIDGE_ISR_XTALK_ERROR|BRIDGE_ISR_SSRAM_PERR)
+
+/* Bridge interrupt enable register bits definition */
+#define BRIDGE_IMR_UNEXP_RESP		BRIDGE_ISR_UNEXP_RESP
+#define BRIDGE_IMR_PMU_ESIZE_FAULT	BRIDGE_ISR_PMU_ESIZE_FAULT
+#define BRIDGE_IMR_BAD_XRESP_PKT	BRIDGE_ISR_BAD_XRESP_PKT
+#define BRIDGE_IMR_BAD_XREQ_PKT		BRIDGE_ISR_BAD_XREQ_PKT
+#define BRIDGE_IMR_RESP_XTLK_ERR	BRIDGE_ISR_RESP_XTLK_ERR
+#define BRIDGE_IMR_REQ_XTLK_ERR		BRIDGE_ISR_REQ_XTLK_ERR
+#define BRIDGE_IMR_INVLD_ADDR		BRIDGE_ISR_INVLD_ADDR
+#define BRIDGE_IMR_UNSUPPORTED_XOP	BRIDGE_ISR_UNSUPPORTED_XOP
+#define BRIDGE_IMR_XREQ_FIFO_OFLOW	BRIDGE_ISR_XREQ_FIFO_OFLOW
+#define BRIDGE_IMR_LLP_REC_SNERR	BRIDGE_ISR_LLP_REC_SNERR
+#define BRIDGE_IMR_LLP_REC_CBERR	BRIDGE_ISR_LLP_REC_CBERR
+#define BRIDGE_IMR_LLP_RCTY		BRIDGE_ISR_LLP_RCTY
+#define BRIDGE_IMR_LLP_TX_RETRY		BRIDGE_ISR_LLP_TX_RETRY
+#define BRIDGE_IMR_LLP_TCTY		BRIDGE_ISR_LLP_TCTY
+#define BRIDGE_IMR_SSRAM_PERR		BRIDGE_ISR_SSRAM_PERR
+#define BRIDGE_IMR_PCI_ABORT		BRIDGE_ISR_PCI_ABORT
+#define BRIDGE_IMR_PCI_PARITY		BRIDGE_ISR_PCI_PARITY
+#define BRIDGE_IMR_PCI_SERR		BRIDGE_ISR_PCI_SERR
+#define BRIDGE_IMR_PCI_PERR		BRIDGE_ISR_PCI_PERR
+#define BRIDGE_IMR_PCI_MST_TIMEOUT	BRIDGE_ISR_PCI_MST_TIMEOUT
+#define BRIDGE_IMR_GIO_MST_TIMEOUT	BRIDGE_ISR_GIO_MST_TIMEOUT
+#define BRIDGE_IMR_PCI_RETRY_CNT	BRIDGE_ISR_PCI_RETRY_CNT
+#define BRIDGE_IMR_XREAD_REQ_TIMEOUT	BRIDGE_ISR_XREAD_REQ_TIMEOUT
+#define BRIDGE_IMR_GIO_B_ENBL_ERR	BRIDGE_ISR_GIO_B_ENBL_ERR
+#define BRIDGE_IMR_INT_MSK		BRIDGE_ISR_INT_MSK
+#define BRIDGE_IMR_INT(x)		BRIDGE_ISR_INT(x)
+
+/* Bridge interrupt reset register bits definition */
+#define BRIDGE_IRR_MULTI_CLR		(0x1 << 6)
+#define BRIDGE_IRR_CRP_GRP_CLR		(0x1 << 5)
+#define BRIDGE_IRR_RESP_BUF_GRP_CLR	(0x1 << 4)
+#define BRIDGE_IRR_REQ_DSP_GRP_CLR	(0x1 << 3)
+#define BRIDGE_IRR_LLP_GRP_CLR		(0x1 << 2)
+#define BRIDGE_IRR_SSRAM_GRP_CLR	(0x1 << 1)
+#define BRIDGE_IRR_PCI_GRP_CLR		(0x1 << 0)
+#define BRIDGE_IRR_GIO_GRP_CLR		(0x1 << 0)
+#define BRIDGE_IRR_ALL_CLR		0x7f
+
+#define BRIDGE_IRR_CRP_GRP		(BRIDGE_ISR_UNEXP_RESP | \
+					 BRIDGE_ISR_XREQ_FIFO_OFLOW)
+#define BRIDGE_IRR_RESP_BUF_GRP		(BRIDGE_ISR_BAD_XRESP_PKT | \
+					 BRIDGE_ISR_RESP_XTLK_ERR | \
+					 BRIDGE_ISR_XREAD_REQ_TIMEOUT)
+#define BRIDGE_IRR_REQ_DSP_GRP		(BRIDGE_ISR_UNSUPPORTED_XOP | \
+					 BRIDGE_ISR_BAD_XREQ_PKT | \
+					 BRIDGE_ISR_REQ_XTLK_ERR | \
+					 BRIDGE_ISR_INVLD_ADDR)
+#define BRIDGE_IRR_LLP_GRP		(BRIDGE_ISR_LLP_REC_SNERR | \
+					 BRIDGE_ISR_LLP_REC_CBERR | \
+					 BRIDGE_ISR_LLP_RCTY | \
+					 BRIDGE_ISR_LLP_TX_RETRY | \
+					 BRIDGE_ISR_LLP_TCTY)
+#define BRIDGE_IRR_SSRAM_GRP		(BRIDGE_ISR_SSRAM_PERR | \
+					 BRIDGE_ISR_PMU_ESIZE_FAULT)
+#define BRIDGE_IRR_PCI_GRP		(BRIDGE_ISR_PCI_ABORT | \
+					 BRIDGE_ISR_PCI_PARITY | \
+					 BRIDGE_ISR_PCI_SERR | \
+					 BRIDGE_ISR_PCI_PERR | \
+					 BRIDGE_ISR_PCI_MST_TIMEOUT | \
+					 BRIDGE_ISR_PCI_RETRY_CNT)
+
+#define BRIDGE_IRR_GIO_GRP		(BRIDGE_ISR_GIO_B_ENBL_ERR | \
+					 BRIDGE_ISR_GIO_MST_TIMEOUT)
+
+/* Bridge INT_DEV register bits definition */
+#define BRIDGE_INT_DEV_SHFT(n)		((n)*3)
+#define BRIDGE_INT_DEV_MASK(n)		(0x7 << BRIDGE_INT_DEV_SHFT(n))
+/* parenthesize _dev so expressions like (a|b) aren't misparsed by << precedence */
+#define BRIDGE_INT_DEV_SET(_dev, _line) ((_dev) << BRIDGE_INT_DEV_SHFT(_line))
+
+/* Bridge interrupt(x) register bits definition */
+#define BRIDGE_INT_ADDR_HOST		0x0003FF00
+#define BRIDGE_INT_ADDR_FLD		0x000000FF
+
+#define BRIDGE_TMO_PCI_RETRY_HLD_MASK	0x1f0000
+#define BRIDGE_TMO_GIO_TIMEOUT_MASK	0x001000
+#define BRIDGE_TMO_PCI_RETRY_CNT_MASK	0x0003ff
+
+#define BRIDGE_TMO_PCI_RETRY_CNT_MAX	0x3ff
+
+#ifdef SN0
+/*
+ * The NASID should be shifted by this amount and stored into the
+ * interrupt(x) register.
+ */
+#define BRIDGE_INT_ADDR_NASID_SHFT	8
+
+/*
+ * The BRIDGE_INT_ADDR_DEST_IO bit should be set to send an interrupt to
+ * memory.
+ */
+#define BRIDGE_INT_ADDR_DEST_IO		(1 << 17)
+#define BRIDGE_INT_ADDR_DEST_MEM	0
+#define BRIDGE_INT_ADDR_MASK		(1 << 17)
+#endif
+
+/* Bridge device(x) register bits definition */
+#define BRIDGE_DEV_ERR_LOCK_EN		(1ull << 28)
+#define BRIDGE_DEV_PAGE_CHK_DIS		(1ull << 27)
+#define BRIDGE_DEV_FORCE_PCI_PAR	(1ull << 26)
+#define BRIDGE_DEV_VIRTUAL_EN		(1ull << 25)
+#define BRIDGE_DEV_PMU_WRGA_EN		(1ull << 24)
+#define BRIDGE_DEV_DIR_WRGA_EN		(1ull << 23)
+#define BRIDGE_DEV_DEV_SIZE		(1ull << 22)
+#define BRIDGE_DEV_RT			(1ull << 21)
+#define BRIDGE_DEV_SWAP_PMU		(1ull << 20)
+#define BRIDGE_DEV_SWAP_DIR		(1ull << 19)
+#define BRIDGE_DEV_PREF			(1ull << 18)
+#define BRIDGE_DEV_PRECISE		(1ull << 17)
+#define BRIDGE_DEV_COH			(1ull << 16)
+#define BRIDGE_DEV_BARRIER		(1ull << 15)
+#define BRIDGE_DEV_GBR			(1ull << 14)
+#define BRIDGE_DEV_DEV_SWAP		(1ull << 13)
+#define BRIDGE_DEV_DEV_IO_MEM		(1ull << 12)
+#define BRIDGE_DEV_OFF_MASK		0x00000fff
+#define BRIDGE_DEV_OFF_ADDR_SHFT	20
+
+#define XBRIDGE_DEV_PMU_BITS		BRIDGE_DEV_PMU_WRGA_EN
+#define BRIDGE_DEV_PMU_BITS		(BRIDGE_DEV_PMU_WRGA_EN		| \
+					 BRIDGE_DEV_SWAP_PMU)
+#define BRIDGE_DEV_D32_BITS		(BRIDGE_DEV_DIR_WRGA_EN		| \
+					 BRIDGE_DEV_SWAP_DIR		| \
+					 BRIDGE_DEV_PREF		| \
+					 BRIDGE_DEV_PRECISE		| \
+					 BRIDGE_DEV_COH			| \
+					 BRIDGE_DEV_BARRIER)
+#define XBRIDGE_DEV_D64_BITS		(BRIDGE_DEV_DIR_WRGA_EN		| \
+					 BRIDGE_DEV_COH			| \
+					 BRIDGE_DEV_BARRIER)
+#define BRIDGE_DEV_D64_BITS		(BRIDGE_DEV_DIR_WRGA_EN		| \
+					 BRIDGE_DEV_SWAP_DIR		| \
+					 BRIDGE_DEV_COH			| \
+					 BRIDGE_DEV_BARRIER)
+
+/* Bridge Error Upper register bit field definition */
+#define BRIDGE_ERRUPPR_DEVMASTER	(0x1 << 20)	/* Device was master */
+#define BRIDGE_ERRUPPR_PCIVDEV		(0x1 << 19)	/* Virtual Req value */
+#define BRIDGE_ERRUPPR_DEVNUM_SHFT	(16)
+#define BRIDGE_ERRUPPR_DEVNUM_MASK	(0x7 << BRIDGE_ERRUPPR_DEVNUM_SHFT)
+#define BRIDGE_ERRUPPR_DEVICE(err)	(((err) >> BRIDGE_ERRUPPR_DEVNUM_SHFT) & 0x7)
+#define BRIDGE_ERRUPPR_ADDRMASK		(0xFFFF)
+
+/* Bridge interrupt mode register bits definition */
+#define BRIDGE_INTMODE_CLR_PKT_EN(x)	(0x1 << (x))
+
+/* this should be written to the xbow's link_control(x) register */
+#define BRIDGE_CREDIT	3
+
+/* RRB assignment register */
+#define	BRIDGE_RRB_EN	0x8	/* after shifting down */
+#define	BRIDGE_RRB_DEV	0x7	/* after shifting down */
+#define	BRIDGE_RRB_VDEV	0x4	/* after shifting down */
+#define	BRIDGE_RRB_PDEV	0x3	/* after shifting down */
+
+/* RRB status register */
+#define	BRIDGE_RRB_VALID(r)	(0x00010000<<(r))
+#define	BRIDGE_RRB_INUSE(r)	(0x00000001<<(r))
+
+/* RRB clear register */
+#define	BRIDGE_RRB_CLEAR(r)	(0x00000001<<(r))
+
+/* xbox system controller declarations */
+#define XBOX_BRIDGE_WID         8
+#define FLASH_PROM1_BASE        0xE00000 /* To read the xbox sysctlr status */
+#define XBOX_RPS_EXISTS		(1 << 6) /* RPS bit in status register */
+#define XBOX_RPS_FAIL		(1 << 4) /* RPS status bit in register */
+
+/* ========================================================================
+ */
+/*
+ * Macros for Xtalk to Bridge bus (PCI/GIO) PIO
+ * refer to section 4.2.1 of Bridge Spec for xtalk to PCI/GIO PIO mappings
+ */
+/* XTALK addresses that map into Bridge Bus addr space */
+#define BRIDGE_PIO32_XTALK_ALIAS_BASE	0x000040000000L
+#define BRIDGE_PIO32_XTALK_ALIAS_LIMIT	0x00007FFFFFFFL
+#define BRIDGE_PIO64_XTALK_ALIAS_BASE	0x000080000000L
+#define BRIDGE_PIO64_XTALK_ALIAS_LIMIT	0x0000BFFFFFFFL
+#define BRIDGE_PCIIO_XTALK_ALIAS_BASE	0x000100000000L
+#define BRIDGE_PCIIO_XTALK_ALIAS_LIMIT	0x0001FFFFFFFFL
+
+/* Ranges of PCI bus space that can be accessed via PIO from xtalk */
+#define BRIDGE_MIN_PIO_ADDR_MEM		0x00000000	/* 1G PCI memory space */
+#define BRIDGE_MAX_PIO_ADDR_MEM		0x3fffffff
+#define BRIDGE_MIN_PIO_ADDR_IO		0x00000000	/* 4G PCI IO space */
+#define BRIDGE_MAX_PIO_ADDR_IO		0xffffffff
+
+/* XTALK addresses that map into PCI addresses */
+#define BRIDGE_PCI_MEM32_BASE		BRIDGE_PIO32_XTALK_ALIAS_BASE
+#define BRIDGE_PCI_MEM32_LIMIT		BRIDGE_PIO32_XTALK_ALIAS_LIMIT
+#define BRIDGE_PCI_MEM64_BASE		BRIDGE_PIO64_XTALK_ALIAS_BASE
+#define BRIDGE_PCI_MEM64_LIMIT		BRIDGE_PIO64_XTALK_ALIAS_LIMIT
+#define BRIDGE_PCI_IO_BASE		BRIDGE_PCIIO_XTALK_ALIAS_BASE
+#define BRIDGE_PCI_IO_LIMIT		BRIDGE_PCIIO_XTALK_ALIAS_LIMIT
+
+/*
+ * Macros for Bridge bus (PCI/GIO) to Xtalk DMA
+ */
+/* Bridge Bus DMA addresses */
+#define BRIDGE_LOCAL_BASE		0
+#define BRIDGE_DMA_MAPPED_BASE		0x40000000
+#define BRIDGE_DMA_MAPPED_SIZE		0x40000000	/* 1G Bytes */
+#define BRIDGE_DMA_DIRECT_BASE		0x80000000
+#define BRIDGE_DMA_DIRECT_SIZE		0x80000000	/* 2G Bytes */
+
+#define PCI32_LOCAL_BASE		BRIDGE_LOCAL_BASE
+
+/* PCI addresses of regions decoded by Bridge for DMA */
+#define PCI32_MAPPED_BASE		BRIDGE_DMA_MAPPED_BASE
+#define PCI32_DIRECT_BASE		BRIDGE_DMA_DIRECT_BASE
+
+#if	LANGUAGE_C
+
+#define IS_PCI32_LOCAL(x)	((uint64_t)(x) < PCI32_MAPPED_BASE)
+#define IS_PCI32_MAPPED(x)	((uint64_t)(x) < PCI32_DIRECT_BASE && \
+					(uint64_t)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI32_DIRECT(x)	((uint64_t)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI64(x)		((uint64_t)(x) >= PCI64_BASE)
+#endif				/* LANGUAGE_C */
+
+/*
+ * The GIO address space.
+ */
+/* Xtalk to GIO PIO */
+#define BRIDGE_GIO_MEM32_BASE		BRIDGE_PIO32_XTALK_ALIAS_BASE
+#define BRIDGE_GIO_MEM32_LIMIT		BRIDGE_PIO32_XTALK_ALIAS_LIMIT
+
+#define GIO_LOCAL_BASE			BRIDGE_LOCAL_BASE
+
+/* GIO addresses of regions decoded by Bridge for DMA */
+#define GIO_MAPPED_BASE			BRIDGE_DMA_MAPPED_BASE
+#define GIO_DIRECT_BASE			BRIDGE_DMA_DIRECT_BASE
+
+#if	LANGUAGE_C
+
+#define IS_GIO_LOCAL(x)		((uint64_t)(x) < GIO_MAPPED_BASE)
+#define IS_GIO_MAPPED(x)	((uint64_t)(x) < GIO_DIRECT_BASE && \
+					(uint64_t)(x) >= GIO_MAPPED_BASE)
+#define IS_GIO_DIRECT(x)	((uint64_t)(x) >= GIO_MAPPED_BASE)
+#endif				/* LANGUAGE_C */
+
+/* PCI to xtalk mapping */
+
+/* given a DIR_OFF value and a pci/gio 32 bits direct address, determine
+ * which xtalk address is accessed
+ */
+#define BRIDGE_DIRECT_32_SEG_SIZE	BRIDGE_DMA_DIRECT_SIZE
+#define BRIDGE_DIRECT_32_TO_XTALK(dir_off,adr)		\
+	((dir_off) * BRIDGE_DIRECT_32_SEG_SIZE +	\
+		((adr) & (BRIDGE_DIRECT_32_SEG_SIZE - 1)) + PHYS_RAMBASE)
+
+/* 64-bit address attribute masks */
+#define PCI64_ATTR_TARG_MASK	0xf000000000000000
+#define PCI64_ATTR_TARG_SHFT	60
+#define PCI64_ATTR_PREF		(1ull << 59)
+#define PCI64_ATTR_PREC		(1ull << 58)
+#define PCI64_ATTR_VIRTUAL	(1ull << 57)
+#define PCI64_ATTR_BAR		(1ull << 56)
+#define PCI64_ATTR_SWAP		(1ull << 55)
+#define PCI64_ATTR_RMF_MASK	0x00ff000000000000
+#define PCI64_ATTR_RMF_SHFT	48
+
+#if LANGUAGE_C
+/* Address translation entry for mapped pci32 accesses */
+typedef union ate_u {
+    uint64_t		    ent;
+    struct xb_ate_s {					/* xbridge */
+	uint64_t		:16;
+	uint64_t		addr:36;
+	uint64_t		targ:4;
+	uint64_t		reserved:2;
+        uint64_t		swap:1;
+	uint64_t		barrier:1;
+	uint64_t		prefetch:1;
+	uint64_t		precise:1;
+	uint64_t		coherent:1;
+	uint64_t		valid:1;
+    } xb_field;
+    struct ate_s {					/* bridge */
+	uint64_t		rmf:16;
+	uint64_t		addr:36;
+	uint64_t		targ:4;
+	uint64_t		reserved:3;
+	uint64_t		barrier:1;
+	uint64_t		prefetch:1;
+	uint64_t		precise:1;
+	uint64_t		coherent:1;
+	uint64_t		valid:1;
+    } field;
+} ate_t;
+#endif				/* LANGUAGE_C */
+
+#define ATE_V		(1 << 0)
+#define ATE_CO		(1 << 1)
+#define ATE_PREC	(1 << 2)
+#define ATE_PREF	(1 << 3)
+#define ATE_BAR		(1 << 4)
+#define ATE_SWAP        (1 << 5)
+
+#define ATE_PFNSHIFT		12
+#define ATE_TIDSHIFT		8
+#define ATE_RMFSHIFT		48
+
+/* Build an ATE from xtalk address, target widget id, and attribute bits.
+ * Whole expansion is parenthesized so callers may safely combine it with
+ * higher-precedence operators (e.g. mkate(...) & mask). */
+#define mkate(xaddr, xid, attr) (((xaddr) & 0x0000fffffffff000ULL) | \
+				((xid)<<ATE_TIDSHIFT) | \
+				(attr))
+
+/*
+ * for xbridge, bit 29 of the pci address is the swap bit */
+#define ATE_SWAPSHIFT		29
+#define ATE_SWAP_ON(x)		((x) |= (1 << ATE_SWAPSHIFT))
+#define ATE_SWAP_OFF(x)		((x) &= ~(1 << ATE_SWAPSHIFT))
+
+#define is_xbridge(bridge) \
+        (XWIDGET_PART_NUM(bridge->b_wid_id) == XBRIDGE_WIDGET_PART_NUM)
+
+#if	LANGUAGE_C
+
+/* ========================================================================
+ */
+
+#ifdef	MACROFIELD_LINE
+/*
+ * This table forms a relation between the byte offset macros normally
+ * used for ASM coding and the calculated byte offsets of the fields
+ * in the C structure.
+ *
+ * See bridge_check.c and bridge_html.c for further details.
+ */
+#ifndef MACROFIELD_LINE_BITFIELD
+#define MACROFIELD_LINE_BITFIELD(m)	/* ignored */
+#endif
+
+struct macrofield_s	bridge_macrofield[] =
+{
+
+    MACROFIELD_LINE(BRIDGE_WID_ID, b_wid_id)
+    MACROFIELD_LINE_BITFIELD(WIDGET_REV_NUM)
+    MACROFIELD_LINE_BITFIELD(WIDGET_PART_NUM)
+    MACROFIELD_LINE_BITFIELD(WIDGET_MFG_NUM)
+    MACROFIELD_LINE(BRIDGE_WID_STAT, b_wid_stat)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_STAT_LLP_REC_CNT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_STAT_LLP_TX_CNT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_STAT_FLASH_SELECT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_STAT_PCI_GIO_N)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_STAT_PENDING)
+    MACROFIELD_LINE(BRIDGE_WID_ERR_UPPER, b_wid_err_upper)
+    MACROFIELD_LINE(BRIDGE_WID_ERR_LOWER, b_wid_err_lower)
+    MACROFIELD_LINE(BRIDGE_WID_CONTROL, b_wid_control)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_FLASH_WR_EN)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_EN_CLK50)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_EN_CLK40)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_EN_CLK33)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_RST_MASK)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_IO_SWAP)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_MEM_SWAP)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_PAGE_SIZE)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_SS_PAR_BAD)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_SS_PAR_EN)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_SSRAM_SIZE_MASK)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_F_BAD_PKT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_LLP_XBAR_CRD_MASK)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_CLR_RLLP_CNT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_CLR_TLLP_CNT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_SYS_END)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_MAX_TRANS_MASK)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_CTRL_WIDGET_ID_MASK)
+    MACROFIELD_LINE(BRIDGE_WID_REQ_TIMEOUT, b_wid_req_timeout)
+    MACROFIELD_LINE(BRIDGE_WID_INT_UPPER, b_wid_int_upper)
+    MACROFIELD_LINE_BITFIELD(WIDGET_INT_VECTOR)
+    MACROFIELD_LINE_BITFIELD(WIDGET_TARGET_ID)
+    MACROFIELD_LINE_BITFIELD(WIDGET_UPP_ADDR)
+    MACROFIELD_LINE(BRIDGE_WID_INT_LOWER, b_wid_int_lower)
+    MACROFIELD_LINE(BRIDGE_WID_ERR_CMDWORD, b_wid_err_cmdword)
+    MACROFIELD_LINE_BITFIELD(WIDGET_DIDN)
+    MACROFIELD_LINE_BITFIELD(WIDGET_SIDN)
+    MACROFIELD_LINE_BITFIELD(WIDGET_PACTYP)
+    MACROFIELD_LINE_BITFIELD(WIDGET_TNUM)
+    MACROFIELD_LINE_BITFIELD(WIDGET_COHERENT)
+    MACROFIELD_LINE_BITFIELD(WIDGET_DS)
+    MACROFIELD_LINE_BITFIELD(WIDGET_GBR)
+    MACROFIELD_LINE_BITFIELD(WIDGET_VBPM)
+    MACROFIELD_LINE_BITFIELD(WIDGET_ERROR)
+    MACROFIELD_LINE_BITFIELD(WIDGET_BARRIER)
+    MACROFIELD_LINE(BRIDGE_WID_LLP, b_wid_llp)
+    MACROFIELD_LINE_BITFIELD(WIDGET_LLP_MAXRETRY)
+    MACROFIELD_LINE_BITFIELD(WIDGET_LLP_NULLTIMEOUT)
+    MACROFIELD_LINE_BITFIELD(WIDGET_LLP_MAXBURST)
+    MACROFIELD_LINE(BRIDGE_WID_TFLUSH, b_wid_tflush)
+    MACROFIELD_LINE(BRIDGE_WID_AUX_ERR, b_wid_aux_err)
+    MACROFIELD_LINE(BRIDGE_WID_RESP_UPPER, b_wid_resp_upper)
+    MACROFIELD_LINE(BRIDGE_WID_RESP_LOWER, b_wid_resp_lower)
+    MACROFIELD_LINE(BRIDGE_WID_TST_PIN_CTRL, b_wid_tst_pin_ctrl)
+    MACROFIELD_LINE(BRIDGE_DIR_MAP, b_dir_map)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DIRMAP_W_ID)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DIRMAP_RMF_64)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DIRMAP_ADD512)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DIRMAP_OFF)
+    MACROFIELD_LINE(BRIDGE_RAM_PERR, b_ram_perr)
+    MACROFIELD_LINE(BRIDGE_ARB, b_arb)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_REQ_WAIT_TICK_MASK)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_REQ_WAIT_EN_MASK)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_FREEZE_GNT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_HPRI_RING_B2)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_HPRI_RING_B1)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_HPRI_RING_B0)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_LPRI_RING_B2)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_LPRI_RING_B1)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ARB_LPRI_RING_B0)
+    MACROFIELD_LINE(BRIDGE_NIC, b_nic)
+    MACROFIELD_LINE(BRIDGE_PCI_BUS_TIMEOUT, b_pci_bus_timeout)
+    MACROFIELD_LINE(BRIDGE_PCI_CFG, b_pci_cfg)
+    MACROFIELD_LINE(BRIDGE_PCI_ERR_UPPER, b_pci_err_upper)
+    MACROFIELD_LINE(BRIDGE_PCI_ERR_LOWER, b_pci_err_lower)
+    MACROFIELD_LINE(BRIDGE_INT_STATUS, b_int_status)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_MULTI_ERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_PMU_ESIZE_FAULT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_UNEXP_RESP)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_BAD_XRESP_PKT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_BAD_XREQ_PKT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_RESP_XTLK_ERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_REQ_XTLK_ERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_INVLD_ADDR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_UNSUPPORTED_XOP)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_XREQ_FIFO_OFLOW)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_LLP_REC_SNERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_LLP_REC_CBERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_LLP_RCTY)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_LLP_TX_RETRY)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_LLP_TCTY)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_SSRAM_PERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_PCI_ABORT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_PCI_PARITY)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_PCI_SERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_PCI_PERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_PCI_MST_TIMEOUT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_PCI_RETRY_CNT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_XREAD_REQ_TIMEOUT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_GIO_B_ENBL_ERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_ISR_INT_MSK)
+    MACROFIELD_LINE(BRIDGE_INT_ENABLE, b_int_enable)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_UNEXP_RESP)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_PMU_ESIZE_FAULT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_BAD_XRESP_PKT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_BAD_XREQ_PKT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_RESP_XTLK_ERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_REQ_XTLK_ERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_INVLD_ADDR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_UNSUPPORTED_XOP)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_XREQ_FIFO_OFLOW)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_LLP_REC_SNERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_LLP_REC_CBERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_LLP_RCTY)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_LLP_TX_RETRY)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_LLP_TCTY)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_SSRAM_PERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_PCI_ABORT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_PCI_PARITY)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_PCI_SERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_PCI_PERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_PCI_MST_TIMEOUT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_PCI_RETRY_CNT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_XREAD_REQ_TIMEOUT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_GIO_B_ENBL_ERR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IMR_INT_MSK)
+    MACROFIELD_LINE(BRIDGE_INT_RST_STAT, b_int_rst_stat)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IRR_ALL_CLR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IRR_MULTI_CLR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IRR_CRP_GRP_CLR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IRR_RESP_BUF_GRP_CLR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IRR_REQ_DSP_GRP_CLR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IRR_LLP_GRP_CLR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IRR_SSRAM_GRP_CLR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_IRR_PCI_GRP_CLR)
+    MACROFIELD_LINE(BRIDGE_INT_MODE, b_int_mode)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INTMODE_CLR_PKT_EN(7))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INTMODE_CLR_PKT_EN(6))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INTMODE_CLR_PKT_EN(5))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INTMODE_CLR_PKT_EN(4))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INTMODE_CLR_PKT_EN(3))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INTMODE_CLR_PKT_EN(2))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INTMODE_CLR_PKT_EN(1))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INTMODE_CLR_PKT_EN(0))
+    MACROFIELD_LINE(BRIDGE_INT_DEVICE, b_int_device)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_DEV_MASK(7))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_DEV_MASK(6))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_DEV_MASK(5))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_DEV_MASK(4))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_DEV_MASK(3))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_DEV_MASK(2))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_DEV_MASK(1))
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_DEV_MASK(0))
+    MACROFIELD_LINE(BRIDGE_INT_HOST_ERR, b_int_host_err)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_ADDR_HOST)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_INT_ADDR_FLD)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR0, b_int_addr[0].addr)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR(0), b_int_addr[0].addr)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR(1), b_int_addr[1].addr)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR(2), b_int_addr[2].addr)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR(3), b_int_addr[3].addr)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR(4), b_int_addr[4].addr)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR(5), b_int_addr[5].addr)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR(6), b_int_addr[6].addr)
+    MACROFIELD_LINE(BRIDGE_INT_ADDR(7), b_int_addr[7].addr)
+    MACROFIELD_LINE(BRIDGE_DEVICE0, b_device[0].reg)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_ERR_LOCK_EN)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_PAGE_CHK_DIS)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_FORCE_PCI_PAR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_VIRTUAL_EN)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_PMU_WRGA_EN)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_DIR_WRGA_EN)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_DEV_SIZE)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_RT)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_SWAP_PMU)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_SWAP_DIR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_PREF)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_PRECISE)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_COH)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_BARRIER)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_GBR)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_DEV_SWAP)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_DEV_IO_MEM)
+    MACROFIELD_LINE_BITFIELD(BRIDGE_DEV_OFF_MASK)
+    MACROFIELD_LINE(BRIDGE_DEVICE(0), b_device[0].reg)
+    MACROFIELD_LINE(BRIDGE_DEVICE(1), b_device[1].reg)
+    MACROFIELD_LINE(BRIDGE_DEVICE(2), b_device[2].reg)
+    MACROFIELD_LINE(BRIDGE_DEVICE(3), b_device[3].reg)
+    MACROFIELD_LINE(BRIDGE_DEVICE(4), b_device[4].reg)
+    MACROFIELD_LINE(BRIDGE_DEVICE(5), b_device[5].reg)
+    MACROFIELD_LINE(BRIDGE_DEVICE(6), b_device[6].reg)
+    MACROFIELD_LINE(BRIDGE_DEVICE(7), b_device[7].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF0, b_wr_req_buf[0].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF(0), b_wr_req_buf[0].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF(1), b_wr_req_buf[1].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF(2), b_wr_req_buf[2].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF(3), b_wr_req_buf[3].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF(4), b_wr_req_buf[4].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF(5), b_wr_req_buf[5].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF(6), b_wr_req_buf[6].reg)
+    MACROFIELD_LINE(BRIDGE_WR_REQ_BUF(7), b_wr_req_buf[7].reg)
+    MACROFIELD_LINE(BRIDGE_EVEN_RESP, b_even_resp)
+    MACROFIELD_LINE(BRIDGE_ODD_RESP, b_odd_resp)
+    MACROFIELD_LINE(BRIDGE_RESP_STATUS, b_resp_status)
+    MACROFIELD_LINE(BRIDGE_RESP_CLEAR, b_resp_clear)
+    MACROFIELD_LINE(BRIDGE_ATE_RAM, b_int_ate_ram)
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV0, b_type0_cfg_dev[0])
+
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV(0), b_type0_cfg_dev[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(0,0), b_type0_cfg_dev[0].f[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(0,1), b_type0_cfg_dev[0].f[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(0,2), b_type0_cfg_dev[0].f[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(0,3), b_type0_cfg_dev[0].f[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(0,4), b_type0_cfg_dev[0].f[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(0,5), b_type0_cfg_dev[0].f[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(0,6), b_type0_cfg_dev[0].f[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(0,7), b_type0_cfg_dev[0].f[7])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV(1), b_type0_cfg_dev[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(1,0), b_type0_cfg_dev[1].f[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(1,1), b_type0_cfg_dev[1].f[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(1,2), b_type0_cfg_dev[1].f[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(1,3), b_type0_cfg_dev[1].f[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(1,4), b_type0_cfg_dev[1].f[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(1,5), b_type0_cfg_dev[1].f[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(1,6), b_type0_cfg_dev[1].f[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(1,7), b_type0_cfg_dev[1].f[7])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV(2), b_type0_cfg_dev[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(2,0), b_type0_cfg_dev[2].f[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(2,1), b_type0_cfg_dev[2].f[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(2,2), b_type0_cfg_dev[2].f[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(2,3), b_type0_cfg_dev[2].f[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(2,4), b_type0_cfg_dev[2].f[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(2,5), b_type0_cfg_dev[2].f[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(2,6), b_type0_cfg_dev[2].f[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(2,7), b_type0_cfg_dev[2].f[7])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV(3), b_type0_cfg_dev[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(3,0), b_type0_cfg_dev[3].f[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(3,1), b_type0_cfg_dev[3].f[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(3,2), b_type0_cfg_dev[3].f[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(3,3), b_type0_cfg_dev[3].f[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(3,4), b_type0_cfg_dev[3].f[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(3,5), b_type0_cfg_dev[3].f[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(3,6), b_type0_cfg_dev[3].f[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(3,7), b_type0_cfg_dev[3].f[7])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV(4), b_type0_cfg_dev[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(4,0), b_type0_cfg_dev[4].f[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(4,1), b_type0_cfg_dev[4].f[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(4,2), b_type0_cfg_dev[4].f[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(4,3), b_type0_cfg_dev[4].f[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(4,4), b_type0_cfg_dev[4].f[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(4,5), b_type0_cfg_dev[4].f[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(4,6), b_type0_cfg_dev[4].f[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(4,7), b_type0_cfg_dev[4].f[7])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV(5), b_type0_cfg_dev[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(5,0), b_type0_cfg_dev[5].f[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(5,1), b_type0_cfg_dev[5].f[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(5,2), b_type0_cfg_dev[5].f[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(5,3), b_type0_cfg_dev[5].f[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(5,4), b_type0_cfg_dev[5].f[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(5,5), b_type0_cfg_dev[5].f[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(5,6), b_type0_cfg_dev[5].f[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(5,7), b_type0_cfg_dev[5].f[7])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV(6), b_type0_cfg_dev[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(6,0), b_type0_cfg_dev[6].f[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(6,1), b_type0_cfg_dev[6].f[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(6,2), b_type0_cfg_dev[6].f[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(6,3), b_type0_cfg_dev[6].f[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(6,4), b_type0_cfg_dev[6].f[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(6,5), b_type0_cfg_dev[6].f[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(6,6), b_type0_cfg_dev[6].f[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(6,7), b_type0_cfg_dev[6].f[7])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEV(7), b_type0_cfg_dev[7])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(7,0), b_type0_cfg_dev[7].f[0])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(7,1), b_type0_cfg_dev[7].f[1])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(7,2), b_type0_cfg_dev[7].f[2])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(7,3), b_type0_cfg_dev[7].f[3])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(7,4), b_type0_cfg_dev[7].f[4])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(7,5), b_type0_cfg_dev[7].f[5])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(7,6), b_type0_cfg_dev[7].f[6])
+    MACROFIELD_LINE(BRIDGE_TYPE0_CFG_DEVF(7,7), b_type0_cfg_dev[7].f[7])
+
+    MACROFIELD_LINE(BRIDGE_TYPE1_CFG, b_type1_cfg)
+    MACROFIELD_LINE(BRIDGE_PCI_IACK, b_pci_iack)
+    MACROFIELD_LINE(BRIDGE_EXT_SSRAM, b_ext_ate_ram)
+    MACROFIELD_LINE(BRIDGE_DEVIO0, b_devio(0))
+    MACROFIELD_LINE(BRIDGE_DEVIO(0), b_devio(0))
+    MACROFIELD_LINE(BRIDGE_DEVIO(1), b_devio(1))
+    MACROFIELD_LINE(BRIDGE_DEVIO(2), b_devio(2))
+    MACROFIELD_LINE(BRIDGE_DEVIO(3), b_devio(3))
+    MACROFIELD_LINE(BRIDGE_DEVIO(4), b_devio(4))
+    MACROFIELD_LINE(BRIDGE_DEVIO(5), b_devio(5))
+    MACROFIELD_LINE(BRIDGE_DEVIO(6), b_devio(6))
+    MACROFIELD_LINE(BRIDGE_DEVIO(7), b_devio(7))
+    MACROFIELD_LINE(BRIDGE_EXTERNAL_FLASH, b_external_flash)
+};
+#endif
+
+#ifdef __cplusplus
+};
+#endif
+#endif				/* C or C++ */ 
+
+#endif                          /* _ASM_SN_PCI_BRIDGE_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pci_bus_cvlink.h linux/include/asm-ia64/sn/pci/pci_bus_cvlink.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pci_bus_cvlink.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/pci/pci_bus_cvlink.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,29 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PCI_CVLINK_H
+#define _ASM_SN_PCI_CVLINK_H
+
+#define SET_PCIA64(dev) \
+	(((struct sn1_device_sysdata *)((dev)->sysdata))->isa64) = 1
+#define IS_PCIA64(dev)	(((dev)->dma_mask == 0xffffffffffffffffUL) || \
+		(((struct sn1_device_sysdata *)((dev)->sysdata))->isa64))
+#define IS_PCI32G(dev)	((dev)->dma_mask >= 0xffffffff)
+#define IS_PCI32L(dev)	((dev)->dma_mask < 0xffffffff)
+
+struct sn1_widget_sysdata {
+        devfs_handle_t  vhdl;
+};
+
+struct sn1_device_sysdata {
+        devfs_handle_t  vhdl;
+	int		isa64;
+};
+
+#endif				/* _ASM_SN_PCI_CVLINK_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pci_defs.h linux/include/asm-ia64/sn/pci/pci_defs.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pci_defs.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/pci/pci_defs.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,242 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PCI_PCI_DEFS_H
+#define _ASM_SN_PCI_PCI_DEFS_H
+
+/* defines for the PCI bus architecture */
+
+/* Bit layout of address fields for Type-0
+ * Configuration Space cycles.
+ */
+#define	PCI_TYPE0_SLOT_MASK	0xFFFFF800
+#define	PCI_TYPE0_FUNC_MASK	0x00000700
+#define	PCI_TYPE0_REG_MASK	0x000000FF
+
+#define	PCI_TYPE0_SLOT_SHFT	11
+#define	PCI_TYPE0_FUNC_SHFT	8
+#define	PCI_TYPE0_REG_SHFT	0
+
+#define	PCI_TYPE0_FUNC(a)	(((a) & PCI_TYPE0_FUNC_MASK) >> PCI_TYPE0_FUNC_SHFT)
+#define	PCI_TYPE0_REG(a)	(((a) & PCI_TYPE0_REG_MASK) >> PCI_TYPE0_REG_SHFT)
+
+#define	PCI_TYPE0(s,f,r)	((((1<<(s)) << PCI_TYPE0_SLOT_SHFT) & PCI_TYPE0_SLOT_MASK) |\
+				 (((f) << PCI_TYPE0_FUNC_SHFT) & PCI_TYPE0_FUNC_MASK) |\
+				 (((r) << PCI_TYPE0_REG_SHFT) & PCI_TYPE0_REG_MASK))
+
+/* Bit layout of address fields for Type-1
+ * Configuration Space cycles.
+ * NOTE: I'm including the byte offset within
+ * the 32-bit word as part of the register
+ * number as an extension of the layout in
+ * the PCI spec.
+ */
+#define	PCI_TYPE1_BUS_MASK	0x00FF0000	/* bus number, bits 23..16 */
+#define	PCI_TYPE1_SLOT_MASK	0x0000F800	/* device/slot, bits 15..11 (was 0xF100: typo) */
+#define	PCI_TYPE1_FUNC_MASK	0x00000700	/* function, bits 10..8 */
+#define	PCI_TYPE1_REG_MASK	0x000000FF	/* register, bits 7..0 */
+
+#define	PCI_TYPE1_BUS_SHFT	16
+#define	PCI_TYPE1_SLOT_SHFT	11
+#define	PCI_TYPE1_FUNC_SHFT	8
+#define	PCI_TYPE1_REG_SHFT	0
+
+#define	PCI_TYPE1_BUS(a)	(((a) & PCI_TYPE1_BUS_MASK) >> PCI_TYPE1_BUS_SHFT)
+#define	PCI_TYPE1_SLOT(a)	(((a) & PCI_TYPE1_SLOT_MASK) >> PCI_TYPE1_SLOT_SHFT)
+#define	PCI_TYPE1_FUNC(a)	(((a) & PCI_TYPE1_FUNC_MASK) >> PCI_TYPE1_FUNC_SHFT)
+#define	PCI_TYPE1_REG(a)	(((a) & PCI_TYPE1_REG_MASK) >> PCI_TYPE1_REG_SHFT)
+
+#define	PCI_TYPE1(b,s,f,r)	((((b) << PCI_TYPE1_BUS_SHFT) & PCI_TYPE1_BUS_MASK) |\
+				 (((s) << PCI_TYPE1_SLOT_SHFT) & PCI_TYPE1_SLOT_MASK) |\
+				 (((f) << PCI_TYPE1_FUNC_SHFT) & PCI_TYPE1_FUNC_MASK) |\
+				 (((r) << PCI_TYPE1_REG_SHFT) & PCI_TYPE1_REG_MASK))
+
+/* Byte offsets of registers in CFG space
+ */
+#define	PCI_CFG_VENDOR_ID	0x00		/* Vendor ID (2 bytes) */
+#define	PCI_CFG_DEVICE_ID	0x02		/* Device ID (2 bytes) */
+
+#define	PCI_CFG_COMMAND		0x04		/* Command (2 bytes) */
+#define	PCI_CFG_STATUS		0x06		/* Status (2 bytes) */
+
+/* NOTE: if you are using a C "switch" statement to
+ * differentiate between the Config space registers, be
+ * aware that PCI_CFG_CLASS_CODE and PCI_CFG_BASE_CLASS
+ * are the same offset.
+ */
+#define	PCI_CFG_REV_ID		0x08		/* Revision Id (1 byte) */
+#define	PCI_CFG_CLASS_CODE	0x09		/* Class Code (3 bytes) */
+#define	PCI_CFG_BASE_CLASS	0x09		/* Base Class (1 byte) */
+#define	PCI_CFG_SUB_CLASS	0x0A		/* Sub Class (1 byte) */
+#define	PCI_CFG_PROG_IF		0x0B		/* Prog Interface (1 byte) */
+
+#define	PCI_CFG_CACHE_LINE	0x0C		/* Cache line size (1 byte) */
+#define	PCI_CFG_LATENCY_TIMER	0x0D		/* Latency Timer (1 byte) */
+#define	PCI_CFG_HEADER_TYPE	0x0E		/* Header Type (1 byte) */
+#define	PCI_CFG_BIST		0x0F		/* Built In Self Test */
+
+#define	PCI_CFG_BASE_ADDR_0	0x10		/* Base Address (4 bytes) */
+#define	PCI_CFG_BASE_ADDR_1	0x14		/* Base Address (4 bytes) */
+#define	PCI_CFG_BASE_ADDR_2	0x18		/* Base Address (4 bytes) */
+#define	PCI_CFG_BASE_ADDR_3	0x1C		/* Base Address (4 bytes) */
+#define	PCI_CFG_BASE_ADDR_4	0x20		/* Base Address (4 bytes) */
+#define	PCI_CFG_BASE_ADDR_5	0x24		/* Base Address (4 bytes) */
+
+#define	PCI_CFG_BASE_ADDR_OFF	0x04		/* Base Address Offset (1..5)*/
+#define	PCI_CFG_BASE_ADDR(n)	(PCI_CFG_BASE_ADDR_0 + (n)*PCI_CFG_BASE_ADDR_OFF)
+#define	PCI_CFG_BASE_ADDRS	6		/* up to this many BASE regs */
+
+#define	PCI_CFG_CARDBUS_CIS	0x28		/* Cardbus CIS Pointer (4B) */
+
+#define	PCI_CFG_SUBSYS_VEND_ID	0x2C		/* Subsystem Vendor ID (2B) */
+#define	PCI_CFG_SUBSYS_ID	0x2E		/* Subsystem ID */
+
+#define	PCI_EXPANSION_ROM	0x30		/* Expansion Rom Base (4B) */
+
+#define	PCI_INTR_LINE		0x3C		/* Interrupt Line (1B) */
+#define	PCI_INTR_PIN		0x3D		/* Interrupt Pin (1B) */
+#define	PCI_MIN_GNT		0x3E		/* Minimum Grant (1B) */
+#define	PCI_MAX_LAT		0x3F		/* Maximum Latency (1B) */
+
+#define PCI_CFG_VEND_SPECIFIC	0x40		/* first vendor specific reg */
+
+/* layout for Type 0x01 headers */
+
+#define	PCI_CFG_PPB_BUS_PRI		0x18	/* immediate upstream bus # */
+#define	PCI_CFG_PPB_BUS_SEC		0x19	/* immediate downstream bus # */
+#define	PCI_CFG_PPB_BUS_SUB		0x1A	/* last downstream bus # */
+#define	PCI_CFG_PPB_SEC_LAT		0x1B	/* latency timer for SEC bus */
+#define PCI_CFG_PPB_IOBASE		0x1C	/* IO Base Addr bits 12..15 */
+#define PCI_CFG_PPB_IOLIM		0x1D	/* IO Limit Addr bits 12..15 */
+#define	PCI_CFG_PPB_SEC_STAT		0x1E	/* Secondary Status */
+#define PCI_CFG_PPB_MEMBASE		0x20	/* MEM Base Addr bits 16..31 */
+#define PCI_CFG_PPB_MEMLIM		0x22	/* MEM Limit Addr bits 16..31 */
+#define PCI_CFG_PPB_MEMPFBASE		0x24	/* PfMEM Base Addr bits 16..31 */
+#define PCI_CFG_PPB_MEMPFLIM		0x26	/* PfMEM Limit Addr bits 16..31 */
+#define PCI_CFG_PPB_MEMPFBASEHI		0x28	/* PfMEM Base Addr bits 32..63 */
+#define PCI_CFG_PPB_MEMPFLIMHI		0x2C	/* PfMEM Limit Addr bits 32..63 */
+#define PCI_CFG_PPB_IOBASEHI		0x30	/* IO Base Addr bits 16..31 */
+#define PCI_CFG_PPB_IOLIMHI		0x32	/* IO Limit Addr bits 16..31 */
+#define	PCI_CFG_PPB_SUB_VENDOR		0x34	/* Subsystem Vendor ID */
+#define	PCI_CFG_PPB_SUB_DEVICE		0x36	/* Subsystem Device ID */
+#define	PCI_CFG_PPB_INT_PIN		0x3D	/* Interrupt Pin */
+#define	PCI_CFG_PPB_BRIDGE_CTRL		0x3E	/* Bridge Control */
+     /* XXX- these might be DEC 21152 specific */
+#define	PCI_CFG_PPB_CHIP_CTRL		0x40
+#define	PCI_CFG_PPB_DIAG_CTRL		0x41
+#define	PCI_CFG_PPB_ARB_CTRL		0x42
+#define	PCI_CFG_PPB_SERR_DISABLE	0x64
+#define	PCI_CFG_PPB_CLK2_CTRL		0x68
+#define	PCI_CFG_PPB_SERR_STATUS		0x6A
+
+/* Command Register layout (0x04) */
+#define	PCI_CMD_IO_SPACE	0x001		/* I/O Space device */
+#define	PCI_CMD_MEM_SPACE	0x002		/* Memory Space */
+#define	PCI_CMD_BUS_MASTER	0x004		/* Bus Master */
+#define	PCI_CMD_SPEC_CYCLES	0x008		/* Special Cycles */
+#define	PCI_CMD_MEMW_INV_ENAB	0x010		/* Memory Write Inv Enable */
+#define	PCI_CMD_VGA_PALETTE_SNP	0x020		/* VGA Palette Snoop */
+#define	PCI_CMD_PAR_ERR_RESP	0x040		/* Parity Error Response */
+#define	PCI_CMD_WAIT_CYCLE_CTL	0x080		/* Wait Cycle Control */
+#define	PCI_CMD_SERR_ENABLE	0x100		/* SERR# Enable */
+#define	PCI_CMD_F_BK_BK_ENABLE	0x200		/* Fast Back-to-Back Enable */
+
+/* Status Register Layout (0x06) */
+#define	PCI_STAT_PAR_ERR_DET	0x8000		/* Detected Parity Error */
+#define	PCI_STAT_SYS_ERR	0x4000		/* Signaled System Error */
+#define	PCI_STAT_RCVD_MSTR_ABT	0x2000		/* Received Master Abort */
+#define	PCI_STAT_RCVD_TGT_ABT	0x1000		/* Received Target Abort */
+#define	PCI_STAT_SGNL_TGT_ABT	0x0800		/* Signaled Target Abort */
+
+#define	PCI_STAT_DEVSEL_TIMING	0x0600		/* DEVSEL Timing Mask */
+#define	DEVSEL_TIMING(_x)	(((_x) >> 9) & 3)	/* devsel tim macro */
+#define	DEVSEL_FAST		0		/* Fast timing */
+#define	DEVSEL_MEDIUM		1		/* Medium timing */
+#define	DEVSEL_SLOW		2		/* Slow timing */
+
+#define	PCI_STAT_DATA_PAR_ERR	0x0100		/* Data Parity Err Detected */
+#define	PCI_STAT_F_BK_BK_CAP	0x0080		/* Fast Back-to-Back Capable */
+#define	PCI_STAT_UDF_SUPP	0x0040		/* UDF Supported */
+#define	PCI_STAT_66MHZ_CAP	0x0020		/* 66 MHz Capable */
+
+/* BIST Register Layout (0x0F) */
+#define	PCI_BIST_BIST_CAP	0x80		/* BIST Capable */
+#define	PCI_BIST_START_BIST	0x40		/* Start BIST */
+#define	PCI_BIST_CMPLTION_MASK	0x0F		/* COMPLETION MASK */
+#define	PCI_BIST_CMPL_OK	0x00		/* 0 value is completion OK */
+
+/* Base Address Register 0x10 */
+#define	PCI_BA_IO_SPACE		0x1		/* I/O Space Marker */
+#define	PCI_BA_MEM_LOCATION	0x6		/* 2 bits for location avail */
+#define	PCI_BA_MEM_32BIT	0x0		/* Anywhere in 32bit space */
+#define	PCI_BA_MEM_1MEG		0x2		/* Locate below 1 Meg */
+#define	PCI_BA_MEM_64BIT	0x4		/* Anywhere in 64bit space */
+#define	PCI_BA_PREFETCH		0x8		/* Prefetchable, no side effect */
+
+/* PIO interface macros */
+
+#ifndef IOC3_EMULATION
+
+#define PCI_INB(x)          (*((volatile char*)(x)))
+#define PCI_INH(x)          (*((volatile short*)(x)))
+#define PCI_INW(x)          (*((volatile int*)(x)))
+#define PCI_OUTB(x,y)       (*((volatile char*)(x)) = (y))
+#define PCI_OUTH(x,y)       (*((volatile short*)(x)) = (y))
+#define PCI_OUTW(x,y)       (*((volatile int*)(x)) = (y))
+
+#else
+
+extern uint pci_read(void * address, int type);
+extern void pci_write(void * address, int data, int type);
+
+#define BYTE   1
+#define HALF   2
+#define WORD   4
+
+#define PCI_INB(x)          pci_read((void *)(x),BYTE)
+#define PCI_INH(x)          pci_read((void *)(x),HALF)
+#define PCI_INW(x)          pci_read((void *)(x),WORD)
+#define PCI_OUTB(x,y)       pci_write((void *)(x),(y),BYTE)
+#define PCI_OUTH(x,y)       pci_write((void *)(x),(y),HALF)
+#define PCI_OUTW(x,y)       pci_write((void *)(x),(y),WORD)
+
+#endif /* !IOC3_EMULATION */
+						/* effects on reads, merges */
+
+#ifdef CONFIG_SGI_IP22
+#define BYTECOUNT_W_GIO	    0xbf400000
+#endif
+
+/*
+ * Definition of address layouts for PCI Config mechanism #1
+ * XXX- These largely duplicate PCI_TYPE1 constants at the top
+ * of the file; the two groups should probably be combined.
+ */
+
+#define CFG1_ADDR_REGISTER_MASK		0x000000fc
+#define CFG1_ADDR_FUNCTION_MASK		0x00000700
+#define CFG1_ADDR_DEVICE_MASK		0x0000f800
+#define CFG1_ADDR_BUS_MASK		0x00ff0000
+
+#define CFG1_REGISTER_SHIFT		2
+#define CFG1_FUNCTION_SHIFT		8
+#define CFG1_DEVICE_SHIFT		11
+#define CFG1_BUS_SHIFT			16
+
+#ifdef CONFIG_SGI_IP32
+ /* Definitions related to IP32 PCI Bridge policy
+  * XXX- should probably be moved to a mace-specific header
+  */
+#define PCI_CONFIG_BITS			0xfe0085ff
+#define	PCI_CONTROL_MRMRA_ENABLE	0x00000800
+#define PCI_FIRST_IO_ADDR		0x1000
+#define PCI_IO_MAP_INCR			0x1000
+#endif /* CONFIG_SGI_IP32 */
+
+#endif /* _ASM_SN_PCI_PCI_DEFS_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pcibr.h linux/include/asm-ia64/sn/pci/pcibr.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pcibr.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/pci/pcibr.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,360 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PCI_PCIBR_H
+#define _ASM_SN_PCI_PCIBR_H
+
+#if defined(__KERNEL__)
+
+#include <asm/sn/dmamap.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/pio.h>
+
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/bridge.h>
+
+/* =====================================================================
+ *    symbolic constants used by pcibr's xtalk bus provider
+ */
+
+#define PCIBR_PIOMAP_BUSY		0x80000000
+
+#define PCIBR_DMAMAP_BUSY		0x80000000
+#define	PCIBR_DMAMAP_SSRAM		0x40000000
+
+#define PCIBR_INTR_BLOCKED		0x40000000
+#define PCIBR_INTR_BUSY			0x80000000
+
+#if LANGUAGE_C
+
+/* =====================================================================
+ *    opaque types used by pcibr's xtalk bus provider
+ */
+
+typedef struct pcibr_piomap_s *pcibr_piomap_t;
+typedef struct pcibr_dmamap_s *pcibr_dmamap_t;
+typedef struct pcibr_intr_s *pcibr_intr_t;
+
+/* =====================================================================
+ *    primary entry points: Bridge (pcibr) device driver
+ *
+ *	These functions are normal device driver entry points
+ *	and are called along with the similar entry points from
+ *	other device drivers. They are included here as documentation
+ *	of their existence and purpose.
+ *
+ *	pcibr_init() is called to inform us that there is a pcibr driver
+ *	configured into the kernel; it is responsible for registering
+ *	as a crosstalk widget and providing a routine to be called
+ *	when a widget with the proper part number is observed.
+ *
+ *	pcibr_attach() is called for each vertex in the hardware graph
+ *	corresponding to a crosstalk widget with the manufacturer
+ *	code and part number registered by pcibr_init().
+ */
+
+extern void		pcibr_init(void);
+
+extern int		pcibr_attach(devfs_handle_t);
+
+/* =====================================================================
+ *    bus provider function table
+ *
+ *	Normally, this table is only handed off explicitly
+ *	during provider initialization, and the PCI generic
+ *	layer will stash a pointer to it in the vertex; however,
+ *	exporting it explicitly enables a performance hack in
+ *	the generic PCI provider where if we know at compile
+ *	time that the only possible PCI provider is a
+ *	pcibr, we can go directly to this ops table.
+ */
+
+extern pciio_provider_t pcibr_provider;
+
+/* =====================================================================
+ *    secondary entry points: pcibr PCI bus provider
+ *
+ *	These functions are normally exported explicitly by
+ *	a direct call from the pcibr initialization routine
+ *	into the generic crosstalk provider; they are included
+ *	here to enable a more aggressive performance hack in
+ *	the generic crosstalk layer, where if we know that the
+ *	only possible crosstalk provider is pcibr, and we can
+ *	guarantee that all entry points are properly named, and
+ *	we can deal with the implicit casting properly, then
+ *	we can turn many of the generic provider routines into
+ *	plain branches, or even eliminate them (given sufficient
+ *	smarts on the part of the compilation system).
+ */
+
+extern pcibr_piomap_t	pcibr_piomap_alloc(devfs_handle_t dev,
+					   device_desc_t dev_desc,
+					   pciio_space_t space,
+					   iopaddr_t pci_addr,
+					   size_t byte_count,
+					   size_t byte_count_max,
+					   unsigned flags);
+
+extern void		pcibr_piomap_free(pcibr_piomap_t piomap);
+
+extern caddr_t		pcibr_piomap_addr(pcibr_piomap_t piomap,
+					  iopaddr_t xtalk_addr,
+					  size_t byte_count);
+
+extern void		pcibr_piomap_done(pcibr_piomap_t piomap);
+
+extern caddr_t		pcibr_piotrans_addr(devfs_handle_t dev,
+					    device_desc_t dev_desc,
+					    pciio_space_t space,
+					    iopaddr_t pci_addr,
+					    size_t byte_count,
+					    unsigned flags);
+
+extern iopaddr_t	pcibr_piospace_alloc(devfs_handle_t dev,
+					     device_desc_t dev_desc,
+					     pciio_space_t space,
+					     size_t byte_count,
+					     size_t alignment);
+extern void		pcibr_piospace_free(devfs_handle_t dev,
+					    pciio_space_t space,
+					    iopaddr_t pciaddr,
+					    size_t byte_count);
+
+extern pcibr_dmamap_t	pcibr_dmamap_alloc(devfs_handle_t dev,
+					   device_desc_t dev_desc,
+					   size_t byte_count_max,
+					   unsigned flags);
+
+extern void		pcibr_dmamap_free(pcibr_dmamap_t dmamap);
+
+extern iopaddr_t	pcibr_dmamap_addr(pcibr_dmamap_t dmamap,
+					  paddr_t paddr,
+					  size_t byte_count);
+
+extern alenlist_t	pcibr_dmamap_list(pcibr_dmamap_t dmamap,
+					  alenlist_t palenlist,
+					  unsigned flags);
+
+extern void		pcibr_dmamap_done(pcibr_dmamap_t dmamap);
+
+extern iopaddr_t	pcibr_dmatrans_addr(devfs_handle_t dev,
+					    device_desc_t dev_desc,
+					    paddr_t paddr,
+					    size_t byte_count,
+					    unsigned flags);
+
+extern alenlist_t	pcibr_dmatrans_list(devfs_handle_t dev,
+					    device_desc_t dev_desc,
+					    alenlist_t palenlist,
+					    unsigned flags);
+
+extern void		pcibr_dmamap_drain(pcibr_dmamap_t map);
+
+extern void		pcibr_dmaaddr_drain(devfs_handle_t vhdl,
+					    paddr_t addr,
+					    size_t bytes);
+
+extern void		pcibr_dmalist_drain(devfs_handle_t vhdl,
+					    alenlist_t list);
+
+typedef unsigned	pcibr_intr_ibit_f(pciio_info_t info,
+					  pciio_intr_line_t lines);
+
+extern void		pcibr_intr_ibit_set(devfs_handle_t, pcibr_intr_ibit_f *);
+
+extern pcibr_intr_t	pcibr_intr_alloc(devfs_handle_t dev,
+					 device_desc_t dev_desc,
+					 pciio_intr_line_t lines,
+					 devfs_handle_t owner_dev);
+
+extern void		pcibr_intr_free(pcibr_intr_t intr);
+
+extern int		pcibr_intr_connect(pcibr_intr_t intr,
+					   intr_func_t intr_func,
+					   intr_arg_t intr_arg,
+					   void *thread);
+
+extern void		pcibr_intr_disconnect(pcibr_intr_t intr);
+
+extern devfs_handle_t	pcibr_intr_cpu_get(pcibr_intr_t intr);
+
+extern void		pcibr_provider_startup(devfs_handle_t pcibr);
+
+extern void		pcibr_provider_shutdown(devfs_handle_t pcibr);
+
+extern int		pcibr_reset(devfs_handle_t dev);
+
+extern int              pcibr_write_gather_flush(devfs_handle_t dev);
+
+extern pciio_endian_t	pcibr_endian_set(devfs_handle_t dev,
+					 pciio_endian_t device_end,
+					 pciio_endian_t desired_end);
+
+extern pciio_priority_t pcibr_priority_set(devfs_handle_t dev,
+					   pciio_priority_t device_prio);
+
+extern uint64_t		pcibr_config_get(devfs_handle_t conn,
+					 unsigned reg,
+					 unsigned size);
+
+extern void		pcibr_config_set(devfs_handle_t conn,
+					 unsigned reg,
+					 unsigned size,
+					 uint64_t value);
+
+extern int		pcibr_error_devenable(devfs_handle_t pconn_vhdl,
+					      int error_code);
+
+extern pciio_slot_t	pcibr_error_extract(devfs_handle_t pcibr_vhdl,
+					    pciio_space_t *spacep,
+					    iopaddr_t *addrp);
+
+extern int		pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
+					int *count_vchan0,
+					int *count_vchan1);
+
+extern int		pcibr_wrb_flush(devfs_handle_t pconn_vhdl);
+extern int		pcibr_rrb_check(devfs_handle_t pconn_vhdl,
+					int *count_vchan0,
+					int *count_vchan1,
+					int *count_reserved,
+					int *count_pool);
+
+extern int		pcibr_alloc_all_rrbs(devfs_handle_t vhdl, int even_odd,
+					     int dev_1_rrbs, int virt1,
+					     int dev_2_rrbs, int virt2,
+					     int dev_3_rrbs, int virt3,
+					     int dev_4_rrbs, int virt4);
+
+typedef void
+rrb_alloc_funct_f	(devfs_handle_t xconn_vhdl,
+			 int *vendor_list);
+
+typedef rrb_alloc_funct_f      *rrb_alloc_funct_t;
+
+void			pcibr_set_rrb_callback(devfs_handle_t xconn_vhdl,
+					       rrb_alloc_funct_f *func);
+
+extern void		pcibr_device_unregister(devfs_handle_t);
+extern int		pcibr_dma_enabled(devfs_handle_t);
+/*
+ * Bridge-specific flags that can be set via pcibr_device_flags_set
+ * and cleared via pcibr_device_flags_clear.  Other flags are
+ * more generic and are manipulated through PCI-generic interfaces.
+ *
+ * Note that all PCI implementation-specific flags (Bridge flags, in
+ * this case) are in bits 15-31.  The lower 15 bits are reserved
+ * for PCI-generic flags.
+ *
+ * Some of these flags have been "promoted" to the
+ * generic layer, so they can be used without having
+ * to "know" that the PCI bus is hosted by a Bridge.
+ *
+ * PCIBR_NO_ATE_ROUNDUP: Request that no rounding up be done when 
+ * allocating ATE's. ATE count computation will assume that the
+ * address to be mapped will start on a page boundary.
+ */
+#define PCIBR_NO_ATE_ROUNDUP    0x00008000
+#define PCIBR_WRITE_GATHER	0x00010000	/* please use PCIIO version */
+#define PCIBR_NOWRITE_GATHER	0x00020000	/* please use PCIIO version */
+#define PCIBR_PREFETCH		0x00040000	/* please use PCIIO version */
+#define PCIBR_NOPREFETCH	0x00080000	/* please use PCIIO version */
+#define PCIBR_PRECISE		0x00100000
+#define PCIBR_NOPRECISE		0x00200000
+#define PCIBR_BARRIER		0x00400000
+#define PCIBR_NOBARRIER		0x00800000
+#define PCIBR_VCHAN0		0x01000000
+#define PCIBR_VCHAN1		0x02000000
+#define PCIBR_64BIT		0x04000000
+#define PCIBR_NO64BIT		0x08000000
+#define PCIBR_SWAP		0x10000000
+#define PCIBR_NOSWAP		0x20000000
+
+#define	PCIBR_EXTERNAL_ATES	0x40000000	/* uses external ATEs */
+#define	PCIBR_ACTIVE		0x80000000	/* need a "done" */
+
+/* Flags that have meaning to pcibr_device_flags_{set,clear} */
+#define PCIBR_DEVICE_FLAGS (	\
+	PCIBR_WRITE_GATHER	|\
+	PCIBR_NOWRITE_GATHER	|\
+	PCIBR_PREFETCH		|\
+	PCIBR_NOPREFETCH	|\
+	PCIBR_PRECISE		|\
+	PCIBR_NOPRECISE		|\
+	PCIBR_BARRIER		|\
+	PCIBR_NOBARRIER		\
+)
+
+/* Flags that have meaning to *_dmamap_alloc, *_dmatrans_{addr,list} */
+#define PCIBR_DMA_FLAGS (	\
+	PCIBR_PREFETCH		|\
+	PCIBR_NOPREFETCH	|\
+	PCIBR_PRECISE		|\
+	PCIBR_NOPRECISE		|\
+	PCIBR_BARRIER		|\
+	PCIBR_NOBARRIER		|\
+	PCIBR_VCHAN0		|\
+	PCIBR_VCHAN1		\
+)
+
+typedef int		pcibr_device_flags_t;
+
+/*
+ * Set bits in the Bridge Device(x) register for this device.
+ * "flags" are defined above. NOTE: this includes turning
+ * things *OFF* as well as turning them *ON* ...
+ */
+extern int		pcibr_device_flags_set(devfs_handle_t dev,
+					     pcibr_device_flags_t flags);
+
+/*
+ * Allocate Read Response Buffers for use by the specified device.
+ * count_vchan0 is the total number of buffers desired for the
+ * "normal" channel.  count_vchan1 is the total number of buffers
+ * desired for the "virtual" channel.  Returns 0 on success, or
+ * <0 on failure, which occurs when we're unable to allocate any
+ * buffers to a channel that desires at least one buffer.
+ */
+extern int		pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
+					int *count_vchan0,
+					int *count_vchan1);
+
+/*
+ * Get the starting PCIbus address out of the given DMA map.
+ * This function is supposed to be used by a close friend of PCI bridge
+ * since it relies on the fact that the starting address of the map is fixed at
+ * the allocation time in the current implementation of PCI bridge.
+ */
+extern iopaddr_t	pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
+
+extern xwidget_intr_preset_f pcibr_xintr_preset;
+
+extern void		pcibr_hints_fix_rrbs(devfs_handle_t);
+extern void		pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
+extern void		pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, uint64_t);
+extern void		pcibr_hints_handsoff(devfs_handle_t);
+
+typedef unsigned	pcibr_intr_bits_f(pciio_info_t, pciio_intr_line_t);
+extern void		pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
+
+extern int		pcibr_asic_rev(devfs_handle_t);
+
+#endif 	/* LANGUAGE_C */
+#endif	/* #if defined(__KERNEL__) */
+/* 
+ * Some useful ioctls into the pcibr driver
+ */
+#define PCIBR			'p'
+#define _PCIBR(x)		((PCIBR << 8) | (x))
+
+#define PCIBR_SLOT_POWERUP	_PCIBR(1)
+#define PCIBR_SLOT_SHUTDOWN	_PCIBR(2)
+#define PCIBR_SLOT_INQUIRY	_PCIBR(3)
+
+#endif				/* _ASM_SN_PCI_PCIBR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pcibr_private.h linux/include/asm-ia64/sn/pci/pcibr_private.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pcibr_private.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/pci/pcibr_private.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,415 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PCI_PCIBR_PRIVATE_H
+#define _ASM_SN_PCI_PCIBR_PRIVATE_H
+
+/*
+ * pcibr_private.h -- private definitions for pcibr
+ * only the pcibr driver (and its closest friends)
+ * should ever peek into this file.
+ */
+
+#include <asm/sn/pci/pciio_private.h>
+
+/*
+ * convenience typedefs
+ */
+
+typedef uint64_t pcibr_DMattr_t;
+typedef uint32_t pcibr_ATEattr_t;
+
+typedef struct pcibr_info_s *pcibr_info_t, **pcibr_info_h;
+typedef struct pcibr_soft_s *pcibr_soft_t;
+typedef struct pcibr_soft_slot_s *pcibr_soft_slot_t;
+typedef struct pcibr_hints_s *pcibr_hints_t;
+typedef struct pcibr_intr_list_s *pcibr_intr_list_t;
+typedef struct pcibr_intr_wrap_s *pcibr_intr_wrap_t;
+
+/*
+ * Bridge sets up PIO using this information.
+ */
+struct pcibr_piomap_s {
+    struct pciio_piomap_s   bp_pp;	/* generic stuff */
+
+#define	bp_flags	bp_pp.pp_flags	/* PCIBR_PIOMAP flags */
+#define	bp_dev		bp_pp.pp_dev	/* associated pci card */
+#define	bp_slot		bp_pp.pp_slot	/* which slot the card is in */
+#define	bp_space	bp_pp.pp_space	/* which address space */
+#define	bp_pciaddr	bp_pp.pp_pciaddr	/* starting offset of mapping */
+#define	bp_mapsz	bp_pp.pp_mapsz	/* size of this mapping */
+#define	bp_kvaddr	bp_pp.pp_kvaddr	/* kernel virtual address to use */
+
+    iopaddr_t               bp_xtalk_addr;	/* corresponding xtalk address */
+    xtalk_piomap_t          bp_xtalk_pio;	/* corresponding xtalk resource */
+    pcibr_piomap_t	    bp_next;	/* Next piomap on the list */
+    pcibr_soft_t	    bp_soft;	/* backpointer to bridge soft data */
+    int			    bp_toc[1];	/* PCI timeout counter */
+
+};
+
+/*
+ * Bridge sets up DMA using this information.
+ */
+struct pcibr_dmamap_s {
+    struct pciio_dmamap_s   bd_pd;
+#define	bd_flags	bd_pd.pd_flags	/* PCIBR_DMAMAP flags */
+#define	bd_dev		bd_pd.pd_dev	/* associated pci card */
+#define	bd_slot		bd_pd.pd_slot	/* which slot the card is in */
+    struct pcibr_soft_s    *bd_soft;	/* pcibr soft state backptr */
+    xtalk_dmamap_t          bd_xtalk;	/* associated xtalk resources */
+
+    size_t                  bd_max_size;	/* maximum size of mapping */
+    xwidgetnum_t            bd_xio_port;	/* target XIO port */
+    iopaddr_t               bd_xio_addr;	/* target XIO address */
+    iopaddr_t               bd_pci_addr;	/* via PCI address */
+
+    int                     bd_ate_index;	/* Address Translation Entry Index */
+    int                     bd_ate_count;	/* number of ATE's allocated */
+    bridge_ate_p            bd_ate_ptr;		/* where to write first ATE */
+    bridge_ate_t            bd_ate_proto;	/* prototype ATE (for xioaddr=0) */
+    bridge_ate_t            bd_ate_prime;	/* value of 1st ATE written */
+};
+
+/*
+ * Bridge sets up interrupts using this information.
+ */
+
+struct pcibr_intr_s {
+    struct pciio_intr_s     bi_pi;
+#define	bi_flags	bi_pi.pi_flags	/* PCIBR_INTR flags */
+#define	bi_dev		bi_pi.pi_dev	/* associated pci card */
+#define	bi_lines	bi_pi.pi_lines	/* which PCI interrupt line(s) */
+#define	bi_func		bi_pi.pi_func	/* handler function (when connected) */
+#define	bi_arg		bi_pi.pi_arg	/* handler parameter (when connected) */
+#define bi_tinfo	bi_pi.pi_tinfo	/* Thread info (when connected) */
+#define bi_mustruncpu	bi_pi.pi_mustruncpu /* Where we must run. */
+#define bi_irq		bi_pi.pi_irq	/* IRQ assigned. */
+#define bi_cpu		bi_pi.pi_cpu	/* cpu assigned. */
+    unsigned                bi_ibits;	/* which Bridge interrupt bit(s) */
+    pcibr_soft_t            bi_soft;	/* shortcut to soft info */
+};
+
+/*
+ * per-connect point pcibr data, including
+ * standard pciio data in-line:
+ */
+struct pcibr_info_s {
+    struct pciio_info_s	    f_c;	/* MUST BE FIRST. */
+#define	f_vertex	f_c.c_vertex	/* back pointer to vertex */
+#define	f_bus		f_c.c_bus	/* which bus the card is in */
+#define	f_slot		f_c.c_slot	/* which slot the card is in */
+#define	f_func		f_c.c_func	/* which func (on multi-func cards) */
+#define	f_vendor	f_c.c_vendor	/* PCI card "vendor" code */
+#define	f_device	f_c.c_device	/* PCI card "device" code */
+#define	f_master	f_c.c_master	/* PCI bus provider */
+#define	f_mfast		f_c.c_mfast	/* cached fastinfo from c_master */
+#define	f_pops		f_c.c_pops	/* cached provider from c_master */
+#define	f_efunc		f_c.c_efunc	/* error handling function */
+#define	f_einfo		f_c.c_einfo	/* first parameter for efunc */
+#define	f_window	f_c.c_window	/* state of BASE regs */
+#define	f_rbase		f_c.c_rbase	/* expansion rom base */
+#define	f_rsize		f_c.c_rsize	/* expansion rom size */
+#define	f_piospace	f_c.c_piospace	/* additional I/O spaces allocated */
+
+    /* pcibr-specific connection state */
+    int			    f_ibit[4];	/* Bridge bit for each INTx */
+    pcibr_piomap_t	    f_piomap;
+};
+
+/* =====================================================================
+ *          Shared Interrupt Information
+ */
+
+struct pcibr_intr_list_s {
+    pcibr_intr_list_t       il_next;
+    pcibr_intr_t            il_intr;
+    volatile bridgereg_t   *il_wrbf;	/* ptr to b_wr_req_buf[] */
+};
+
+/* =====================================================================
+ *          Interrupt Wrapper Data
+ */
+struct pcibr_intr_wrap_s {
+    pcibr_soft_t            iw_soft;	/* which bridge */
+    volatile bridgereg_t   *iw_stat;	/* ptr to b_int_status */
+    bridgereg_t             iw_intr;	/* bits in b_int_status */
+    pcibr_intr_list_t       iw_list;	/* ghostbusters! */
+};
+
+#define	PCIBR_ISR_ERR_START	8
+#define PCIBR_ISR_MAX_ERRS 	32
+
+/* =====================================================================
+ *            Bridge Device State structure
+ *
+ *      one instance of this structure is kept for each
+ *      Bridge ASIC in the system.
+ */
+
+struct pcibr_soft_s {
+    devfs_handle_t            bs_conn;	/* xtalk connection point */
+    devfs_handle_t            bs_vhdl;	/* vertex owned by pcibr */
+    int                     bs_int_enable;	/* Mask of enabled intrs */
+    bridge_t               *bs_base;	/* PIO pointer to Bridge chip */
+    char                   *bs_name;	/* hw graph name */
+    xwidgetnum_t            bs_xid;	/* Bridge's xtalk ID number */
+    devfs_handle_t            bs_master;	/* xtalk master vertex */
+    xwidgetnum_t            bs_mxid;	/* master's xtalk ID number */
+
+    iopaddr_t               bs_dir_xbase;	/* xtalk address for 32-bit PCI direct map */
+    xwidgetnum_t	    bs_dir_xport;	/* xtalk port for 32-bit PCI direct map */
+
+    struct map             *bs_int_ate_map;	/* rmalloc map for internal ATEs */
+    struct map             *bs_ext_ate_map;	/* rmalloc map for external ATEs */
+    short		    bs_int_ate_size;	/* number of internal ates */
+    short		    bs_xbridge;		/* if 1 then xbridge */
+
+    int                     bs_rev_num;	/* revision number of Bridge */
+
+    unsigned                bs_dma_flags;	/* revision-implied DMA flags */
+
+    /*
+     * Lock used primarily to get mutual exclusion while managing any
+     * bridge resources..
+     */
+    lock_t                  bs_lock;
+    
+    devfs_handle_t	    bs_noslot_conn;	/* NO-SLOT connection point */
+    pcibr_info_t	    bs_noslot_info;
+    struct pcibr_soft_slot_s {
+	/* information we keep about each CFG slot */
+
+	/* some devices (ioc3 in non-slotted
+	 * configurations, sometimes) make use
+	 * of more than one REQ/GNT/INT* signal
+	 * sets. The slot corresponding to the
+	 * IDSEL that the device responds to is
+	 * called the host slot; the slot
+	 * numbers that the device is stealing
+	 * REQ/GNT/INT bits from are known as
+	 * the guest slots.
+	 */
+	int                     has_host;
+	pciio_slot_t            host_slot;
+	devfs_handle_t		slot_conn;
+	/* Potentially several connection points
+	 * for this slot. bss_ninfo is how many,
+	 * and bss_infos is a pointer to
+	 * an array of pcibr_info_t values (which are
+	 * pointers to pcibr_info structs, stored
+	 * as device_info in connection points).
+	 */
+	int			bss_ninfo;
+	pcibr_info_h	        bss_infos;
+
+	/* Temporary Compatibility Macros, for
+	 * stuff that has moved out of bs_slot
+	 * and into the info structure. These
+	 * will go away when their users have
+	 * converted over to multifunction-
+	 * friendly use of bss_{ninfo,infos}.
+	 */
+#define	bss_vendor_id	bss_infos[0]->f_vendor
+#define	bss_device_id	bss_infos[0]->f_device
+#define	bss_window	bss_infos[0]->f_window
+#define	bssw_space	w_space
+#define	bssw_base	w_base
+#define	bssw_size	w_size
+
+	/* Where is DevIO(x) pointing? */
+	/* bssd_space is NONE if it is not assigned. */
+	struct {
+	    pciio_space_t           bssd_space;
+	    iopaddr_t               bssd_base;
+	} bss_devio;
+
+	/* Shadow value for Device(x) register,
+	 * so we don't have to go to the chip.
+	 */
+	bridgereg_t             bss_device;
+
+	/* Number of sets on GBR/REALTIME bit outstanding
+	 * Used by Priority I/O for tracking reservations
+	 */
+	int                     bss_pri_uctr;
+
+	/* Number of "uses" of PMU, 32-bit direct,
+	 * and 64-bit direct DMA (0:none, <0: trans,
+	 * >0: how many dmamaps). Device(x) bits
+	 * controlling attribute of each kind of
+	 * channel can't be changed by dmamap_alloc
+	 * or dmatrans if the controlling counter
+	 * is nonzero. dmatrans is forever.
+	 */
+	int                     bss_pmu_uctr;
+	int                     bss_d32_uctr;
+	int                     bss_d64_uctr;
+
+	/* When the contents of mapping configuration
+	 * information is locked down by dmatrans,
+	 * repeated checks of the same flags should
+	 * be shortcircuited for efficiency.
+	 */
+	iopaddr_t		bss_d64_base;
+	unsigned		bss_d64_flags;
+	iopaddr_t		bss_d32_base;
+	unsigned		bss_d32_flags;
+
+	/* Shadow information used for implementing
+	 * Bridge Hardware WAR #484930
+	 */
+	int			bss_ext_ates_active;
+        volatile unsigned      *bss_cmd_pointer;
+	unsigned		bss_cmd_shadow;
+
+    } bs_slot[8];
+
+    pcibr_intr_bits_f	       *bs_intr_bits;
+
+    /* RRB MANAGEMENT
+     * bs_rrb_fixed: bitmap of slots whose RRB
+     *	allocations we should not "automatically" change
+     * bs_rrb_avail: number of RRBs that have not
+     *  been allocated or reserved for {even,odd} slots
+     * bs_rrb_res: number of RRBs reserved for the
+     *	use of the index slot number
+     * bs_rrb_valid: number of RRBs marked valid
+     *	for the indexed slot number; indexes 8-15
+     *	are for the virtual channels for slots 0-7.
+     */
+    int                     bs_rrb_fixed;
+    int			    bs_rrb_avail[2];
+    int			    bs_rrb_res[8];
+    int			    bs_rrb_valid[16];
+
+    struct {
+	/* Each Bridge interrupt bit has a single XIO
+	 * interrupt channel allocated.
+	 */
+	xtalk_intr_t            bsi_xtalk_intr;
+	/*
+	 * We do not like sharing PCI interrupt lines
+	 * between devices, but the Origin 200 PCI
+	 * layout forces us to do so.
+	 */
+	pcibr_intr_list_t       bsi_pcibr_intr_list;
+	pcibr_intr_wrap_t       bsi_pcibr_intr_wrap;
+	int                     bsi_pcibr_wrap_set;
+
+    } bs_intr[8];
+
+    xtalk_intr_t		bsi_err_intr;
+
+    /*
+     * We stash away some information in this structure on getting
+     * an error interrupt. This information is used during PIO read/
+     * write error handling.
+     *
+     * As it stands now, we do not re-enable the error interrupt
+     * till the error is resolved. Error resolution happens either at
+     * bus error time for PIO Read errors (~100 microseconds), or at
+     * the scheduled timeout time for PIO write errors (~milliseconds).
+     * If this delay causes problems, we may need to move towards
+     * a different scheme..
+     *
+     * Note that there is no locking while looking at this data structure.
+     * There should not be any race between bus error code and
+     * error interrupt code.. will look into this if needed.
+     */
+    struct br_errintr_info {
+	int                     bserr_toutcnt;
+#ifdef IRIX
+	toid_t                  bserr_toutid;	/* Timeout started by errintr */
+#endif
+	iopaddr_t               bserr_addr;	/* Address where error occurred */
+	bridgereg_t             bserr_intstat;	/* interrupts active at error time */
+    } bs_errinfo;
+
+    /*
+     * PCI Bus Space allocation data structure.
+     * This info is used to satisfy the callers of pcibr_piospace_alloc
+     * interface. Most of these users need "large" amounts of PIO
+     * space (typically in Megabytes), and they generally tend to
+     * take once and never release..
+     * For Now use a simple algorithm to manage it. On allocation,
+     * Update the _base field to reflect next free address.
+     *
+     * Freeing does nothing.. So, once allocated, it's gone for good.
+     */
+    struct br_pcisp_info {
+	iopaddr_t               pci_io_base;
+	iopaddr_t               pci_io_last;
+	iopaddr_t               pci_swin_base;
+	iopaddr_t               pci_swin_last;
+	iopaddr_t               pci_mem_base;
+	iopaddr_t               pci_mem_last;
+    } bs_spinfo;
+
+    struct bs_errintr_stat_s {
+	uint32_t              bs_errcount_total;
+	uint32_t              bs_lasterr_timestamp;
+	uint32_t              bs_lasterr_snapshot;
+    } bs_errintr_stat[PCIBR_ISR_MAX_ERRS];
+
+    /*
+     * Bridge-wide endianness control for
+     * large-window PIO mappings
+     *
+     * These fields are set to PCIIO_BYTE_SWAP
+     * or PCIIO_WORD_VALUES once the swapper
+     * has been configured, one way or the other,
+     * for the direct windows. If they are zero,
+     * nobody has a PIO mapping through that window,
+     * and the swapper can be set either way.
+     */
+    unsigned		bs_pio_end_io;
+    unsigned		bs_pio_end_mem;
+};
+
+#define	PCIBR_ERRTIME_THRESHOLD		(100)
+#define	PCIBR_ERRRATE_THRESHOLD		(100)
+
+/*
+ * pcibr will respond to hints dropped in its vertex
+ * using the following structure.
+ */
+struct pcibr_hints_s {
+    /* ph_host_slot is actually +1 so "0" means "no host" */
+    pciio_slot_t            ph_host_slot[8];	/* REQ/GNT/INT in use by ... */
+    unsigned                ph_rrb_fixed;	/* do not change RRB allocations */
+    unsigned                ph_hands_off;	/* prevent further pcibr operations */
+    rrb_alloc_funct_t       rrb_alloc_funct;	/* do dynamic rrb allocation */
+    pcibr_intr_bits_f	   *ph_intr_bits;	/* map PCI INT[ABCD] to Bridge Int(n) */
+};
+
+extern int              pcibr_prefetch_enable_rev, pcibr_wg_enable_rev;
+
+/*
+ * Number of bridge non-fatal error interrupts we can see before
+ * we decide to disable that interrupt.
+ */
+#define	PCIBR_ERRINTR_DISABLE_LEVEL	10000
+
+/* =====================================================================
+ *    Bridge (pcibr) state management functions
+ *
+ *      pcibr_soft_get is here because we do it in a lot
+ *      of places and I want to make sure they all stay
+ *      in step with each other.
+ *
+ *      pcibr_soft_set is here because I want it to be
+ *      closely associated with pcibr_soft_get, even
+ *      though it is only called in one place.
+ */
+
+#define pcibr_soft_get(v)       ((pcibr_soft_t)hwgraph_fastinfo_get((v)))
+#define pcibr_soft_set(v,i)     (hwgraph_fastinfo_set((v), (arbitrary_info_t)(i)))
+
+#endif				/* _ASM_SN_PCI_PCIBR_PRIVATE_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pciio.h linux/include/asm-ia64/sn/pci/pciio.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pciio.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/pci/pciio.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,717 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PCI_PCIIO_H
+#define _ASM_SN_PCI_PCIIO_H
+
+/*
+ * pciio.h -- platform-independent PCI interface
+ */
+
+#include <asm/sn/ioerror.h>
+#include <asm/sn/iobus.h>
+
+
+#if defined(_LANGUAGE_C) || defined(_LANGUAGE_C_PLUS_PLUS)
+
+#include <asm/sn/dmamap.h>
+#include <asm/sn/alenlist.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int pciio_vendor_id_t;
+
+#define PCIIO_VENDOR_ID_NONE	-1
+
+typedef int pciio_device_id_t;
+
+#define PCIIO_DEVICE_ID_NONE	-1
+
+#ifdef colin
+typedef char pciio_bus_t;	/* PCI bus number (0..255) */
+typedef char pciio_slot_t;	/* PCI slot number (0..31, 255) */
+typedef char pciio_function_t;	/* PCI func number (0..7, 255) */
+#else
+typedef uint8_t pciio_bus_t;       /* PCI bus number (0..255) */
+typedef uint8_t pciio_slot_t;      /* PCI slot number (0..31, 255) */
+typedef uint8_t pciio_function_t;  /* PCI func number (0..7, 255) */
+#endif
+
+#define	PCIIO_SLOTS		((pciio_slot_t)32)
+#define	PCIIO_FUNCS		((pciio_function_t)8)
+
+#define	PCIIO_SLOT_NONE		((pciio_slot_t)255)
+#define	PCIIO_FUNC_NONE		((pciio_function_t)255)
+
+typedef int pciio_intr_line_t;		/* PCI interrupt line(s) */
+
+#define PCIIO_INTR_LINE(n)      (0x1 << (n))
+#define PCIIO_INTR_LINE_A	(0x1)
+#define PCIIO_INTR_LINE_B	(0x2)
+#define PCIIO_INTR_LINE_C	(0x4)
+#define PCIIO_INTR_LINE_D	(0x8)
+
+typedef int pciio_space_t;		/* PCI address space designation */
+
+#define PCIIO_SPACE_NONE	(0)
+#define	PCIIO_SPACE_ROM		(1)
+#define PCIIO_SPACE_IO		(2)
+/*	PCIIO_SPACE_		(3) */
+#define PCIIO_SPACE_MEM		(4)
+#define PCIIO_SPACE_MEM32	(5)
+#define PCIIO_SPACE_MEM64	(6)
+#define PCIIO_SPACE_CFG		(7)
+#define PCIIO_SPACE_WIN0	(8)
+#define PCIIO_SPACE_WIN(n)	(PCIIO_SPACE_WIN0+(n))	/* 8..13 */
+/*	PCIIO_SPACE_		(14) */
+#define PCIIO_SPACE_BAD		(15)
+
+#if 1	/* does anyone really use these? */
+#define PCIIO_SPACE_USER0	(20)
+#define PCIIO_SPACE_USER(n)	(PCIIO_SPACE_USER0+(n))	/* 20 .. ? */
+#endif
+
+/*
+ * PCI_NOWHERE is the error value returned in
+ * place of a PCI address when there is no
+ * corresponding address.
+ */
+#define	PCI_NOWHERE		(0)
+
+/*
+ *    Acceptable flag bits for pciio service calls
+ *
+ * PCIIO_FIXED: require that mappings be established
+ *	using fixed sharable resources; address
+ *	translation results will be permanently
+ *	available. (PIOMAP_FIXED and DMAMAP_FIXED are
+ *	the same numeric value and are acceptable).
+ * PCIIO_NOSLEEP: if any part of the operation would
+ *	sleep waiting for resources, return an error
+ *	instead. (PIOMAP_NOSLEEP and DMAMAP_NOSLEEP are
+ *	the same numeric value and are acceptable).
+ * PCIIO_INPLACE: when operating on alenlist structures,
+ *	reuse the source alenlist rather than creating a
+ *	new one. (PIOMAP_INPLACE and DMAMAP_INPLACE are
+ *	the same numeric value and are acceptable).
+ *
+ * PCIIO_DMA_CMD: configure this stream as a
+ *	generic "command" stream. Generally this
+ *	means turn off prefetchers and write
+ *	gatherers, and whatever else might be
+ *	necessary to make command ring DMAs
+ *	work as expected.
+ * PCIIO_DMA_DATA: configure this stream as a
+ *	generic "data" stream. Generally, this
+ *	means turning on prefetchers and write
+ *	gatherers, and anything else that might
+ *	increase the DMA throughput (short of
+ *	using "high priority" or "real time"
+ *	resources that may lower overall system
+ *	performance).
+ * PCIIO_DMA_A64: this device is capable of
+ *	using 64-bit DMA addresses. Unless this
+ *	flag is specified, it is assumed that
+ *	the DMA address must be in the low 4G
+ *	of PCI space.
+ * PCIIO_PREFETCH: if there are prefetchers
+ *	available, they can be turned on.
+ * PCIIO_NOPREFETCH: any prefetchers along
+ *	the dma path should be turned off.
+ * PCIIO_WRITE_GATHER: if there are write gatherers
+ *	available, they can be turned on.
+ * PCIIO_NOWRITE_GATHER: any write gatherers along
+ *	the dma path should be turned off.
+ *
+ * PCIIO_BYTE_STREAM: the DMA stream represents a group
+ *	of ordered bytes. Arrange all byte swapping
+ *	hardware so that the bytes land in the correct
+ *	order. This is a common setting for data
+ *	channels, but is NOT implied by PCIIO_DMA_DATA.
+ * PCIIO_WORD_VALUES: the DMA stream is used to
+ *	communicate quantities stored in multiple bytes,
+ *	and the device doing the DMA is little-endian;
+ *	arrange any swapping hardware so that
+ *	32-bit-wide values are maintained. This is a
+ *	common setting for command rings that contain
+ *	DMA addresses and counts, but is NOT implied by
+ *	PCIIO_DMA_CMD. CPU Accesses to 16-bit fields
+ *	must have their address xor-ed with 2, and
+ *	accesses to individual bytes must have their
+ *	addresses xor-ed with 3 relative to what the
+ *	device expects.
+ *
+ * NOTE: any "provider specific" flags that
+ * conflict with the generic flags will
+ * override the generic flags, locally
+ * at that provider.
+ *
+ * Also, note that PCI-generic flags (PCIIO_) are
+ * in bits 0-14. The upper bits, 15-31, are reserved
+ * for PCI implementation-specific flags.
+ */
+
+#define	PCIIO_FIXED		DMAMAP_FIXED
+#define	PCIIO_NOSLEEP		DMAMAP_NOSLEEP
+#define	PCIIO_INPLACE		DMAMAP_INPLACE
+
+#define PCIIO_DMA_CMD		0x0010
+#define PCIIO_DMA_DATA		0x0020
+#define PCIIO_DMA_A64		0x0040
+
+#define PCIIO_WRITE_GATHER	0x0100
+#define PCIIO_NOWRITE_GATHER	0x0200
+#define PCIIO_PREFETCH		0x0400
+#define PCIIO_NOPREFETCH	0x0800
+
+/* Requesting an endianness setting that the
+ * underlying hardware can not support
+ * WILL result in a failure to allocate
+ * dmamaps or complete a dmatrans.
+ */
+#define	PCIIO_BYTE_STREAM	0x1000	/* set BYTE SWAP for "byte stream" */
+#define	PCIIO_WORD_VALUES	0x2000	/* set BYTE SWAP for "word values" */
+
+/*
+ * Interface to deal with PCI endianness.
+ * The driver calls pciio_endian_set once, supplying the actual endianness of
+ * the device and the desired endianness.  On SGI systems, only use LITTLE if
+ * dealing with a driver that does software swizzling.  Most of the time,
+ * it's preferable to request BIG.  The return value indicates the endianness
+ * that is actually achieved.  On systems that support hardware swizzling,
+ * the achieved endianness will be the desired endianness.  On systems without
+ * swizzle hardware, the achieved endianness will be the device's endianness.
+ */
+typedef enum pciio_endian_e {
+    PCIDMA_ENDIAN_BIG,
+    PCIDMA_ENDIAN_LITTLE
+} pciio_endian_t;
+
+/*
+ * Interface to set PCI arbitration priority for devices that require
+ * realtime characteristics.  pciio_priority_set is used to switch a
+ * device between the PCI high-priority arbitration ring and the low
+ * priority arbitration ring.
+ *
+ * (Note: this is strictly for the PCI arbitration priority.  It has
+ * no direct relationship to GBR.)
+ */
+typedef enum pciio_priority_e {
+    PCI_PRIO_LOW,
+    PCI_PRIO_HIGH
+} pciio_priority_t;
+
+/*
+ * handles of various sorts
+ */
+typedef struct pciio_piomap_s *pciio_piomap_t;
+typedef struct pciio_dmamap_s *pciio_dmamap_t;
+typedef struct pciio_intr_s *pciio_intr_t;
+typedef struct pciio_info_s *pciio_info_t;
+typedef struct pciio_piospace_s *pciio_piospace_t;
+
+/* PIO MANAGEMENT */
+
+/*
+ *    A NOTE ON PCI PIO ADDRESSES
+ *
+ *      PCI supports three different address spaces: CFG
+ *      space, MEM space and I/O space. Further, each
+ *      card always accepts CFG accesses at an address
+ *      based on which slot it is attached to, but can
+ *      decode up to six address ranges.
+ *
+ *      Assignment of the base address registers for all
+ *      PCI devices is handled centrally; most commonly,
+ *      device drivers will want to talk to offsets
+ *      within one or another of the address ranges. In
+ *      order to do this, which of these "address
+ *      spaces" the PIO is directed into must be encoded
+ *      in the flag word.
+ *
+ *      We reserve the right to defer allocation of PCI
+ *      address space for a device window until the
+ *      driver makes a piomap_alloc or piotrans_addr
+ *      request.
+ *
+ *      If a device driver mucks with its device's base
+ *      registers through a PIO mapping to CFG space,
+ *      results of further PIO through the corresponding
+ *      window are UNDEFINED.
+ *
+ *      Windows are named by the index in the base
+ *      address register set for the device of the
+ *      desired register; IN THE CASE OF 64 BIT base
+ *      registers, the index should be to the word of
+ *      the register that contains the mapping type
+ *      bits; since the PCI CFG space is natively
+ *      organized little-endian fashion, this is the
+ *      first of the two words.
+ *
+ *      AT THE MOMENT, any required corrections for
+ *      endianness are the responsibility of the device
+ *      driver; not all platforms support control in
+ *      hardware of byteswapping hardware. We anticipate
+ *      providing flag bits to the PIO and DMA
+ *      management interfaces to request different
+ *      configurations of byteswapping hardware.
+ *
+ *      PIO Accesses to CFG space via the "Bridge" ASIC
+ *      used in IP30 platforms preserve the native byte
+ *      significance within the 32-bit word; byte
+ *      addresses for single byte accesses need to be
+ *      XORed with 3, and addresses for 16-bit accesses
+ *      need to be XORed with 2.
+ *
+ *      The IOC3 used on IP30, and other SGI PCI devices
+ *      as well, require use of 32-bit accesses to their
+ *      configuration space registers. Any potential PCI
+ *      bus providers need to be aware of this requirement.
+ */
+
+#define PCIIO_PIOMAP_CFG	(0x1)
+#define PCIIO_PIOMAP_MEM	(0x2)
+#define PCIIO_PIOMAP_IO		(0x4)
+#define PCIIO_PIOMAP_WIN(n)	(0x8+(n))
+
+typedef pciio_piomap_t
+pciio_piomap_alloc_f    (devfs_handle_t dev,	/* set up mapping for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 pciio_space_t space,	/* which address space */
+			 iopaddr_t pcipio_addr,		/* starting address */
+			 size_t byte_count,
+			 size_t byte_count_max,		/* maximum size of a mapping */
+			 unsigned flags);	/* defined in sys/pio.h */
+
+typedef void
+pciio_piomap_free_f     (pciio_piomap_t pciio_piomap);
+
+typedef caddr_t
+pciio_piomap_addr_f     (pciio_piomap_t pciio_piomap,	/* mapping resources */
+			 iopaddr_t pciio_addr,	/* map for this pcipio address */
+			 size_t byte_count);	/* map this many bytes */
+
+typedef void
+pciio_piomap_done_f     (pciio_piomap_t pciio_piomap);
+
+typedef caddr_t
+pciio_piotrans_addr_f   (devfs_handle_t dev,	/* translate for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 pciio_space_t space,	/* which address space */
+			 iopaddr_t pciio_addr,	/* starting address */
+			 size_t byte_count,	/* map this many bytes */
+			 unsigned flags);
+
+typedef caddr_t
+pciio_pio_addr_f        (devfs_handle_t dev,	/* translate for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 pciio_space_t space,	/* which address space */
+			 iopaddr_t pciio_addr,	/* starting address */
+			 size_t byte_count,	/* map this many bytes */
+			 pciio_piomap_t *mapp,	/* in case a piomap was needed */
+			 unsigned flags);
+
+typedef iopaddr_t
+pciio_piospace_alloc_f  (devfs_handle_t dev,	/* PIO space for this device */
+			 device_desc_t dev_desc,	/* Device descriptor   */
+			 pciio_space_t space,	/* which address space  */
+			 size_t byte_count,	/* Number of bytes of space */
+			 size_t alignment);	/* Alignment of allocation  */
+
+typedef void
+pciio_piospace_free_f   (devfs_handle_t dev,	/* Device freeing space */
+			 pciio_space_t space,	/* Which space is freed */
+			 iopaddr_t pci_addr,	/* Address being freed */
+			 size_t size);	/* Size freed           */
+
+/* DMA MANAGEMENT */
+
+typedef pciio_dmamap_t
+pciio_dmamap_alloc_f    (devfs_handle_t dev,	/* set up mappings for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 size_t byte_count_max,		/* max size of a mapping */
+			 unsigned flags);	/* defined in dma.h */
+
+typedef void
+pciio_dmamap_free_f     (pciio_dmamap_t dmamap);
+
+typedef iopaddr_t
+pciio_dmamap_addr_f     (pciio_dmamap_t dmamap,		/* use these mapping resources */
+			 paddr_t paddr,	/* map for this address */
+			 size_t byte_count);	/* map this many bytes */
+
+typedef alenlist_t
+pciio_dmamap_list_f     (pciio_dmamap_t dmamap,		/* use these mapping resources */
+			 alenlist_t alenlist,	/* map this address/length list */
+			 unsigned flags);
+
+typedef void
+pciio_dmamap_done_f     (pciio_dmamap_t dmamap);
+
+typedef iopaddr_t
+pciio_dmatrans_addr_f   (devfs_handle_t dev,	/* translate for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 paddr_t paddr,	/* system physical address */
+			 size_t byte_count,	/* length */
+			 unsigned flags);	/* defined in dma.h */
+
+typedef alenlist_t
+pciio_dmatrans_list_f   (devfs_handle_t dev,	/* translate for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 alenlist_t palenlist,	/* system address/length list */
+			 unsigned flags);	/* defined in dma.h */
+
+typedef void
+pciio_dmamap_drain_f	(pciio_dmamap_t map);
+
+typedef void
+pciio_dmaaddr_drain_f	(devfs_handle_t vhdl,
+			 paddr_t addr,
+			 size_t bytes);
+
+typedef void
+pciio_dmalist_drain_f	(devfs_handle_t vhdl,
+			 alenlist_t list);
+
+/* INTERRUPT MANAGEMENT */
+
+typedef pciio_intr_t
+pciio_intr_alloc_f      (devfs_handle_t dev,	/* which PCI device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 pciio_intr_line_t lines,	/* which line(s) will be used */
+			 devfs_handle_t owner_dev);	/* owner of this intr */
+
+typedef void
+pciio_intr_free_f       (pciio_intr_t intr_hdl);
+
+typedef int
+pciio_intr_connect_f    (pciio_intr_t intr_hdl,		/* pciio intr resource handle */
+			 intr_func_t intr_func,		/* pciio intr handler */
+			 intr_arg_t intr_arg,	/* arg to intr handler */
+			 void *thread);	/* intr thread to use */
+
+typedef void
+pciio_intr_disconnect_f (pciio_intr_t intr_hdl);
+
+typedef devfs_handle_t
+pciio_intr_cpu_get_f    (pciio_intr_t intr_hdl);	/* pciio intr resource handle */
+
+/* CONFIGURATION MANAGEMENT */
+
+typedef void
+pciio_provider_startup_f (devfs_handle_t pciio_provider);
+
+typedef void
+pciio_provider_shutdown_f (devfs_handle_t pciio_provider);
+
+typedef int	
+pciio_reset_f		(devfs_handle_t conn);	/* pci connection point */
+
+typedef int
+pciio_write_gather_flush_f (devfs_handle_t dev);    /* Device flushing buffers */
+
+typedef pciio_endian_t			/* actual endianness */
+pciio_endian_set_f      (devfs_handle_t dev,	/* specify endianness for this device */
+			 pciio_endian_t device_end,	/* endianness of device */
+			 pciio_endian_t desired_end);	/* desired endianness */
+
+typedef pciio_priority_t
+pciio_priority_set_f    (devfs_handle_t pcicard,
+			 pciio_priority_t device_prio);
+
+typedef uint64_t
+pciio_config_get_f	(devfs_handle_t conn,	/* pci connection point */
+			 unsigned reg,		/* register byte offset */
+			 unsigned size);	/* width in bytes (1..4) */
+
+typedef void
+pciio_config_set_f	(devfs_handle_t conn,	/* pci connection point */
+			 unsigned reg,		/* register byte offset */
+			 unsigned size,		/* width in bytes (1..4) */
+			 uint64_t value);	/* value to store */
+
+typedef int
+pciio_error_devenable_f (devfs_handle_t pconn_vhdl, int error_code);
+
+typedef pciio_slot_t
+pciio_error_extract_f	(devfs_handle_t vhdl,
+			 pciio_space_t *spacep,
+			 iopaddr_t *addrp);
+
+/*
+ * Adapters that provide a PCI interface adhere to this software interface.
+ */
+typedef struct pciio_provider_s {
+    /* PIO MANAGEMENT */
+    pciio_piomap_alloc_f   *piomap_alloc;
+    pciio_piomap_free_f    *piomap_free;
+    pciio_piomap_addr_f    *piomap_addr;
+    pciio_piomap_done_f    *piomap_done;
+    pciio_piotrans_addr_f  *piotrans_addr;
+    pciio_piospace_alloc_f *piospace_alloc;
+    pciio_piospace_free_f  *piospace_free;
+
+    /* DMA MANAGEMENT */
+    pciio_dmamap_alloc_f   *dmamap_alloc;
+    pciio_dmamap_free_f    *dmamap_free;
+    pciio_dmamap_addr_f    *dmamap_addr;
+    pciio_dmamap_list_f    *dmamap_list;
+    pciio_dmamap_done_f    *dmamap_done;
+    pciio_dmatrans_addr_f  *dmatrans_addr;
+    pciio_dmatrans_list_f  *dmatrans_list;
+    pciio_dmamap_drain_f   *dmamap_drain;
+    pciio_dmaaddr_drain_f  *dmaaddr_drain;
+    pciio_dmalist_drain_f  *dmalist_drain;
+
+    /* INTERRUPT MANAGEMENT */
+    pciio_intr_alloc_f     *intr_alloc;
+    pciio_intr_free_f      *intr_free;
+    pciio_intr_connect_f   *intr_connect;
+    pciio_intr_disconnect_f *intr_disconnect;
+    pciio_intr_cpu_get_f   *intr_cpu_get;
+
+    /* CONFIGURATION MANAGEMENT */
+    pciio_provider_startup_f *provider_startup;
+    pciio_provider_shutdown_f *provider_shutdown;
+    pciio_reset_f	   *reset;
+    pciio_write_gather_flush_f *write_gather_flush;
+    pciio_endian_set_f     *endian_set;
+    pciio_priority_set_f   *priority_set;
+    pciio_config_get_f	   *config_get;
+    pciio_config_set_f	   *config_set;
+
+    /* Error handling interface */
+    pciio_error_devenable_f *error_devenable;
+    pciio_error_extract_f *error_extract;
+} pciio_provider_t;
+
+/* PCI devices use these standard PCI provider interfaces */
+extern pciio_piomap_alloc_f pciio_piomap_alloc;
+extern pciio_piomap_free_f pciio_piomap_free;
+extern pciio_piomap_addr_f pciio_piomap_addr;
+extern pciio_piomap_done_f pciio_piomap_done;
+extern pciio_piotrans_addr_f pciio_piotrans_addr;
+extern pciio_pio_addr_f pciio_pio_addr;
+extern pciio_piospace_alloc_f pciio_piospace_alloc;
+extern pciio_piospace_free_f pciio_piospace_free;
+extern pciio_dmamap_alloc_f pciio_dmamap_alloc;
+extern pciio_dmamap_free_f pciio_dmamap_free;
+extern pciio_dmamap_addr_f pciio_dmamap_addr;
+extern pciio_dmamap_list_f pciio_dmamap_list;
+extern pciio_dmamap_done_f pciio_dmamap_done;
+extern pciio_dmatrans_addr_f pciio_dmatrans_addr;
+extern pciio_dmatrans_list_f pciio_dmatrans_list;
+extern pciio_dmamap_drain_f pciio_dmamap_drain;
+extern pciio_dmaaddr_drain_f pciio_dmaaddr_drain;
+extern pciio_dmalist_drain_f pciio_dmalist_drain;
+extern pciio_intr_alloc_f pciio_intr_alloc;
+extern pciio_intr_free_f pciio_intr_free;
+extern pciio_intr_connect_f pciio_intr_connect;
+extern pciio_intr_disconnect_f pciio_intr_disconnect;
+extern pciio_intr_cpu_get_f pciio_intr_cpu_get;
+extern pciio_provider_startup_f pciio_provider_startup;
+extern pciio_provider_shutdown_f pciio_provider_shutdown;
+extern pciio_reset_f pciio_reset;
+extern pciio_write_gather_flush_f pciio_write_gather_flush;
+extern pciio_endian_set_f pciio_endian_set;
+extern pciio_priority_set_f pciio_priority_set;
+extern pciio_config_get_f pciio_config_get;
+extern pciio_config_set_f pciio_config_set;
+extern pciio_error_devenable_f pciio_error_devenable;
+extern pciio_error_extract_f pciio_error_extract;
+
+/* Widgetdev in the IOERROR structure is encoded as follows.
+ *	+---------------------------+
+ *	| slot (7:3) | function(2:0)|
+ *	+---------------------------+
+ * Following are convenience interfaces to form a widgetdev
+ * or to break it into its constituent slot and function fields.
+ */
+
+#define PCIIO_WIDGETDEV_SLOT_SHFT		3
+#define PCIIO_WIDGETDEV_SLOT_MASK		0x1f
+#define PCIIO_WIDGETDEV_FUNC_MASK		0x7
+
+#ifdef IRIX
+#define pciio_widgetdev_create(slot,func)	\
+	((slot) << PCIIO_WIDGETDEV_SLOT_SHFT + (func))
+#else
+#define pciio_widgetdev_create(slot,func)       \
+        (((slot) << PCIIO_WIDGETDEV_SLOT_SHFT) + (func))
+#endif
+
+#define pciio_widgetdev_slot_get(wdev)		\
+	(((wdev) >> PCIIO_WIDGETDEV_SLOT_SHFT) & PCIIO_WIDGETDEV_SLOT_MASK)
+
+#define pciio_widgetdev_func_get(wdev)		\
+	((wdev) & PCIIO_WIDGETDEV_FUNC_MASK)
+
+
+/* Generic PCI card initialization interface
+ */
+
+extern int
+pciio_driver_register  (pciio_vendor_id_t vendor_id,	/* card's vendor number */
+			pciio_device_id_t device_id,	/* card's device number */
+			char *driver_prefix,	/* driver prefix */
+			unsigned flags);
+
+extern void
+pciio_error_register   (devfs_handle_t pconn,	/* which slot */
+			error_handler_f *efunc,	/* function to call */
+			error_handler_arg_t einfo);	/* first parameter */
+
+extern void             pciio_driver_unregister(char *driver_prefix);
+
+typedef void		pciio_iter_f(devfs_handle_t pconn);	/* a connect point */
+
+extern void             pciio_iterate(char *driver_prefix,
+				      pciio_iter_f *func);
+
+/* Interfaces used by PCI Bus Providers to talk to
+ * the Generic PCI layer.
+ */
+extern devfs_handle_t
+pciio_device_register  (devfs_handle_t connectpt,	/* vertex at center of bus */
+			devfs_handle_t master,	/* card's master ASIC (pci provider) */
+			pciio_slot_t slot,	/* card's slot (0..?) */
+			pciio_function_t func,	/* card's func (0..?) */
+			pciio_vendor_id_t vendor,	/* card's vendor number */
+			pciio_device_id_t device);	/* card's device number */
+
+extern void
+pciio_device_unregister(devfs_handle_t connectpt);
+
+extern pciio_info_t
+pciio_device_info_new  (pciio_info_t pciio_info,	/* preallocated info struct */
+			devfs_handle_t master,	/* card's master ASIC (pci provider) */
+			pciio_slot_t slot,	/* card's slot (0..?) */
+			pciio_function_t func,	/* card's func (0..?) */
+			pciio_vendor_id_t vendor,	/* card's vendor number */
+			pciio_device_id_t device);	/* card's device number */
+
+extern void
+pciio_device_info_free(pciio_info_t pciio_info);
+
+extern devfs_handle_t
+pciio_device_info_register(
+			devfs_handle_t connectpt,	/* vertex at center of bus */
+			pciio_info_t pciio_info);	/* details about conn point */
+
+extern void
+pciio_device_info_unregister(
+			devfs_handle_t connectpt,	/* vertex at center of bus */
+			pciio_info_t pciio_info);	/* details about conn point */
+
+
+extern int              pciio_device_attach(devfs_handle_t pcicard);	/* vertex created by pciio_device_register */
+extern int             pciio_device_detach(devfs_handle_t pcicard);	/* vertex created by pciio_device_register */
+
+/*
+ * Generic PCI interface, for use with all PCI providers
+ * and all PCI devices.
+ */
+
+/* Generic PCI interrupt interfaces */
+extern devfs_handle_t     pciio_intr_dev_get(pciio_intr_t pciio_intr);
+extern devfs_handle_t     pciio_intr_cpu_get(pciio_intr_t pciio_intr);
+
+/* Generic PCI pio interfaces */
+extern devfs_handle_t     pciio_pio_dev_get(pciio_piomap_t pciio_piomap);
+extern pciio_slot_t     pciio_pio_slot_get(pciio_piomap_t pciio_piomap);
+extern pciio_space_t    pciio_pio_space_get(pciio_piomap_t pciio_piomap);
+extern iopaddr_t        pciio_pio_pciaddr_get(pciio_piomap_t pciio_piomap);
+extern ulong            pciio_pio_mapsz_get(pciio_piomap_t pciio_piomap);
+extern caddr_t          pciio_pio_kvaddr_get(pciio_piomap_t pciio_piomap);
+
+#ifdef IRIX
+#ifdef USE_PCI_PIO
+extern uint8_t 		pciio_pio_read8(volatile uint8_t *addr);
+extern uint16_t 	pciio_pio_read16(volatile uint16_t *addr);
+extern uint32_t 	pciio_pio_read32(volatile uint32_t *addr);
+extern uint64_t 	pciio_pio_read64(volatile uint64_t *addr);
+extern void 		pciio_pio_write8(uint8_t val, volatile uint8_t *addr);
+extern void 		pciio_pio_write16(uint16_t val, volatile uint16_t *addr);
+extern void 		pciio_pio_write32(uint32_t val, volatile uint32_t *addr);
+extern void 		pciio_pio_write64(uint64_t val, volatile uint64_t *addr);
+#else /* !USE_PCI_PIO */
+__inline uint8_t pciio_pio_read8(volatile uint8_t *addr)
+{ 
+	return *addr; 
+}
+__inline uint16_t pciio_pio_read16(volatile uint16_t *addr)
+{
+	return *addr; 
+}
+__inline uint32_t pciio_pio_read32(volatile uint32_t *addr)
+{
+	return *addr; 
+}
+__inline uint64_t pciio_pio_read64(volatile uint64_t *addr)
+{
+	return *addr;
+}
+__inline void pciio_pio_write8(uint8_t val, volatile uint8_t *addr)
+{
+	*addr = val;
+}
+__inline void pciio_pio_write16(uint16_t val, volatile uint16_t *addr)
+{
+	*addr = val;
+}
+__inline void pciio_pio_write32(uint32_t val, volatile uint32_t *addr)
+{
+	*addr = val;
+}
+__inline void pciio_pio_write64(uint64_t val, volatile uint64_t *addr)
+{
+	*addr = val;
+}
+#endif /* USE_PCI_PIO */
+#endif
+
+/* Generic PCI dma interfaces */
+extern devfs_handle_t     pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap);
+
+/* Register/unregister PCI providers and get implementation handle */
+extern void             pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns);
+extern void             pciio_provider_unregister(devfs_handle_t provider);
+extern pciio_provider_t *pciio_provider_fns_get(devfs_handle_t provider);
+
+/* Generic pci slot information access interface */
+extern pciio_info_t     pciio_info_chk(devfs_handle_t vhdl);
+extern pciio_info_t     pciio_info_get(devfs_handle_t vhdl);
+extern void             pciio_info_set(devfs_handle_t vhdl, pciio_info_t widget_info);
+extern devfs_handle_t     pciio_info_dev_get(pciio_info_t pciio_info);
+extern pciio_bus_t	pciio_info_bus_get(pciio_info_t pciio_info);
+extern pciio_slot_t     pciio_info_slot_get(pciio_info_t pciio_info);
+extern pciio_function_t	pciio_info_function_get(pciio_info_t pciio_info);
+extern pciio_vendor_id_t pciio_info_vendor_id_get(pciio_info_t pciio_info);
+extern pciio_device_id_t pciio_info_device_id_get(pciio_info_t pciio_info);
+extern devfs_handle_t     pciio_info_master_get(pciio_info_t pciio_info);
+extern arbitrary_info_t pciio_info_mfast_get(pciio_info_t pciio_info);
+extern pciio_provider_t *pciio_info_pops_get(pciio_info_t pciio_info);
+extern error_handler_f *pciio_info_efunc_get(pciio_info_t);
+extern error_handler_arg_t *pciio_info_einfo_get(pciio_info_t);
+extern pciio_space_t	pciio_info_bar_space_get(pciio_info_t, int);
+extern iopaddr_t	pciio_info_bar_base_get(pciio_info_t, int);
+extern size_t		pciio_info_bar_size_get(pciio_info_t, int);
+extern iopaddr_t	pciio_info_rom_base_get(pciio_info_t);
+extern size_t		pciio_info_rom_size_get(pciio_info_t);
+
+extern int              pciio_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
+extern int		pciio_dma_enabled(devfs_handle_t);
+
+#ifdef __cplusplus
+};
+#endif
+#endif				/* C or C++ */
+#endif				/* _ASM_SN_PCI_PCIIO_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pciio_private.h linux/include/asm-ia64/sn/pci/pciio_private.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/pci/pciio_private.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/pci/pciio_private.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,100 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PCI_PCIIO_PRIVATE_H
+#define _ASM_SN_PCI_PCIIO_PRIVATE_H
+
+#ifdef colin
+#include <ksys/xthread.h>
+#endif
+
+/*
+ * pciio_private.h -- private definitions for pciio
+ * PCI drivers should NOT include this file.
+ */
+
+#ident "sys/PCI/pciio_private: $Revision: 1.13 $"
+
+/*
+ * All PCI providers set up PIO using this information.
+ */
+struct pciio_piomap_s {
+    unsigned                pp_flags;	/* PCIIO_PIOMAP flags */
+    devfs_handle_t            pp_dev;	/* associated pci card */
+    pciio_slot_t            pp_slot;	/* which slot the card is in */
+    pciio_space_t           pp_space;	/* which address space */
+    iopaddr_t               pp_pciaddr;		/* starting offset of mapping */
+    size_t                  pp_mapsz;	/* size of this mapping */
+    caddr_t                 pp_kvaddr;	/* kernel virtual address to use */
+};
+
+/*
+ * All PCI providers set up DMA using this information.
+ */
+struct pciio_dmamap_s {
+    unsigned                pd_flags;	/* PCIIO_DMAMAP flags */
+    devfs_handle_t            pd_dev;	/* associated pci card */
+    pciio_slot_t            pd_slot;	/* which slot the card is in */
+};
+
+/*
+ * All PCI providers set up interrupts using this information.
+ */
+
+struct pciio_intr_s {
+    unsigned                pi_flags;	/* PCIIO_INTR flags */
+    devfs_handle_t            pi_dev;	/* associated pci card */
+    device_desc_t	    pi_dev_desc;	/* override device descriptor */
+    pciio_intr_line_t       pi_lines;	/* which interrupt line(s) */
+    intr_func_t             pi_func;	/* handler function (when connected) */
+    intr_arg_t              pi_arg;	/* handler parameter (when connected) */
+#ifdef IRIX
+    thd_int_t               pi_tinfo;	/* Thread info (when connected) */
+#endif
+    cpuid_t                 pi_mustruncpu; /* Where we must run. */
+    int			    pi_irq;	/* IRQ assigned */
+    int			    pi_cpu;	/* cpu assigned */
+};
+
+/* PCIIO_INTR (pi_flags) flags */
+#define PCIIO_INTR_CONNECTED	1	/* interrupt handler/thread has been connected */
+#define PCIIO_INTR_NOTHREAD	2	/* interrupt handler wants to be called at interrupt level */
+
+/*
+ * Each PCI Card has one of these.
+ */
+
+struct pciio_info_s {
+    char                   *c_fingerprint;
+    devfs_handle_t            c_vertex;	/* back pointer to vertex */
+    pciio_bus_t             c_bus;	/* which bus the card is in */
+    pciio_slot_t            c_slot;	/* which slot the card is in */
+    pciio_function_t        c_func;	/* which func (on multi-func cards) */
+    pciio_vendor_id_t       c_vendor;	/* PCI card "vendor" code */
+    pciio_device_id_t       c_device;	/* PCI card "device" code */
+    devfs_handle_t            c_master;	/* PCI bus provider */
+    arbitrary_info_t        c_mfast;	/* cached fastinfo from c_master */
+    pciio_provider_t       *c_pops;	/* cached provider from c_master */
+    error_handler_f        *c_efunc;	/* error handling function */
+    error_handler_arg_t     c_einfo;	/* first parameter for efunc */
+
+    struct {				/* state of BASE regs */
+	pciio_space_t		w_space;
+	iopaddr_t		w_base;
+	size_t			w_size;
+    }			    c_window[6];
+
+    unsigned		    c_rbase;	/* EXPANSION ROM base addr */
+    unsigned		    c_rsize;	/* EXPANSION ROM size (bytes) */
+
+    pciio_piospace_t	    c_piospace;	/* additional I/O spaces allocated */
+};
+
+extern char             pciio_info_fingerprint[];
+#endif				/* _ASM_SN_PCI_PCIIO_PRIVATE_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/pio.h linux/include/asm-ia64/sn/pio.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/pio.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/pio.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,155 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PIO_H
+#define _ASM_SN_PIO_H
+
+#include <linux/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iobus.h>
+
+/*
+ * pioaddr_t	- The kernel virtual address that a PIO can be done upon.
+ *		  Should probably be (volatile void*) but EVEREST would do PIO
+ *		  to long mostly, just cast for other sizes.
+ */
+
+typedef volatile ulong*	pioaddr_t;
+
+/*
+ * iopaddr_t	- the physical io space relative address (e.g. VME A16S 0x0800).
+ * iospace_t	- specifies the io address space to be mapped/accessed.
+ * piomap_t	- the handle returned by pio_alloc() and used with all the pio
+ *		  access functions.
+ */
+
+
+typedef struct piomap {
+	uint		pio_bus;
+	uint		pio_adap;
+#ifdef IRIX
+	iospace_t	pio_iospace;
+#endif
+	int		pio_flag;
+	int		pio_reg;
+	char		pio_name[7];	/* to identify the mapped device */
+	struct piomap	*pio_next;	/* dlist to link active piomap's */
+	struct piomap	*pio_prev;	/* for debug and error reporting */
+#ifdef IRIX
+	void		(*pio_errfunc)(); /* Pointer to an error function */
+					  /* Used only for piomaps allocated
+					   * by user-level VME drivers    */
+#endif
+	iopaddr_t	pio_iopmask;	/* valid iop address bit mask */
+	iobush_t	pio_bushandle;	/* bus-level handle */
+} piomap_t;
+
+
+/* Macro to get/set PIO error function */
+#define	pio_seterrf(p,f)	(p)->pio_errfunc = (f)
+#define	pio_geterrf(p)		(p)->pio_errfunc
+
+
+/*
+ * pio_mapalloc() - allocates a handle that specifies a mapping from kernel
+ *		    virtual to io space. The returned handle piomap is used
+ *		    with the access functions to make sure that the mapping
+ *		    to the iospace exists.
+ * pio_mapfree()  - frees the mapping as specified in the piomap handle.
+ * pio_mapaddr()  - returns the kv address that maps to piomap'ed io address.
+ */
+#ifdef IRIX
+extern piomap_t	*pio_mapalloc(uint,uint,iospace_t*,int,char*);
+extern void	 pio_mapfree(piomap_t*);
+extern caddr_t	 pio_mapaddr(piomap_t*,iopaddr_t);
+extern piomap_t *pio_ioaddr(int, iobush_t, iopaddr_t, piomap_t *);
+
+/*
+ * PIO access functions.
+ */
+extern int  pio_badaddr(piomap_t*,iopaddr_t,int);
+extern int  pio_badaddr_val(piomap_t*,iopaddr_t,int,void*);
+extern int  pio_wbadaddr(piomap_t*,iopaddr_t,int);
+extern int  pio_wbadaddr_val(piomap_t*,iopaddr_t,int,int);
+extern int  pio_bcopyin(piomap_t*,iopaddr_t,void *,int, int, int);
+extern int  pio_bcopyout(piomap_t*,iopaddr_t,void *,int, int, int);
+
+
+/*
+ * PIO RMW functions using piomap.
+ */
+extern void pio_orb_rmw(piomap_t*, iopaddr_t, unsigned char);
+extern void pio_orh_rmw(piomap_t*, iopaddr_t, unsigned short);
+extern void pio_orw_rmw(piomap_t*, iopaddr_t, unsigned long);
+extern void pio_andb_rmw(piomap_t*, iopaddr_t, unsigned char);
+extern void pio_andh_rmw(piomap_t*, iopaddr_t, unsigned short); 
+extern void pio_andw_rmw(piomap_t*, iopaddr_t, unsigned long); 
+
+
+/*
+ * Old RMW function interface
+ */
+extern void orb_rmw(volatile void*, unsigned int);
+extern void orh_rmw(volatile void*, unsigned int);
+extern void orw_rmw(volatile void*, unsigned int);
+extern void andb_rmw(volatile void*, unsigned int);
+extern void andh_rmw(volatile void*, unsigned int);
+extern void andw_rmw(volatile void*, unsigned int);
+#endif	/* IRIX */
+
+
+/*
+ * piomap_t type defines
+ */
+
+#define PIOMAP_NTYPES	7
+
+#define PIOMAP_A16N	VME_A16NP
+#define PIOMAP_A16S	VME_A16S
+#define PIOMAP_A24N	VME_A24NP
+#define PIOMAP_A24S	VME_A24S
+#define PIOMAP_A32N	VME_A32NP
+#define PIOMAP_A32S	VME_A32S
+#define PIOMAP_A64	6
+
+#define PIOMAP_EISA_IO	0
+#define PIOMAP_EISA_MEM	1
+
+#define PIOMAP_PCI_IO	0
+#define PIOMAP_PCI_MEM	1
+#define PIOMAP_PCI_CFG	2
+#define PIOMAP_PCI_ID	3
+
+/* IBUS piomap types */
+#define PIOMAP_FCI	0
+
+/* dang gio piomap types */
+
+#define	PIOMAP_GIO32	0
+#define	PIOMAP_GIO64	1
+
+#define ET_MEM         	0
+#define ET_IO          	1
+#define LAN_RAM         2
+#define LAN_IO          3
+
+#define PIOREG_NULL	-1
+
+/* standard flags values for pio_map routines,
+ * including {xtalk,pciio}_piomap calls.
+ * NOTE: try to keep these in step with DMAMAP flags.
+ */
+#define PIOMAP_UNFIXED	0x0
+#define PIOMAP_FIXED	0x1
+#define PIOMAP_NOSLEEP	0x2
+#define	PIOMAP_INPLACE	0x4
+
+#define	PIOMAP_FLAGS	0x7
+
+#endif	/* _ASM_SN_PIO_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/prio.h linux/include/asm-ia64/sn/prio.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/prio.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/prio.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,38 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PRIO_H
+#define _ASM_SN_PRIO_H
+
+/*
+ * Priority I/O function prototypes and macro definitions
+ */
+
+typedef long long bandwidth_t;
+
+/* These should be the same as FREAD/FWRITE */
+#define PRIO_READ_ALLOCATE	0x1
+#define PRIO_WRITE_ALLOCATE	0x2
+#define PRIO_READWRITE_ALLOCATE	(PRIO_READ_ALLOCATE | PRIO_WRITE_ALLOCATE)
+
+extern int prioSetBandwidth (int		/* fd */,
+                             int		/* alloc_type */,
+                             bandwidth_t	/* bytes_per_sec */,
+                             pid_t *		/* pid */);
+extern int prioGetBandwidth (int		/* fd */,
+                             bandwidth_t *	/* read_bw */,
+                             bandwidth_t *	/* write_bw */);
+extern int prioLock (pid_t *);
+extern int prioUnlock (void);
+
+/* Error returns */
+#define PRIO_SUCCESS     0
+#define PRIO_FAIL       -1 
+
+#endif /* _ASM_SN_PRIO_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/router.h linux/include/asm-ia64/sn/router.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/router.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/router.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,17 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_ROUTER_H
+#define _ASM_SN_ROUTER_H
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+#include <asm/sn/sn1/router.h>
+#endif
+
+#endif /* _ASM_SN_ROUTER_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sgi.h linux/include/asm-ia64/sn/sgi.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sgi.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sgi.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,237 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+#ifndef _ASM_SN_SGI_H
+#define _ASM_SN_SGI_H
+
+#include <asm/sn/types.h>
+#include <asm/uaccess.h>		/* for copy_??_user */
+#include <linux/mm.h>
+#include <linux/devfs_fs_kernel.h>
+
+// This devfs stuff needs a better home .....
+
+struct directory_type
+{
+    struct devfs_entry *first;
+    struct devfs_entry *last;
+    unsigned int num_removable;
+};
+
+struct file_type
+{
+    unsigned long size;
+};
+
+struct device_type
+{
+    unsigned short major;
+    unsigned short minor;
+};
+
+struct fcb_type  /*  File, char, block type  */
+{
+    uid_t default_uid;
+    gid_t default_gid;
+    void *ops;
+    union 
+    {
+	struct file_type file;
+	struct device_type device;
+    }
+    u;
+    unsigned char auto_owner:1;
+    unsigned char aopen_notify:1;
+    unsigned char removable:1;  /*  Belongs in device_type, but save space   */
+    unsigned char open:1;       /*  Not entirely correct                     */
+};
+
+struct symlink_type
+{
+    unsigned int length;  /*  Not including the NUL terminator    */
+    char *linkname;       /*  This is NULL-terminated            */
+};
+
+struct fifo_type
+{
+    uid_t uid;
+    gid_t gid;
+};
+
+struct devfs_entry
+{
+    void *info;
+    union 
+    {
+	struct directory_type dir;
+	struct fcb_type fcb;
+	struct symlink_type symlink;
+	struct fifo_type fifo;
+    }
+    u;
+    struct devfs_entry *prev;    /*  Previous entry in the parent directory  */
+    struct devfs_entry *next;    /*  Next entry in the parent directory      */
+    struct devfs_entry *parent;  /*  The parent directory                    */
+    struct devfs_entry *slave;   /*  Another entry to unregister             */
+    struct devfs_inode *first_inode;
+    struct devfs_inode *last_inode;
+    umode_t mode;
+    unsigned short namelen;  /*  I think 64k+ filenames are a way off...  */
+    unsigned char registered:1;
+    unsigned char show_unreg:1;
+    unsigned char hide:1;
+    unsigned char no_persistence /*:1*/;
+    char name[1];            /*  This is just a dummy: the allocated array is
+				 bigger. This is NULL-terminated  */
+};
+
+#define MIN(_a,_b)		((_a)<(_b)?(_a):(_b))
+
+typedef uint32_t app32_ptr_t;	/* needed by edt.h */
+typedef int64_t  __psint_t;	/* needed by klgraph.c */
+
+typedef enum { B_FALSE, B_TRUE } boolean_t;
+
+#define ctob(x)			((uint64_t)(x)*NBPC)
+#define btoc(x)			(((uint64_t)(x)+(NBPC-1))/NBPC)
+
+typedef __psunsigned_t nic_data_t;
+
+
+/*
+** Possible return values from graph routines.
+*/
+typedef enum graph_error_e {
+	GRAPH_SUCCESS,		/* 0 */
+	GRAPH_DUP,		/* 1 */
+	GRAPH_NOT_FOUND,	/* 2 */
+	GRAPH_BAD_PARAM,	/* 3 */
+	GRAPH_HIT_LIMIT,	/* 4 */
+	GRAPH_CANNOT_ALLOC,	/* 5 */
+	GRAPH_ILLEGAL_REQUEST,	/* 6 */
+	GRAPH_IN_USE		/* 7 */
+} graph_error_t;
+
+#define SV_FIFO         0x0             /* sv_t is FIFO type */
+#define SV_LIFO         0x2             /* sv_t is LIFO type */
+#define SV_PRIO         0x4             /* sv_t is PRIO type */
+#define SV_KEYED        0x6             /* sv_t is KEYED type */
+#define SV_DEFAULT      SV_FIFO
+
+
+#define MUTEX_DEFAULT	0x0		/* needed by mutex_init() calls */
+#define PZERO		25		/* needed by mutex_lock(), sv_wait()
+					 * psema() calls */
+
+#define sema_t  uint64_t		/* FIXME */
+#define KM_SLEEP   0x0000
+#define KM_NOSLEEP 0x0001		/* needed by kmem_alloc_node(), kmem_zalloc()
+					 * calls */
+#define VM_NOSLEEP 0x0001		/* needed kmem_alloc_node(), kmem_zalloc_node
+					 * calls */
+#define XG_WIDGET_PART_NUM      0xC102          /* KONA/xt_regs.h     XG_XT_PART_NUM_VALUE */
+
+#ifndef K1BASE
+#define K1BASE 0xA0000000
+#endif
+
+#ifndef TO_PHYS_MASK
+#define TO_PHYS_MASK 0x0000000fffffffff
+#endif
+
+typedef uint64_t vhandl_t;
+
+
+#ifndef NBPP
+#define NBPP 4096
+#endif
+
+#ifndef D_MP
+#define D_MP 1
+#endif
+
+#ifndef MAXDEVNAME
+#define MAXDEVNAME 256
+#endif
+
+#ifndef NBPC
+#define NBPC 0
+#endif
+
+#ifndef _PAGESZ
+#define _PAGESZ 4096
+#endif
+
+typedef uint64_t k_machreg_t;	/* needed by cmn_err.h */
+
+typedef uint64_t mrlock_t;	/* needed by devsupport.c */
+
+#define HUB_PIO_CONVEYOR 0x1
+#define CNODEID_NONE (cnodeid_t)-1
+#define XTALK_PCI_PART_NUM "030-1275-"
+#define kdebug 0
+
+
+#define COPYIN(a, b, c)		copy_from_user(b,a,c)
+#define COPYOUT(a, b, c)	copy_to_user(b,a,c)
+
+#define kvtophys(x)		(alenaddr_t) (x)
+#define POFFMASK		(NBPP - 1)
+#define poff(X)			((__psunsigned_t)(X) & POFFMASK)
+
+#define initnsema(a,b,c) 	sema_init(a,b)
+
+#define BZERO(a,b)		memset(a, 0, b)
+
+#define kern_malloc(x)	kmalloc(x, GFP_KERNEL)
+#define kern_free(x)	kfree(x)
+
+typedef cpuid_t cpu_cookie_t;
+#define CPU_NONE		-1
+
+
+#if defined(DISABLE_ASSERT)
+#define ASSERT(expr)
+#define ASSERT_ALWAYS(expr)
+#else
+#define ASSERT(expr)	\
+        if(!(expr)) { \
+		printk( "Assertion [%s] failed! %s:%s(line=%d)\n",\
+			#expr,__FILE__,__FUNCTION__,__LINE__); \
+		panic("Assertion panic\n"); 	\
+        }
+
+#define ASSERT_ALWAYS(expr)	\
+        if(!(expr)) { \
+		printk( "Assertion [%s] failed! %s:%s(line=%d)\n",\
+			#expr,__FILE__,__FUNCTION__,__LINE__); \
+		panic("Assertion always panic\n"); 	\
+        }
+#endif	/* DISABLE_ASSERT */
+
+/* These are defined as cmn_err() replacements */
+#define PRINT_WARNING(x...)	{ printk("WARNING : "); printk(x); }
+#define PRINT_NOTICE(x...)	{ printk("NOTICE : "); printk(x); }
+#define PRINT_ALERT(x...)	{ printk("ALERT : "); printk(x); }
+#define PRINT_PANIC		panic
+
+#define mutex_t int
+#define spinlock_init(x,name) mutex_init(x, MUTEX_DEFAULT, name);
+
+#ifdef CONFIG_SMP
+#define cpu_enabled(cpu)        (test_bit(cpu, &cpu_online_map))
+#else
+#define cpu_enabled(cpu)	(1)
+#endif
+
+#include <asm/sn/hack.h>	/* for now */
+
+#endif	/* _ASM_SN_SGI_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/slotnum.h linux/include/asm-ia64/sn/slotnum.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/slotnum.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/slotnum.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,23 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SLOTNUM_H
+#define _ASM_SN_SLOTNUM_H
+
+typedef	unsigned char slotid_t;
+
+#if defined (CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/slotnum.h>
+#else
+
+#error <<BOMB! slotnum defined only for SN0 and SN1 >>
+
+#endif /* !CONFIG_SGI_IP35 && !CONFIG_IA64_SGI_SN1 */
+
+#endif /* _ASM_SN_SLOTNUM_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/addrs.h linux/include/asm-ia64/sn/sn1/addrs.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/addrs.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/addrs.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,311 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef _ASM_SN_SN1_ADDRS_H
+#define _ASM_SN_SN1_ADDRS_H
+
+/*
+ * IP35 (on a TRex) Address map
+ *
+ * This file contains a set of definitions and macros which are used
+ * to reference into the major address spaces (CAC, HSPEC, IO, MSPEC,
+ * and UNCAC) used by the IP35 architecture.  It also contains addresses
+ * for "major" statically locatable PROM/Kernel data structures, such as
+ * the partition table, the configuration data structure, etc.
+ * We make an implicit assumption that the processor using this file
+ * follows the R12K's provisions for specifying uncached attributes;
+ * should this change, the base registers may very well become processor-
+ * dependent.
+ *
+ * For more information on the address spaces, see the "Local Resources"
+ * chapter of the Hub specification.
+ *
+ * NOTE: This header file is included both by C and by assembler source
+ *	 files.  Please bracket any language-dependent definitions
+ *	 appropriately.
+ */
+
+#include <linux/config.h>
+
+/*
+ * Some of the macros here need to be casted to appropriate types when used
+ * from C.  They definitely must not be casted from assembly language so we
+ * use some new ANSI preprocessor stuff to paste these on where needed.
+ */
+
+#if defined(_RUN_UNCACHED)
+#define CAC_BASE		0x9600000000000000
+#else
+#define CAC_BASE		0xa800000000000000
+#endif
+
+#ifdef Colin
+#define HSPEC_BASE		0x9000000000000000
+#define IO_BASE			0x9200000000000000
+#define MSPEC_BASE		0x9400000000000000
+#define UNCAC_BASE		0x9600000000000000
+#else
+#define HSPEC_BASE              0xc0000b0000000000
+#define HSPEC_SWIZ_BASE         0xc000030000000000
+#define IO_BASE                 0xc0000a0000000000
+#define IO_SWIZ_BASE            0xc000020000000000
+#define MSPEC_BASE              0xc000000000000000
+#define UNCAC_BASE              0xc000000000000000
+#endif
+
+#define TO_PHYS(x)		(	      ((x) & TO_PHYS_MASK))
+#define TO_CAC(x)		(CAC_BASE   | ((x) & TO_PHYS_MASK))
+#define TO_UNCAC(x)		(UNCAC_BASE | ((x) & TO_PHYS_MASK))
+#define TO_MSPEC(x)		(MSPEC_BASE | ((x) & TO_PHYS_MASK))
+#define TO_HSPEC(x)		(HSPEC_BASE | ((x) & TO_PHYS_MASK))
+
+
+/*
+ * The following couple of definitions will eventually need to be variables,
+ * since the amount of address space assigned to each node depends on
+ * whether the system is running in N-mode (more nodes with less memory)
+ * or M-mode (fewer nodes with more memory).  We expect that it will
+ * be a while before we need to make this decision dynamically, though,
+ * so for now we just use defines bracketed by an ifdef.
+ */
+
+#if defined(N_MODE)
+
+#define NODE_SIZE_BITS		32
+#define BWIN_SIZE_BITS		28
+
+#define NASID_BITS		8
+#define NASID_BITMASK		(0xffLL)
+#define NASID_SHFT		32
+#define NASID_META_BITS		1
+#define NASID_LOCAL_BITS	7
+
+#define BDDIR_UPPER_MASK	(UINT64_CAST 0x1ffffff << 4)
+#define BDECC_UPPER_MASK	(UINT64_CAST 0x1fffffff )
+
+#else /* !defined(N_MODE), assume that M-mode is desired */
+
+#define NODE_SIZE_BITS		33
+#define BWIN_SIZE_BITS		29
+
+#define NASID_BITMASK		(0x7fLL)
+#define NASID_BITS		7
+#define NASID_SHFT		33
+#define NASID_META_BITS		0
+#define NASID_LOCAL_BITS	7
+
+#define BDDIR_UPPER_MASK	(UINT64_CAST 0x3ffffff << 4)
+#define BDECC_UPPER_MASK	(UINT64_CAST 0x3fffffff)
+
+#endif /* defined(N_MODE) */
+
+#define NODE_ADDRSPACE_SIZE	(UINT64_CAST 1 << NODE_SIZE_BITS)
+
+#define NASID_MASK		(UINT64_CAST NASID_BITMASK << NASID_SHFT)
+#define NASID_GET(_pa)		(int) ((UINT64_CAST (_pa) >>		\
+					NASID_SHFT) & NASID_BITMASK)
+
+#if _LANGUAGE_C && !defined(_STANDALONE)
+#ifndef REAL_HARDWARE
+#define NODE_SWIN_BASE(nasid, widget) RAW_NODE_SWIN_BASE(nasid, widget)
+#else
+#define NODE_SWIN_BASE(nasid, widget)					\
+	((widget == 0) ? NODE_BWIN_BASE((nasid), SWIN0_BIGWIN)		\
+	: RAW_NODE_SWIN_BASE(nasid, widget))
+#endif
+#else
+#define NODE_SWIN_BASE(nasid, widget) \
+     (NODE_IO_BASE(nasid) + (UINT64_CAST (widget) << SWIN_SIZE_BITS))
+#endif /* _LANGUAGE_C */
+
+/*
+ * The following definitions pertain to the IO special address
+ * space.  They define the location of the big and little windows
+ * of any given node.
+ */
+
+#define BWIN_INDEX_BITS		3
+#define BWIN_SIZE		(UINT64_CAST 1 << BWIN_SIZE_BITS)
+#define	BWIN_SIZEMASK		(BWIN_SIZE - 1)
+#define	BWIN_WIDGET_MASK	0x7
+#define NODE_BWIN_BASE0(nasid)	(NODE_IO_BASE(nasid) + BWIN_SIZE)
+#define NODE_BWIN_BASE(nasid, bigwin)	(NODE_BWIN_BASE0(nasid) + 	\
+			(UINT64_CAST (bigwin) << BWIN_SIZE_BITS))
+
+#define	BWIN_WIDGETADDR(addr)	((addr) & BWIN_SIZEMASK)
+#define	BWIN_WINDOWNUM(addr)	(((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+/*
+ * Verify if addr belongs to large window address of node with "nasid"
+ *
+ *
+ * NOTE: "addr" is expected to be XKPHYS address, and NOT physical
+ * address
+ *
+ *
+ */
+
+#define	NODE_BWIN_ADDR(nasid, addr)	\
+		(((addr) >= NODE_BWIN_BASE0(nasid)) && \
+		 ((addr) < (NODE_BWIN_BASE(nasid, HUB_NUM_BIG_WINDOW) + \
+				BWIN_SIZE)))
+
+/*
+ * The following define the major position-independent aliases used
+ * in IP27.
+ *	CALIAS -- Varies in size, points to the first n bytes of memory
+ *		  	on the reader's node.
+ */
+
+#define CALIAS_BASE		CAC_BASE
+
+
+
+#define BRIDGE_REG_PTR(_base, _off)	((volatile bridgereg_t *) \
+	((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
+
+#define SN0_WIDGET_BASE(_nasid, _wid)	(NODE_SWIN_BASE((_nasid), (_wid)))
+
+#if _LANGUAGE_C
+#define KERN_NMI_ADDR(nasid, slice)					\
+                    TO_NODE_UNCAC((nasid), IP27_NMI_KREGS_OFFSET + 	\
+				  (IP27_NMI_KREGS_CPU_SIZE * (slice)))
+#endif /* _LANGUAGE_C */
+
+
+/*
+ * needed by symmon so it needs to be outside #if PROM
+ * (see also POD_ELSCSIZE)
+ */
+#define IP27PROM_ELSC_BASE_A	PHYS_TO_K0(0x020e0000)
+#define IP27PROM_ELSC_BASE_B	PHYS_TO_K0(0x020e0800)
+#define IP27PROM_ELSC_BASE_C	PHYS_TO_K0(0x020e1000)
+#define IP27PROM_ELSC_BASE_D	PHYS_TO_K0(0x020e1800)
+#define IP27PROM_ELSC_SHFT	11
+#define IP27PROM_ELSC_SIZE	(1 << IP27PROM_ELSC_SHFT)
+
+#define FREEMEM_BASE		PHYS_TO_K0(0x4000000)
+
+#define IO6PROM_STACK_SHFT	14	/* stack per cpu */
+#define IO6PROM_STACK_SIZE	(1 << IO6PROM_STACK_SHFT)
+
+
+#define KL_UART_BASE	LOCAL_HSPEC(HSPEC_UART_0)	/* base of UART regs */
+#define KL_UART_CMD	LOCAL_HSPEC(HSPEC_UART_0)	/* UART command reg */
+#define KL_UART_DATA	LOCAL_HSPEC(HSPEC_UART_1)	/* UART data reg */
+
+#if !_LANGUAGE_ASSEMBLY
+/* Address 0x400 to 0x1000 ualias points to cache error eframe + misc
+ * CACHE_ERR_SP_PTR could either contain an address to the stack, or
+ * the stack could start at CACHE_ERR_SP_PTR
+ */
+#define CACHE_ERR_EFRAME	0x400
+
+#define CACHE_ERR_ECCFRAME	(CACHE_ERR_EFRAME + EF_SIZE)
+#define CACHE_ERR_SP_PTR	(0x1000 - 32)	/* why -32? TBD */
+#define CACHE_ERR_IBASE_PTR	(0x1000 - 40)
+#define CACHE_ERR_SP		(CACHE_ERR_SP_PTR - 16)
+#define CACHE_ERR_AREA_SIZE	(ARCS_SPB_OFFSET - CACHE_ERR_EFRAME)
+
+#endif	/* !_LANGUAGE_ASSEMBLY */
+
+/* Each CPU accesses UALIAS at a different physaddr, on 32k boundaries
+ * This determines the locations of the exception vectors
+ */
+#define UALIAS_FLIP_BASE	UALIAS_BASE
+#define UALIAS_FLIP_SHIFT	15
+#define UALIAS_FLIP_ADDR(_x)	((_x) ^ (cputoslice(getcpuid())<<UALIAS_FLIP_SHIFT))
+
+#if !defined(CONFIG_IA64_SGI_SN1) && !defined(CONFIG_IA64_GENERIC)
+#define EX_HANDLER_OFFSET(slice) ((slice) << UALIAS_FLIP_SHIFT)
+#endif
+#define EX_HANDLER_ADDR(nasid, slice)					\
+	PHYS_TO_K0(NODE_OFFSET(nasid) | EX_HANDLER_OFFSET(slice))
+#define EX_HANDLER_SIZE		0x0400
+
+#if !defined(CONFIG_IA64_SGI_SN1) && !defined(CONFIG_IA64_GENERIC)
+#define EX_FRAME_OFFSET(slice)	((slice) << UALIAS_FLIP_SHIFT | 0x400)
+#endif
+#define EX_FRAME_ADDR(nasid, slice)					\
+	PHYS_TO_K0(NODE_OFFSET(nasid) | EX_FRAME_OFFSET(slice))
+#define EX_FRAME_SIZE		0x0c00
+
+#define _ARCSPROM
+
+#ifdef _STANDALONE
+
+/*
+ * The PROM needs to pass the device base address and the
+ * device pci cfg space address to the device drivers during
+ * install. The COMPONENT->Key field is used for this purpose.
+ * Macros needed by IP27 device drivers to convert the
+ * COMPONENT->Key field to the respective base address.
+ * Key field looks as follows:
+ *
+ *  +----------------------------------------------------+
+ *  |devnasid | widget  |pciid |hubwidid|hstnasid | adap |
+ *  |   2     |   1     |  1   |   1    |    2    |   1  |
+ *  +----------------------------------------------------+
+ *  |         |         |      |        |         |      |
+ *  64        48        40     32       24        8      0
+ *
+ * These are used by standalone drivers till the io infrastructure
+ * is in place.
+ */
+
+#if _LANGUAGE_C
+
+#define uchar unsigned char
+
+#define KEY_DEVNASID_SHFT  48
+#define KEY_WIDID_SHFT	   40
+#define KEY_PCIID_SHFT	   32
+#define KEY_HUBWID_SHFT	   24
+#define KEY_HSTNASID_SHFT  8
+
+#define MK_SN0_KEY(nasid, widid, pciid) \
+			((((__psunsigned_t)nasid)<< KEY_DEVNASID_SHFT |\
+				((__psunsigned_t)widid) << KEY_WIDID_SHFT) |\
+				((__psunsigned_t)pciid) << KEY_PCIID_SHFT)
+
+#define ADD_HUBWID_KEY(key,hubwid)\
+			(key|=((__psunsigned_t)hubwid << KEY_HUBWID_SHFT))
+
+#define ADD_HSTNASID_KEY(key,hstnasid)\
+			(key|=((__psunsigned_t)hstnasid << KEY_HSTNASID_SHFT))
+
+#define GET_DEVNASID_FROM_KEY(key)	((short)(key >> KEY_DEVNASID_SHFT))
+#define GET_WIDID_FROM_KEY(key)		((uchar)(key >> KEY_WIDID_SHFT))
+#define GET_PCIID_FROM_KEY(key)		((uchar)(key >> KEY_PCIID_SHFT))
+#define GET_HUBWID_FROM_KEY(key)	((uchar)(key >> KEY_HUBWID_SHFT))
+#define GET_HSTNASID_FROM_KEY(key)	((short)(key >> KEY_HSTNASID_SHFT))
+
+#define PCI_64_TARGID_SHFT		60
+
+#define GET_PCIBASE_FROM_KEY(key)  (NODE_SWIN_BASE(GET_DEVNASID_FROM_KEY(key),\
+					GET_WIDID_FROM_KEY(key))\
+					| BRIDGE_DEVIO(GET_PCIID_FROM_KEY(key)))
+
+#define GET_PCICFGBASE_FROM_KEY(key) \
+			(NODE_SWIN_BASE(GET_DEVNASID_FROM_KEY(key),\
+			      GET_WIDID_FROM_KEY(key))\
+			| BRIDGE_TYPE0_CFG_DEV(GET_PCIID_FROM_KEY(key)))
+
+#define GET_WIDBASE_FROM_KEY(key) \
+                        (NODE_SWIN_BASE(GET_DEVNASID_FROM_KEY(key),\
+                              GET_WIDID_FROM_KEY(key)))
+
+#define PUT_INSTALL_STATUS(c,s)		c->Revision = s
+#define GET_INSTALL_STATUS(c)		c->Revision
+
+#endif /* _LANGUAGE_C */
+
+#endif /* _STANDALONE */
+
+#endif /* _ASM_SN_SN1_ADDRS_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/arch.h linux/include/asm-ia64/sn/sn1/arch.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/arch.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/arch.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,81 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_ARCH_H
+#define _ASM_SN_SN1_ARCH_H
+
+#if defined(N_MODE)
+#error "ERROR constants defined only for M-mode"
+#endif
+
+#include <linux/config.h>
+
+/*
+ * This is the maximum number of NASIDS that can be present in a system.
+ * (Highest NASID plus one.)
+ */
+#define MAX_NASIDS              128
+
+/*
+ * MAXCPUS refers to the maximum number of CPUs in a single kernel.
+ * This is not necessarily the same as MAXNODES * CPUS_PER_NODE
+ */
+#define MAXCPUS                 512
+
+/*
+ * This is the maximum number of nodes that can be part of a kernel.
+ * Effectively, it's the maximum number of compact node ids (cnodeid_t).
+ * This is not necessarily the same as MAX_NASIDS.
+ */
+#define MAX_COMPACT_NODES       128
+
+/*
+ * MAX_REGIONS refers to the maximum number of hardware partitioned regions.
+ */
+#define	MAX_REGIONS		64
+#define MAX_NONPREMIUM_REGIONS  16
+#define MAX_PREMIUM_REGIONS     MAX_REGIONS
+
+
+/*
+ * MAX_PARTITIONS refers to the maximum number of logically defined
+ * partitions the system can support.
+ */
+#define MAX_PARTITIONS		MAX_REGIONS
+
+
+#define NASID_MASK_BYTES	((MAX_NASIDS + 7) / 8)
+
+/*
+ * Slot constants for IP35
+ */
+
+#define MAX_MEM_SLOTS    8                     /* max slots per node */
+
+#if defined(N_MODE)
+#error "N-mode not supported"
+#endif
+
+#define SLOT_SHIFT      	(30)
+#define SLOT_MIN_MEM_SIZE	(64*1024*1024)
+
+/*
+ * two PIs per bedrock, two CPUs per PI
+ */
+#define NUM_SUBNODES	2
+#define SUBNODE_SHFT	1
+#define SUBNODE_MASK	(0x1 << SUBNODE_SHFT)
+#define LOCALCPU_SHFT	0
+#define LOCALCPU_MASK	(0x1 << LOCALCPU_SHFT)
+#define SUBNODE(slice)	(((slice) & SUBNODE_MASK) >> SUBNODE_SHFT)
+#define LOCALCPU(slice)	(((slice) & LOCALCPU_MASK) >> LOCALCPU_SHFT)
+#define TO_SLICE(subn, local)	(((subn) << SUBNODE_SHFT) | \
+				 ((local) << LOCALCPU_SHFT))
+
+#endif /* _ASM_SN_SN1_ARCH_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/bedrock.h linux/include/asm-ia64/sn/sn1/bedrock.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/bedrock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/bedrock.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,81 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef _ASM_SN_SN1_BEDROCK_H
+#define _ASM_SN_SN1_BEDROCK_H
+
+/* The secret password; used to release protection */
+#define HUB_PASSWORD		0x53474972756c6573ull
+
+#define CHIPID_HUB		0x3012
+#define CHIPID_ROUTER		0x3017
+
+#define BEDROCK_REV_1_0		1
+#define BEDROCK_REV_1_1		2
+
+#define MAX_HUB_PATH		80
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/arch.h>
+#include <asm/sn/sn1/addrs.h>
+#include <asm/sn/sn1/hubpi.h>
+#include <asm/sn/sn1/hubmd.h>
+#include <asm/sn/sn1/hubio.h>
+#include <asm/sn/sn1/hubni.h>
+#include <asm/sn/sn1/hublb.h>
+#include <asm/sn/sn1/hubxb.h>
+#include <asm/sn/sn1/hubpi_next.h>
+#include <asm/sn/sn1/hubmd_next.h>
+#include <asm/sn/sn1/hubio_next.h>
+#include <asm/sn/sn1/hubni_next.h>
+#include <asm/sn/sn1/hublb_next.h>
+#include <asm/sn/sn1/hubxb_next.h>
+
+#else /* ! CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */
+
+<< BOMB! CONFIG_SGI_IP35 is only defined for IP35 >>
+
+#endif /* defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC) */
+
+/* Translation of uncached attributes */
+#define	UATTR_HSPEC	0
+#define	UATTR_IO	1
+#define	UATTR_MSPEC	2
+#define	UATTR_UNCAC	3
+
+#if _LANGUAGE_ASSEMBLY
+
+/*
+ * Get nasid into register, r (uses at)
+ */
+#define GET_NASID_ASM(r)				\
+	dli	r, LOCAL_HUB_ADDR(LB_REV_ID);	\
+	ld	r, (r);					\
+	and	r, LRI_NODEID_MASK;			\
+	dsrl	r, LRI_NODEID_SHFT
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+#if _LANGUAGE_C
+
+#include <asm/sn/xtalk/xwidget.h>
+
+/* hub-as-widget iograph info, labelled by INFO_LBL_XWIDGET */
+typedef struct v_hub_s *v_hub_t;
+typedef uint64_t      rtc_time_t;
+
+struct nodepda_s;
+int hub_check_pci_equiv(void *addra, void *addrb);
+void capture_hub_stats(cnodeid_t, struct nodepda_s *);
+void init_hub_stats(cnodeid_t, struct nodepda_s *);
+
+#endif /* _LANGUAGE_C */
+
+#endif /* _ASM_SN_SN1_BEDROCK_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubdev.h linux/include/asm-ia64/sn/sn1/hubdev.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubdev.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubdev.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,22 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef _ASM_SN_SN1_HUBDEV_H
+#define _ASM_SN_SN1_HUBDEV_H
+
+extern void hubdev_init(void);
+extern void hubdev_register(int (*attach_method)(devfs_handle_t));
+extern int hubdev_unregister(int (*attach_method)(devfs_handle_t));
+extern int hubdev_docallouts(devfs_handle_t hub);
+
+extern caddr_t hubdev_prombase_get(devfs_handle_t hub);
+extern cnodeid_t hubdev_cnodeid_get(devfs_handle_t hub);
+
+#endif /* _ASM_SN_SN1_HUBDEV_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubio.h linux/include/asm-ia64/sn/sn1/hubio.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubio.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubio.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,5017 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/************************************************************************
+ *                                                                      *
+ *      WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!      *
+ *                                                                      *
+ * This file is created by an automated script. Any (minimal) changes   *
+ * made manually to this  file should be made with care.                *
+ *                                                                      *
+ *               MAKE ALL ADDITIONS TO THE END OF THIS FILE             *
+ *                                                                      *
+ ************************************************************************/
+
+
+#ifndef _ASM_SN_SN1_HUBIO_H
+#define _ASM_SN_SN1_HUBIO_H
+
+
+#define    IIO_WID                   0x00400000    /*
+                                                    * Crosstalk Widget
+                                                    * Identification This
+                                                    * register is also
+                                                    * accessible from
+                                                    * Crosstalk at
+                                                    * address 0x0.
+                                                    */
+
+
+
+#define    IIO_WSTAT                 0x00400008    /*
+                                                    * Crosstalk Widget
+                                                    * Status
+                                                    */
+
+
+
+#define    IIO_WCR                   0x00400020    /*
+                                                    * Crosstalk Widget
+                                                    * Control Register
+                                                    */
+
+
+
+#define    IIO_ILAPR                 0x00400100    /*
+                                                    * IO Local Access
+                                                    * Protection Register
+                                                    */
+
+
+
+#define    IIO_ILAPO                 0x00400108    /*
+                                                    * IO Local Access
+                                                    * Protection Override
+                                                    */
+
+
+
+#define    IIO_IOWA                  0x00400110    /*
+                                                    * IO Outbound Widget
+                                                    * Access
+                                                    */
+
+
+
+#define    IIO_IIWA                  0x00400118    /*
+                                                    * IO Inbound Widget
+                                                    * Access
+                                                    */
+
+
+
+#define    IIO_IIDEM                 0x00400120    /*
+                                                    * IO Inbound Device
+                                                    * Error Mask
+                                                    */
+
+
+
+#define    IIO_ILCSR                 0x00400128    /*
+                                                    * IO LLP Control and
+                                                    * Status Register
+                                                    */
+
+
+
+#define    IIO_ILLR                  0x00400130    /* IO LLP Log Register    */
+
+
+
+#define    IIO_IIDSR                 0x00400138    /*
+                                                    * IO Interrupt
+                                                    * Destination
+                                                    */
+
+
+
+#define    IIO_IGFX0                 0x00400140    /*
+                                                    * IO Graphics
+                                                    * Node-Widget Map 0
+                                                    */
+
+
+
+#define    IIO_IGFX1                 0x00400148    /*
+                                                    * IO Graphics
+                                                    * Node-Widget Map 1
+                                                    */
+
+
+
+#define    IIO_ISCR0                 0x00400150    /*
+                                                    * IO Scratch Register
+                                                    * 0
+                                                    */
+
+
+
+#define    IIO_ISCR1                 0x00400158    /*
+                                                    * IO Scratch Register
+                                                    * 1
+                                                    */
+
+
+
+#define    IIO_ITTE1                 0x00400160    /*
+                                                    * IO Translation
+                                                    * Table Entry 1
+                                                    */
+
+
+
+#define    IIO_ITTE2                 0x00400168    /*
+                                                    * IO Translation
+                                                    * Table Entry 2
+                                                    */
+
+
+
+#define    IIO_ITTE3                 0x00400170    /*
+                                                    * IO Translation
+                                                    * Table Entry 3
+                                                    */
+
+
+
+#define    IIO_ITTE4                 0x00400178    /*
+                                                    * IO Translation
+                                                    * Table Entry 4
+                                                    */
+
+
+
+#define    IIO_ITTE5                 0x00400180    /*
+                                                    * IO Translation
+                                                    * Table Entry 5
+                                                    */
+
+
+
+#define    IIO_ITTE6                 0x00400188    /*
+                                                    * IO Translation
+                                                    * Table Entry 6
+                                                    */
+
+
+
+#define    IIO_ITTE7                 0x00400190    /*
+                                                    * IO Translation
+                                                    * Table Entry 7
+                                                    */
+
+
+
+#define    IIO_IPRB0                 0x00400198    /* IO PRB Entry 0         */
+
+
+
+#define    IIO_IPRB8                 0x004001A0    /* IO PRB Entry 8         */
+
+
+
+#define    IIO_IPRB9                 0x004001A8    /* IO PRB Entry 9         */
+
+
+
+#define    IIO_IPRBA                 0x004001B0    /* IO PRB Entry A         */
+
+
+
+#define    IIO_IPRBB                 0x004001B8    /* IO PRB Entry B         */
+
+
+
+#define    IIO_IPRBC                 0x004001C0    /* IO PRB Entry C         */
+
+
+
+#define    IIO_IPRBD                 0x004001C8    /* IO PRB Entry D         */
+
+
+
+#define    IIO_IPRBE                 0x004001D0    /* IO PRB Entry E         */
+
+
+
+#define    IIO_IPRBF                 0x004001D8    /* IO PRB Entry F         */
+
+
+
+#define    IIO_IXCC                  0x004001E0    /*
+                                                    * IO Crosstalk Credit
+                                                    * Count Timeout
+                                                    */
+
+
+
+#define    IIO_IMEM                  0x004001E8    /*
+                                                    * IO Miscellaneous
+                                                    * Error Mask
+                                                    */
+
+
+
+#define    IIO_IXTT                  0x004001F0    /*
+                                                    * IO Crosstalk
+                                                    * Timeout Threshold
+                                                    */
+
+
+
+#define    IIO_IECLR                 0x004001F8    /*
+                                                    * IO Error Clear
+                                                    * Register
+                                                    */
+
+
+
+#define    IIO_IBCR                  0x00400200    /*
+                                                    * IO BTE Control
+                                                    * Register
+                                                    */
+
+
+
+#define    IIO_IXSM                  0x00400208    /*
+                                                    * IO Crosstalk
+                                                    * Spurious Message
+                                                    */
+
+
+
+#define    IIO_IXSS                  0x00400210    /*
+                                                    * IO Crosstalk
+                                                    * Spurious Sideband
+                                                    */
+
+
+
+#define    IIO_ILCT                  0x00400218    /* IO LLP Channel Test    */
+
+
+
+#define    IIO_IIEPH1                0x00400220    /*
+                                                    * IO Incoming Error
+                                                    * Packet Header, Part
+                                                    * 1
+                                                    */
+
+
+
+#define    IIO_IIEPH2                0x00400228    /*
+                                                    * IO Incoming Error
+                                                    * Packet Header, Part
+                                                    * 2
+                                                    */
+
+
+
+#define    IIO_IPCA                  0x00400300    /*
+                                                    * IO PRB Counter
+                                                    * Adjust
+                                                    */
+
+
+
+#define    IIO_IPRTE0                0x00400308    /*
+                                                    * IO PIO Read Address
+                                                    * Table Entry 0
+                                                    */
+
+
+
+#define    IIO_IPRTE1                0x00400310    /*
+                                                    * IO PIO Read Address
+                                                    * Table Entry 1
+                                                    */
+
+
+
+#define    IIO_IPRTE2                0x00400318    /*
+                                                    * IO PIO Read Address
+                                                    * Table Entry 2
+                                                    */
+
+
+
+#define    IIO_IPRTE3                0x00400320    /*
+                                                    * IO PIO Read Address
+                                                    * Table Entry 3
+                                                    */
+
+
+
+#define    IIO_IPRTE4                0x00400328    /*
+                                                    * IO PIO Read Address
+                                                    * Table Entry 4
+                                                    */
+
+
+
+#define    IIO_IPRTE5                0x00400330    /*
+                                                    * IO PIO Read Address
+                                                    * Table Entry 5
+                                                    */
+
+
+
+#define    IIO_IPRTE6                0x00400338    /*
+                                                    * IO PIO Read Address
+                                                    * Table Entry 6
+                                                    */
+
+
+
+#define    IIO_IPRTE7                0x00400340    /*
+                                                    * IO PIO Read Address
+                                                    * Table Entry 7
+                                                    */
+
+
+
+#define    IIO_IPDR                  0x00400388    /*
+                                                    * IO PIO Deallocation
+                                                    * Register
+                                                    */
+
+
+
+#define    IIO_ICDR                  0x00400390    /*
+                                                    * IO CRB Entry
+                                                    * Deallocation
+                                                    * Register
+                                                    */
+
+
+
+#define    IIO_IFDR                  0x00400398    /*
+                                                    * IO IOQ FIFO Depth
+                                                    * Register
+                                                    */
+
+
+
+#define    IIO_IIAP                  0x004003A0    /*
+                                                    * IO IIQ Arbitration
+                                                    * Parameters
+                                                    */
+
+
+
+#define    IIO_ICMR                  0x004003A8    /*
+                                                    * IO CRB Management
+                                                    * Register
+                                                    */
+
+
+
+#define    IIO_ICCR                  0x004003B0    /*
+                                                    * IO CRB Control
+                                                    * Register
+                                                    */
+
+
+
+#define    IIO_ICTO                  0x004003B8    /* IO CRB Timeout         */
+
+
+
+#define    IIO_ICTP                  0x004003C0    /*
+                                                    * IO CRB Timeout
+                                                    * Prescalar
+                                                    */
+
+
+
+#define    IIO_ICRB0_A               0x00400400    /* IO CRB Entry 0_A       */
+
+
+
+#define    IIO_ICRB0_B               0x00400408    /* IO CRB Entry 0_B       */
+
+
+
+#define    IIO_ICRB0_C               0x00400410    /* IO CRB Entry 0_C       */
+
+
+
+#define    IIO_ICRB0_D               0x00400418    /* IO CRB Entry 0_D       */
+
+
+
+#define    IIO_ICRB1_A               0x00400420    /* IO CRB Entry 1_A       */
+
+
+
+#define    IIO_ICRB1_B               0x00400428    /* IO CRB Entry 1_B       */
+
+
+
+#define    IIO_ICRB1_C               0x00400430    /* IO CRB Entry 1_C       */
+
+
+
+#define    IIO_ICRB1_D               0x00400438    /* IO CRB Entry 1_D       */
+
+
+
+#define    IIO_ICRB2_A               0x00400440    /* IO CRB Entry 2_A       */
+
+
+
+#define    IIO_ICRB2_B               0x00400448    /* IO CRB Entry 2_B       */
+
+
+
+#define    IIO_ICRB2_C               0x00400450    /* IO CRB Entry 2_C       */
+
+
+
+#define    IIO_ICRB2_D               0x00400458    /* IO CRB Entry 2_D       */
+
+
+
+#define    IIO_ICRB3_A               0x00400460    /* IO CRB Entry 3_A       */
+
+
+
+#define    IIO_ICRB3_B               0x00400468    /* IO CRB Entry 3_B       */
+
+
+
+#define    IIO_ICRB3_C               0x00400470    /* IO CRB Entry 3_C       */
+
+
+
+#define    IIO_ICRB3_D               0x00400478    /* IO CRB Entry 3_D       */
+
+
+
+#define    IIO_ICRB4_A               0x00400480    /* IO CRB Entry 4_A       */
+
+
+
+#define    IIO_ICRB4_B               0x00400488    /* IO CRB Entry 4_B       */
+
+
+
+#define    IIO_ICRB4_C               0x00400490    /* IO CRB Entry 4_C       */
+
+
+
+#define    IIO_ICRB4_D               0x00400498    /* IO CRB Entry 4_D       */
+
+
+
+#define    IIO_ICRB5_A               0x004004A0    /* IO CRB Entry 5_A       */
+
+
+
+#define    IIO_ICRB5_B               0x004004A8    /* IO CRB Entry 5_B       */
+
+
+
+#define    IIO_ICRB5_C               0x004004B0    /* IO CRB Entry 5_C       */
+
+
+
+#define    IIO_ICRB5_D               0x004004B8    /* IO CRB Entry 5_D       */
+
+
+
+#define    IIO_ICRB6_A               0x004004C0    /* IO CRB Entry 6_A       */
+
+
+
+#define    IIO_ICRB6_B               0x004004C8    /* IO CRB Entry 6_B       */
+
+
+
+#define    IIO_ICRB6_C               0x004004D0    /* IO CRB Entry 6_C       */
+
+
+
+#define    IIO_ICRB6_D               0x004004D8    /* IO CRB Entry 6_D       */
+
+
+
+#define    IIO_ICRB7_A               0x004004E0    /* IO CRB Entry 7_A       */
+
+
+
+#define    IIO_ICRB7_B               0x004004E8    /* IO CRB Entry 7_B       */
+
+
+
+#define    IIO_ICRB7_C               0x004004F0    /* IO CRB Entry 7_C       */
+
+
+
+#define    IIO_ICRB7_D               0x004004F8    /* IO CRB Entry 7_D       */
+
+
+
+#define    IIO_ICRB8_A               0x00400500    /* IO CRB Entry 8_A       */
+
+
+
+#define    IIO_ICRB8_B               0x00400508    /* IO CRB Entry 8_B       */
+
+
+
+#define    IIO_ICRB8_C               0x00400510    /* IO CRB Entry 8_C       */
+
+
+
+#define    IIO_ICRB8_D               0x00400518    /* IO CRB Entry 8_D       */
+
+
+
+#define    IIO_ICRB9_A               0x00400520    /* IO CRB Entry 9_A       */
+
+
+
+#define    IIO_ICRB9_B               0x00400528    /* IO CRB Entry 9_B       */
+
+
+
+#define    IIO_ICRB9_C               0x00400530    /* IO CRB Entry 9_C       */
+
+
+
+#define    IIO_ICRB9_D               0x00400538    /* IO CRB Entry 9_D       */
+
+
+
+#define    IIO_ICRBA_A               0x00400540    /* IO CRB Entry A_A       */
+
+
+
+#define    IIO_ICRBA_B               0x00400548    /* IO CRB Entry A_B       */
+
+
+
+#define    IIO_ICRBA_C               0x00400550    /* IO CRB Entry A_C       */
+
+
+
+#define    IIO_ICRBA_D               0x00400558    /* IO CRB Entry A_D       */
+
+
+
+#define    IIO_ICRBB_A               0x00400560    /* IO CRB Entry B_A       */
+
+
+
+#define    IIO_ICRBB_B               0x00400568    /* IO CRB Entry B_B       */
+
+
+
+#define    IIO_ICRBB_C               0x00400570    /* IO CRB Entry B_C       */
+
+
+
+#define    IIO_ICRBB_D               0x00400578    /* IO CRB Entry B_D       */
+
+
+
+#define    IIO_ICRBC_A               0x00400580    /* IO CRB Entry C_A       */
+
+
+
+#define    IIO_ICRBC_B               0x00400588    /* IO CRB Entry C_B       */
+
+
+
+#define    IIO_ICRBC_C               0x00400590    /* IO CRB Entry C_C       */
+
+
+
+#define    IIO_ICRBC_D               0x00400598    /* IO CRB Entry C_D       */
+
+
+
+#define    IIO_ICRBD_A               0x004005A0    /* IO CRB Entry D_A       */
+
+
+
+#define    IIO_ICRBD_B               0x004005A8    /* IO CRB Entry D_B       */
+
+
+
+#define    IIO_ICRBD_C               0x004005B0    /* IO CRB Entry D_C       */
+
+
+
+#define    IIO_ICRBD_D               0x004005B8    /* IO CRB Entry D_D       */
+
+
+
+#define    IIO_ICRBE_A               0x004005C0    /* IO CRB Entry E_A       */
+
+
+
+#define    IIO_ICRBE_B               0x004005C8    /* IO CRB Entry E_B       */
+
+
+
+#define    IIO_ICRBE_C               0x004005D0    /* IO CRB Entry E_C       */
+
+
+
+#define    IIO_ICRBE_D               0x004005D8    /* IO CRB Entry E_D       */
+
+
+
+#define    IIO_ICSML                 0x00400600    /*
+                                                    * IO CRB Spurious
+                                                    * Message Low
+                                                    */
+
+
+
+#define    IIO_ICSMH                 0x00400608    /*
+                                                    * IO CRB Spurious
+                                                    * Message High
+                                                    */
+
+
+
+#define    IIO_IDBSS                 0x00400610    /*
+                                                    * IO Debug Submenu
+                                                    * Select
+                                                    */
+
+
+
+#define    IIO_IBLS0                 0x00410000    /*
+                                                    * IO BTE Length
+                                                    * Status 0
+                                                    */
+
+
+
+#define    IIO_IBSA0                 0x00410008    /*
+                                                    * IO BTE Source
+                                                    * Address 0
+                                                    */
+
+
+
+#define    IIO_IBDA0                 0x00410010    /*
+                                                    * IO BTE Destination
+                                                    * Address 0
+                                                    */
+
+
+
+#define    IIO_IBCT0                 0x00410018    /*
+                                                    * IO BTE Control
+                                                    * Terminate 0
+                                                    */
+
+
+
+#define    IIO_IBNA0                 0x00410020    /*
+                                                    * IO BTE Notification
+                                                    * Address 0
+                                                    */
+
+
+
+#define    IIO_IBIA0                 0x00410028    /*
+                                                    * IO BTE Interrupt
+                                                    * Address 0
+                                                    */
+
+
+
+#define    IIO_IBLS1                 0x00420000    /*
+                                                    * IO BTE Length
+                                                    * Status 1
+                                                    */
+
+
+
+#define    IIO_IBSA1                 0x00420008    /*
+                                                    * IO BTE Source
+                                                    * Address 1
+                                                    */
+
+
+
+#define    IIO_IBDA1                 0x00420010    /*
+                                                    * IO BTE Destination
+                                                    * Address 1
+                                                    */
+
+
+
+#define    IIO_IBCT1                 0x00420018    /*
+                                                    * IO BTE Control
+                                                    * Terminate 1
+                                                    */
+
+
+
+#define    IIO_IBNA1                 0x00420020    /*
+                                                    * IO BTE Notification
+                                                    * Address 1
+                                                    */
+
+
+
+#define    IIO_IBIA1                 0x00420028    /*
+                                                    * IO BTE Interrupt
+                                                    * Address 1
+                                                    */
+
+
+
+#define    IIO_IPCR                  0x00430000    /*
+                                                    * IO Performance
+                                                    * Control
+                                                    */
+
+
+
+#define    IIO_IPPR                  0x00430008    /*
+                                                    * IO Performance
+                                                    * Profiling
+                                                    */
+
+
+
+
+
+#ifdef _LANGUAGE_C
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register echoes some information from the         *
+ * LB_REV_ID register. It is available through Crosstalk as described   *
+ * above. The REV_NUM and MFG_NUM fields receive their values from      *
+ * the REVISION and MANUFACTURER fields in the LB_REV_ID register.      *
+ * The PART_NUM field's value is the Crosstalk device ID number that    *
+ * Steve Miller assigned to the Bedrock chip.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_wid_u {
+	bdrkreg_t	ii_wid_regval;
+	struct	{
+		bdrkreg_t	w_rsvd_1		  :	 1;
+		bdrkreg_t	w_mfg_num		  :	11;
+		bdrkreg_t	w_part_num		  :	16;
+		bdrkreg_t	w_rev_num		  :	 4;
+		bdrkreg_t	w_rsvd			  :	32;
+	} ii_wid_fld_s;
+} ii_wid_u_t;
+
+#else
+
+typedef union ii_wid_u {
+	bdrkreg_t	ii_wid_regval;
+	struct  {
+		bdrkreg_t	w_rsvd                    :	32;
+		bdrkreg_t	w_rev_num                 :	 4;
+		bdrkreg_t	w_part_num                :	16;
+		bdrkreg_t	w_mfg_num                 :	11;
+		bdrkreg_t	w_rsvd_1                  :	 1;
+	} ii_wid_fld_s;
+} ii_wid_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  The fields in this register are set upon detection of an error      *
+ * and cleared by various mechanisms, as explained in the               *
+ * description.                                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_wstat_u {
+	bdrkreg_t	ii_wstat_regval;
+	struct	{
+		bdrkreg_t	w_pending		  :	 4;
+		bdrkreg_t	w_xt_crd_to		  :	 1;
+		bdrkreg_t	w_xt_tail_to		  :	 1;
+		bdrkreg_t	w_rsvd_3		  :	 3;
+		bdrkreg_t       w_tx_mx_rty               :      1;
+		bdrkreg_t	w_rsvd_2		  :	 6;
+		bdrkreg_t	w_llp_tx_cnt		  :	 8;
+		bdrkreg_t	w_rsvd_1		  :	 8;
+		bdrkreg_t	w_crazy			  :	 1;
+		bdrkreg_t	w_rsvd			  :	31;
+	} ii_wstat_fld_s;
+} ii_wstat_u_t;
+
+#else
+
+typedef union ii_wstat_u {
+	bdrkreg_t	ii_wstat_regval;
+	struct  {
+		bdrkreg_t	w_rsvd                    :	31;
+		bdrkreg_t	w_crazy                   :	 1;
+		bdrkreg_t	w_rsvd_1                  :	 8;
+		bdrkreg_t	w_llp_tx_cnt              :	 8;
+		bdrkreg_t	w_rsvd_2                  :	 6;
+		bdrkreg_t	w_tx_mx_rty               :	 1;
+		bdrkreg_t	w_rsvd_3                  :	 3;
+		bdrkreg_t	w_xt_tail_to              :	 1;
+		bdrkreg_t	w_xt_crd_to               :	 1;
+		bdrkreg_t	w_pending                 :	 4;
+	} ii_wstat_fld_s;
+} ii_wstat_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This is a read-write enabled register. It controls     *
+ * various aspects of the Crosstalk flow control.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_wcr_u {
+	bdrkreg_t	ii_wcr_regval;
+	struct	{
+		bdrkreg_t	w_wid			  :	 4;
+		bdrkreg_t	w_tag			  :	 1;
+		bdrkreg_t	w_rsvd_1		  :	 8;
+		bdrkreg_t	w_dst_crd		  :	 3;
+		bdrkreg_t	w_f_bad_pkt		  :	 1;
+		bdrkreg_t	w_dir_con		  :	 1;
+		bdrkreg_t	w_e_thresh		  :	 5;
+		bdrkreg_t	w_rsvd			  :	41;
+	} ii_wcr_fld_s;
+} ii_wcr_u_t;
+
+#else
+
+typedef union ii_wcr_u {
+	bdrkreg_t	ii_wcr_regval;
+	struct  {
+		bdrkreg_t	w_rsvd                    :	41;
+		bdrkreg_t	w_e_thresh                :	 5;
+		bdrkreg_t	w_dir_con                 :	 1;
+		bdrkreg_t	w_f_bad_pkt               :	 1;
+		bdrkreg_t	w_dst_crd                 :	 3;
+		bdrkreg_t	w_rsvd_1                  :	 8;
+		bdrkreg_t	w_tag                     :	 1;
+		bdrkreg_t	w_wid                     :	 4;
+	} ii_wcr_fld_s;
+} ii_wcr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register's value is a bit vector that guards      *
+ * access to local registers within the II as well as to external       *
+ * Crosstalk widgets. Each bit in the register corresponds to a         *
+ * particular region in the system; a region consists of one, two or    *
+ * four nodes (depending on the value of the REGION_SIZE field in the   *
+ * LB_REV_ID register, which is documented in Section 8.3.1.1). The     *
+ * protection provided by this register applies to PIO read             *
+ * operations as well as PIO write operations. The II will perform a    *
+ * PIO read or write request only if the bit for the requestor's        *
+ * region is set; otherwise, the II will not perform the requested      *
+ * operation and will return an error response. When a PIO read or      *
+ * write request targets an external Crosstalk widget, then not only    *
+ * must the bit for the requestor's region be set in the ILAPR, but     *
+ * also the target widget's bit in the IOWA register must be set in     *
+ * order for the II to perform the requested operation; otherwise,      *
+ * the II will return an error response. Hence, the protection          *
+ * provided by the IOWA register supplements the protection provided    *
+ * by the ILAPR for requests that target external Crosstalk widgets.    *
+ * This register itself can be accessed only by the nodes whose         *
+ * region ID bits are enabled in this same register. It can also be     *
+ * accessed through the IAlias space by the local processors.           *
+ * The reset value of this register allows access by all nodes.         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union ii_ilapr_u {
+	bdrkreg_t	ii_ilapr_regval;
+	struct  {
+		bdrkreg_t	i_region                  :	64;
+	} ii_ilapr_fld_s;
+} ii_ilapr_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  A write to this register of the 64-bit value           *
+ * "SGIrules" in ASCII, will cause the bit in the ILAPR register        *
+ * corresponding to the region of the requestor to be set (allow        *
+ * access). A write of any other value will be ignored. Access          *
+ * protection for this register is "SGIrules".                          *
+ * This register can also be accessed through the IAlias space.         *
+ * However, this access will not change the access permissions in the   *
+ * ILAPR.                                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ilapo_u {
+	bdrkreg_t	ii_ilapo_regval;
+	struct	{
+		bdrkreg_t	i_io_ovrride		  :	 9;
+		bdrkreg_t	i_rsvd			  :	55;
+	} ii_ilapo_fld_s;
+} ii_ilapo_u_t;
+
+#else
+
+typedef union ii_ilapo_u {
+	bdrkreg_t	ii_ilapo_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	55;
+		bdrkreg_t	i_io_ovrride              :	 9;
+	} ii_ilapo_fld_s;
+} ii_ilapo_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register qualifies all the PIO and Graphics writes launched    *
+ * from the Bedrock towards a widget.                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iowa_u {
+	bdrkreg_t	ii_iowa_regval;
+	struct	{
+		bdrkreg_t	i_w0_oac		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+                bdrkreg_t       i_wx_oac                  :      8;
+		bdrkreg_t	i_rsvd			  :	48;
+	} ii_iowa_fld_s;
+} ii_iowa_u_t;
+
+#else
+
+typedef union ii_iowa_u {
+	bdrkreg_t	ii_iowa_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	48;
+		bdrkreg_t	i_wx_oac                  :	 8;
+		bdrkreg_t	i_rsvd_1                  :	 7;
+		bdrkreg_t	i_w0_oac                  :	 1;
+	} ii_iowa_fld_s;
+} ii_iowa_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register qualifies all the requests launched      *
+ * from a widget towards the Bedrock. This register is intended to be   *
+ * used by software in case of misbehaving widgets.                     *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iiwa_u {
+	bdrkreg_t	ii_iiwa_regval;
+	struct  {
+		bdrkreg_t	i_w0_iac                  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+		bdrkreg_t	i_wx_iac		  :	 8;
+		bdrkreg_t	i_rsvd			  :	48;
+	} ii_iiwa_fld_s;
+} ii_iiwa_u_t;
+
+#else
+
+typedef union ii_iiwa_u {
+	bdrkreg_t	ii_iiwa_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	48;
+		bdrkreg_t	i_wx_iac		  :	 8;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+		bdrkreg_t	i_w0_iac		  :	 1;
+	} ii_iiwa_fld_s;
+} ii_iiwa_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register qualifies all the operations launched    *
+ * from a widget towards the Bedrock. It allows individual access       *
+ * control for up to 8 devices per widget. A device refers to           *
+ * individual DMA master hosted by a widget.                            *
+ * The bits in each field of this register are cleared by the Bedrock   *
+ * upon detection of an error which requires the device to be           *
+ * disabled. These fields assume that 0<=TNUM<=7 (i.e., Bridge-centric  *
+ * Crosstalk). Whether or not a device has access rights to this        *
+ * Bedrock is determined by an AND of the device enable bit in the      *
+ * appropriate field of this register and the corresponding bit in      *
+ * the Wx_IAC field (for the widget which this device belongs to).      *
+ * The bits in this field are set by writing a 1 to them. Incoming      *
+ * replies from Crosstalk are not subject to this access control        *
+ * mechanism.                                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iidem_u {
+	bdrkreg_t	ii_iidem_regval;
+	struct	{
+		bdrkreg_t	i_w8_dxs		  :	 8;
+		bdrkreg_t	i_w9_dxs		  :	 8;
+		bdrkreg_t	i_wa_dxs		  :	 8;
+		bdrkreg_t	i_wb_dxs		  :	 8;
+		bdrkreg_t	i_wc_dxs		  :	 8;
+		bdrkreg_t	i_wd_dxs		  :	 8;
+		bdrkreg_t	i_we_dxs		  :	 8;
+		bdrkreg_t	i_wf_dxs		  :	 8;
+	} ii_iidem_fld_s;
+} ii_iidem_u_t;
+
+#else
+
+typedef union ii_iidem_u {
+	bdrkreg_t	ii_iidem_regval;
+	struct  {
+		bdrkreg_t	i_wf_dxs                  :	 8;
+		bdrkreg_t	i_we_dxs                  :	 8;
+		bdrkreg_t	i_wd_dxs                  :	 8;
+		bdrkreg_t	i_wc_dxs                  :	 8;
+		bdrkreg_t	i_wb_dxs                  :	 8;
+		bdrkreg_t	i_wa_dxs                  :	 8;
+		bdrkreg_t	i_w9_dxs                  :	 8;
+		bdrkreg_t	i_w8_dxs                  :	 8;
+	} ii_iidem_fld_s;
+} ii_iidem_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the various programmable fields necessary    *
+ * for controlling and observing the LLP signals.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ilcsr_u {
+	bdrkreg_t	ii_ilcsr_regval;
+	struct  {
+		bdrkreg_t	i_nullto                  :	 6;
+		bdrkreg_t	i_rsvd_4		  :	 2;
+		bdrkreg_t	i_wrmrst		  :	 1;
+		bdrkreg_t	i_rsvd_3		  :	 1;
+		bdrkreg_t	i_llp_en		  :	 1;
+		bdrkreg_t	i_bm8			  :	 1;
+		bdrkreg_t	i_llp_stat		  :	 2;
+		bdrkreg_t	i_remote_power		  :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 1;
+		bdrkreg_t	i_maxrtry		  :	10;
+		bdrkreg_t	i_d_avail_sel		  :	 2;
+		bdrkreg_t	i_rsvd_1		  :	 4;
+		bdrkreg_t	i_maxbrst		  :	10;
+                bdrkreg_t       i_rsvd                    :     22;
+
+	} ii_ilcsr_fld_s;
+} ii_ilcsr_u_t;
+
+#else
+
+typedef union ii_ilcsr_u {
+	bdrkreg_t	ii_ilcsr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	22;
+		bdrkreg_t	i_maxbrst		  :	10;
+		bdrkreg_t	i_rsvd_1		  :	 4;
+		bdrkreg_t	i_d_avail_sel		  :	 2;
+		bdrkreg_t	i_maxrtry		  :	10;
+		bdrkreg_t	i_rsvd_2		  :	 1;
+		bdrkreg_t	i_remote_power		  :	 1;
+		bdrkreg_t	i_llp_stat		  :	 2;
+		bdrkreg_t	i_bm8			  :	 1;
+		bdrkreg_t	i_llp_en		  :	 1;
+		bdrkreg_t	i_rsvd_3		  :	 1;
+		bdrkreg_t	i_wrmrst		  :	 1;
+		bdrkreg_t	i_rsvd_4		  :	 2;
+		bdrkreg_t	i_nullto		  :	 6;
+	} ii_ilcsr_fld_s;
+} ii_ilcsr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This is simply a status register that monitors the LLP error        *
+ * rate.                                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_illr_u {
+	bdrkreg_t	ii_illr_regval;
+	struct	{
+		bdrkreg_t	i_sn_cnt		  :	16;
+		bdrkreg_t	i_cb_cnt		  :	16;
+		bdrkreg_t	i_rsvd			  :	32;
+	} ii_illr_fld_s;
+} ii_illr_u_t;
+
+#else
+
+typedef union ii_illr_u {
+	bdrkreg_t	ii_illr_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	32;
+		bdrkreg_t	i_cb_cnt                  :	16;
+		bdrkreg_t	i_sn_cnt                  :	16;
+	} ii_illr_fld_s;
+} ii_illr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  All II-detected non-BTE error interrupts are           *
+ * specified via this register.                                         *
+ * NOTE: The PI interrupt register address is hardcoded in the II. If   *
+ * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI      *
+ * packet) to address offset 0x0180_0090 within the local register      *
+ * address space of PI0 on the node specified by the NODE field. If     *
+ * PI_ID==1, then the II sends the interrupt request to address         *
+ * offset 0x01A0_0090 within the local register address space of PI1    *
+ * on the node specified by the NODE field.                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iidsr_u {
+	bdrkreg_t	ii_iidsr_regval;
+	struct  {
+		bdrkreg_t	i_level                   :	 7;
+		bdrkreg_t	i_rsvd_4		  :	 1;
+		bdrkreg_t       i_pi_id                   :      1;
+		bdrkreg_t	i_node			  :	 8;
+		bdrkreg_t       i_rsvd_3                  :      7;
+		bdrkreg_t	i_enable		  :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 3;
+		bdrkreg_t	i_int_sent		  :	 1;
+		bdrkreg_t       i_rsvd_1                  :      3;
+		bdrkreg_t	i_pi0_forward_int	  :	 1;
+		bdrkreg_t	i_pi1_forward_int	  :	 1;
+		bdrkreg_t	i_rsvd			  :	30;
+	} ii_iidsr_fld_s;
+} ii_iidsr_u_t;
+
+#else
+
+typedef union ii_iidsr_u {
+	bdrkreg_t	ii_iidsr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	30;
+		bdrkreg_t	i_pi1_forward_int	  :	 1;
+		bdrkreg_t	i_pi0_forward_int	  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_int_sent		  :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 3;
+		bdrkreg_t	i_enable		  :	 1;
+		bdrkreg_t	i_rsvd_3		  :	 7;
+		bdrkreg_t	i_node			  :	 8;
+		bdrkreg_t	i_pi_id			  :	 1;
+		bdrkreg_t	i_rsvd_4		  :	 1;
+		bdrkreg_t	i_level			  :	 7;
+	} ii_iidsr_fld_s;
+} ii_iidsr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are two instances of this register. This register is used     *
+ * for matching up the incoming responses from the graphics widget to   *
+ * the processor that initiated the graphics operation. The             *
+ * write-responses are converted to graphics credits and returned to    *
+ * the processor so that the processor interface can manage the flow    *
+ * control.                                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_igfx0_u {
+	bdrkreg_t	ii_igfx0_regval;
+	struct	{
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t       i_pi_id                   :      1;
+		bdrkreg_t	i_n_num			  :	 8;
+		bdrkreg_t       i_rsvd_1                  :      3;
+		bdrkreg_t       i_p_num                   :      1;
+		bdrkreg_t       i_rsvd                    :     47;
+	} ii_igfx0_fld_s;
+} ii_igfx0_u_t;
+
+#else
+
+typedef union ii_igfx0_u {
+	bdrkreg_t	ii_igfx0_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	47;
+		bdrkreg_t	i_p_num                   :	 1;
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_n_num                   :	 8;
+		bdrkreg_t	i_pi_id                   :	 1;
+		bdrkreg_t	i_w_num                   :	 4;
+	} ii_igfx0_fld_s;
+} ii_igfx0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are two instances of this register. This register is used     *
+ * for matching up the incoming responses from the graphics widget to   *
+ * the processor that initiated the graphics operation. The             *
+ * write-responses are converted to graphics credits and returned to    *
+ * the processor so that the processor interface can manage the flow    *
+ * control.                                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_igfx1_u {
+	bdrkreg_t	ii_igfx1_regval;
+	struct  {
+		bdrkreg_t	i_w_num                   :	 4;
+		bdrkreg_t	i_pi_id			  :	 1;
+		bdrkreg_t	i_n_num			  :	 8;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_p_num			  :	 1;
+		bdrkreg_t	i_rsvd			  :	47;
+	} ii_igfx1_fld_s;
+} ii_igfx1_u_t;
+
+#else
+
+typedef union ii_igfx1_u {
+	bdrkreg_t	ii_igfx1_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	47;
+		bdrkreg_t	i_p_num			  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_n_num			  :	 8;
+		bdrkreg_t	i_pi_id			  :	 1;
+		bdrkreg_t	i_w_num			  :	 4;
+	} ii_igfx1_fld_s;
+} ii_igfx1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are two instances of this register. These registers are       *
+ * used as scratch registers for software use.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union ii_iscr0_u {
+	bdrkreg_t	ii_iscr0_regval;
+	struct  {
+		bdrkreg_t	i_scratch                 :	64;
+	} ii_iscr0_fld_s;
+} ii_iscr0_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are two instances of this register. These registers are       *
+ * used as scratch registers for software use.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union ii_iscr1_u {
+	bdrkreg_t	ii_iscr1_regval;
+	struct  {
+		bdrkreg_t	i_scratch                 :	64;
+	} ii_iscr1_fld_s;
+} ii_iscr1_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Bedrock Big Window to a 48-bit       *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Bedrock is thus the lower 16 GBytes per widget    *
+ * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this       *
+ * space can be accessed.                                               *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Bedrock is thus the lower         *
+ * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB>   *
+ * of this space can be accessed.                                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_itte1_u {
+	bdrkreg_t	ii_itte1_regval;
+	struct  {
+		bdrkreg_t	i_offset                  :	 5;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t	i_iosp			  :	 1;
+		bdrkreg_t	i_rsvd			  :	51;
+	} ii_itte1_fld_s;
+} ii_itte1_u_t;
+
+#else
+
+typedef union ii_itte1_u {
+	bdrkreg_t	ii_itte1_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	51;
+		bdrkreg_t	i_iosp			  :	 1;
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_offset		  :	 5;
+	} ii_itte1_fld_s;
+} ii_itte1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Bedrock Big Window to a 48-bit       *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Bedrock is thus the lower 16 GBytes per widget    *
+ * (M-mode), only 7/32nds of this space can be                         *
+ * accessed.                                                           *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Bedrock is thus the lower         *
+ * 8-GBytes per widget (N-mode), only 7/32nds of this space            *
+ * can be accessed.                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_itte2_u {
+	bdrkreg_t	ii_itte2_regval;
+	struct	{
+		bdrkreg_t	i_offset		  :	 5;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t	i_iosp			  :	 1;
+		bdrkreg_t       i_rsvd                    :     51;
+	} ii_itte2_fld_s;
+} ii_itte2_u_t;
+
+#else
+typedef union ii_itte2_u {
+	bdrkreg_t	ii_itte2_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	51;
+		bdrkreg_t	i_iosp                    :	 1;
+		bdrkreg_t	i_w_num                   :	 4;
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_offset                  :	 5;
+	} ii_itte2_fld_s;
+} ii_itte2_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Bedrock Big Window to a 48-bit       *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Bedrock is thus the lower 16 GBytes per widget    *
+ * (M-mode), only 7/32nds of this space can be                         *
+ * accessed.                                                           *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Bedrock is thus the lower         *
+ * 8-GBytes per widget (N-mode), only 7/32nds of this space            *
+ * can be accessed.                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_itte3_u {
+	bdrkreg_t	ii_itte3_regval;
+	struct  {
+		bdrkreg_t	i_offset                  :	 5;
+		bdrkreg_t       i_rsvd_1                  :      3;
+		bdrkreg_t       i_w_num                   :      4;
+		bdrkreg_t       i_iosp                    :      1;
+		bdrkreg_t       i_rsvd                    :     51;
+	} ii_itte3_fld_s;
+} ii_itte3_u_t;
+
+#else
+
+typedef union ii_itte3_u {
+	bdrkreg_t	ii_itte3_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	51;
+		bdrkreg_t	i_iosp			  :	 1;
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_offset		  :	 5;
+	} ii_itte3_fld_s;
+} ii_itte3_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Bedrock Big Window to a 48-bit       *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Bedrock is thus the lower 16 GBytes per widget    *
+ * (M-mode), only 7/32nds of this space can be                         *
+ * accessed.                                                           *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Bedrock is thus the lower         *
+ * 8-GBytes per widget (N-mode), only 7/32nds of this space            *
+ * can be accessed.                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_itte4_u {
+	bdrkreg_t	ii_itte4_regval;
+	struct  {
+		bdrkreg_t	i_offset                  :	 5;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t       i_w_num                   :      4;
+		bdrkreg_t       i_iosp                    :      1;
+		bdrkreg_t       i_rsvd                    :     51;
+	} ii_itte4_fld_s;
+} ii_itte4_u_t;
+
+#else
+
+typedef union ii_itte4_u {
+	bdrkreg_t	ii_itte4_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	51;
+		bdrkreg_t	i_iosp			  :	 1;
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_offset		  :	 5;
+	} ii_itte4_fld_s;
+} ii_itte4_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Bedrock Big Window to a 48-bit       *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Bedrock is thus the lower 16 GBytes per widget    *
+ * (M-mode), only 7/32nds of this space can be                         *
+ * accessed.                                                           *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Bedrock is thus the lower         *
+ * 8-GBytes per widget (N-mode), only 7/32nds of this space            *
+ * can be accessed.                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_itte5_u {
+	bdrkreg_t	ii_itte5_regval;
+	struct  {
+		bdrkreg_t	i_offset                  :	 5;
+		bdrkreg_t       i_rsvd_1                  :      3;
+		bdrkreg_t       i_w_num                   :      4;
+		bdrkreg_t       i_iosp                    :      1;
+		bdrkreg_t       i_rsvd                    :     51;
+	} ii_itte5_fld_s;
+} ii_itte5_u_t;
+
+#else
+
+typedef union ii_itte5_u {
+	bdrkreg_t	ii_itte5_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	51;
+		bdrkreg_t	i_iosp			  :	 1;
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_offset		  :	 5;
+	} ii_itte5_fld_s;
+} ii_itte5_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Bedrock Big Window to a 48-bit       *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Bedrock is thus the lower 16 GBytes per widget    *
+ * (M-mode), only 7/32nds of this space can be                         *
+ * accessed.                                                           *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Bedrock is thus the lower         *
+ * 8-GBytes per widget (N-mode), only 7/32nds of this space            *
+ * can be accessed.                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_itte6_u {
+	bdrkreg_t	ii_itte6_regval;
+	struct  {
+		bdrkreg_t	i_offset                  :	 5;
+		bdrkreg_t       i_rsvd_1                  :      3;
+		bdrkreg_t       i_w_num                   :      4;
+		bdrkreg_t       i_iosp                    :      1;
+		bdrkreg_t       i_rsvd                    :     51;
+	} ii_itte6_fld_s;
+} ii_itte6_u_t;
+
+#else
+
+typedef union ii_itte6_u {
+	bdrkreg_t	ii_itte6_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	51;
+		bdrkreg_t	i_iosp			  :	 1;
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_offset		  :	 5;
+	} ii_itte6_fld_s;
+} ii_itte6_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Bedrock Big Window to a 48-bit       *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Bedrock is thus the lower 16 GBytes per widget    *
+ * (M-mode), only 7/32nds of this space can be                         *
+ * accessed.                                                           *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Bedrock is thus the lower         *
+ * 8-GBytes per widget (N-mode), only 7/32nds of this space            *
+ * can be accessed.                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_itte7_u {
+	bdrkreg_t	ii_itte7_regval;
+	struct  {
+		bdrkreg_t	i_offset                  :	 5;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t       i_w_num                   :      4;
+		bdrkreg_t       i_iosp                    :      1;
+		bdrkreg_t       i_rsvd                    :     51;
+	} ii_itte7_fld_s;
+} ii_itte7_u_t;
+
+#else
+
+typedef union ii_itte7_u {
+	bdrkreg_t	ii_itte7_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	51;
+		bdrkreg_t	i_iosp			  :	 1;
+		bdrkreg_t	i_w_num			  :	 4;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_offset		  :	 5;
+	} ii_itte7_fld_s;
+} ii_itte7_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprb0_u {
+	bdrkreg_t	ii_iprb0_regval;
+	struct  {
+		bdrkreg_t	i_c                       :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t       i_rsvd_2                  :      2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprb0_fld_s;
+} ii_iprb0_u_t;
+
+#else
+
+typedef union ii_iprb0_u {
+	bdrkreg_t	ii_iprb0_regval;
+	struct	{
+		bdrkreg_t	i_mult_err		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_c			  :	 8;
+	} ii_iprb0_fld_s;
+} ii_iprb0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprb8_u {
+	bdrkreg_t	ii_iprb8_regval;
+	struct  {
+		bdrkreg_t	i_c                       :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t       i_rsvd_2                  :      2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t       i_rsvd_1                  :      2;
+		bdrkreg_t       i_m                       :      2;
+		bdrkreg_t       i_f                       :      1;
+		bdrkreg_t       i_of_cnt                  :      5;
+		bdrkreg_t       i_error                   :      1;
+		bdrkreg_t       i_rd_to                   :      1;
+		bdrkreg_t       i_spur_wr                 :      1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t       i_rsvd                    :     11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprb8_fld_s;
+} ii_iprb8_u_t;
+
+#else
+
+
+typedef union ii_iprb8_u {
+	bdrkreg_t	ii_iprb8_regval;
+	struct	{
+		bdrkreg_t	i_mult_err		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_c			  :	 8;
+	} ii_iprb8_fld_s;
+} ii_iprb8_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprb9_u {
+	bdrkreg_t	ii_iprb9_regval;
+	struct	{
+		bdrkreg_t	i_c			  :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprb9_fld_s;
+} ii_iprb9_u_t;
+
+#else
+
+typedef union ii_iprb9_u {
+	bdrkreg_t	ii_iprb9_regval;
+	struct  {
+		bdrkreg_t	i_mult_err                :	 1;
+		bdrkreg_t	i_rsvd                    :	11;
+		bdrkreg_t	i_spur_rd                 :	 1;
+		bdrkreg_t	i_spur_wr                 :	 1;
+		bdrkreg_t	i_rd_to                   :	 1;
+		bdrkreg_t	i_error                   :	 1;
+		bdrkreg_t	i_of_cnt                  :	 5;
+		bdrkreg_t	i_f                       :	 1;
+		bdrkreg_t	i_m                       :	 2;
+		bdrkreg_t	i_rsvd_1                  :	 2;
+		bdrkreg_t	i_nb                      :	14;
+		bdrkreg_t	i_rsvd_2                  :	 2;
+		bdrkreg_t	i_na                      :	14;
+		bdrkreg_t	i_c                       :	 8;
+	} ii_iprb9_fld_s;
+} ii_iprb9_u_t;
+
+#endif
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprba_u {
+	bdrkreg_t	ii_iprba_regval;
+	struct  {
+		bdrkreg_t	i_c                       :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t       i_rsvd_2                  :      2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprba_fld_s;
+} ii_iprba_u_t;
+
+#else
+
+typedef union ii_iprba_u {
+	bdrkreg_t	ii_iprba_regval;
+	struct	{
+		bdrkreg_t	i_mult_err		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_c			  :	 8;
+	} ii_iprba_fld_s;
+} ii_iprba_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprbb_u {
+	bdrkreg_t	ii_iprbb_regval;
+	struct	{
+		bdrkreg_t	i_c			  :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprbb_fld_s;
+} ii_iprbb_u_t;
+
+#else
+
+typedef union ii_iprbb_u {
+	bdrkreg_t	ii_iprbb_regval;
+	struct  {
+		bdrkreg_t	i_mult_err                :	 1;
+		bdrkreg_t	i_rsvd                    :	11;
+		bdrkreg_t	i_spur_rd                 :	 1;
+		bdrkreg_t	i_spur_wr                 :	 1;
+		bdrkreg_t	i_rd_to                   :	 1;
+		bdrkreg_t	i_error                   :	 1;
+		bdrkreg_t	i_of_cnt                  :	 5;
+		bdrkreg_t	i_f                       :	 1;
+		bdrkreg_t	i_m                       :	 2;
+		bdrkreg_t	i_rsvd_1                  :	 2;
+		bdrkreg_t	i_nb                      :	14;
+		bdrkreg_t	i_rsvd_2                  :	 2;
+		bdrkreg_t	i_na                      :	14;
+		bdrkreg_t	i_c                       :	 8;
+	} ii_iprbb_fld_s;
+} ii_iprbb_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ * .                                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IPRBC: PRB (PIO request buffer) register for widget 0xC; fields LSB-first.
+ * Per the description above, i_c is the Crosstalk credit counter and
+ * i_spur_wr/i_spur_rd latch spurious write/read indications. */
+typedef union ii_iprbc_u {
+	bdrkreg_t	ii_iprbc_regval;
+	struct	{
+		bdrkreg_t	i_c			  :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprbc_fld_s;
+} ii_iprbc_u_t;
+
+#else
+
+/* Big-endian view of IPRBC (same fields, MSB-first). */
+typedef union ii_iprbc_u {
+	bdrkreg_t	ii_iprbc_regval;
+	struct  {
+		bdrkreg_t	i_mult_err                :	 1;
+		bdrkreg_t	i_rsvd                    :	11;
+		bdrkreg_t	i_spur_rd                 :	 1;
+		bdrkreg_t	i_spur_wr                 :	 1;
+		bdrkreg_t	i_rd_to                   :	 1;
+		bdrkreg_t	i_error                   :	 1;
+		bdrkreg_t	i_of_cnt                  :	 5;
+		bdrkreg_t	i_f                       :	 1;
+		bdrkreg_t	i_m                       :	 2;
+		bdrkreg_t	i_rsvd_1                  :	 2;
+		bdrkreg_t	i_nb                      :	14;
+		bdrkreg_t	i_rsvd_2                  :	 2;
+		bdrkreg_t	i_na                      :	14;
+		bdrkreg_t	i_c                       :	 8;
+	} ii_iprbc_fld_s;
+} ii_iprbc_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ * .                                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IPRBD: PRB (PIO request buffer) register for widget 0xD; fields LSB-first. */
+typedef union ii_iprbd_u {
+	bdrkreg_t	ii_iprbd_regval;
+	struct	{
+		bdrkreg_t	i_c			  :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprbd_fld_s;
+} ii_iprbd_u_t;
+
+#else
+
+/* Big-endian view of IPRBD (same fields, MSB-first). */
+typedef union ii_iprbd_u {
+	bdrkreg_t	ii_iprbd_regval;
+	struct  {
+		bdrkreg_t	i_mult_err                :	 1;
+		bdrkreg_t	i_rsvd                    :	11;
+		bdrkreg_t	i_spur_rd                 :	 1;
+		bdrkreg_t	i_spur_wr                 :	 1;
+		bdrkreg_t	i_rd_to                   :	 1;
+		bdrkreg_t	i_error                   :	 1;
+		bdrkreg_t	i_of_cnt                  :	 5;
+		bdrkreg_t	i_f                       :	 1;
+		bdrkreg_t	i_m                       :	 2;
+		bdrkreg_t	i_rsvd_1                  :	 2;
+		bdrkreg_t	i_nb                      :	14;
+		bdrkreg_t	i_rsvd_2                  :	 2;
+		bdrkreg_t	i_na                      :	14;
+		bdrkreg_t	i_c                       :	 8;
+	} ii_iprbd_fld_s;
+} ii_iprbd_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ * .                                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IPRBE: PRB (PIO request buffer) register for widget 0xE; fields LSB-first. */
+typedef union ii_iprbe_u {
+	bdrkreg_t	ii_iprbe_regval;
+	struct	{
+		bdrkreg_t	i_c			  :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprbe_fld_s;
+} ii_iprbe_u_t;
+
+#else
+
+/* Big-endian view of IPRBE (same fields, MSB-first). */
+typedef union ii_iprbe_u {
+	bdrkreg_t	ii_iprbe_regval;
+	struct  {
+		bdrkreg_t	i_mult_err                :	 1;
+		bdrkreg_t	i_rsvd                    :	11;
+		bdrkreg_t	i_spur_rd                 :	 1;
+		bdrkreg_t	i_spur_wr                 :	 1;
+		bdrkreg_t	i_rd_to                   :	 1;
+		bdrkreg_t	i_error                   :	 1;
+		bdrkreg_t	i_of_cnt                  :	 5;
+		bdrkreg_t	i_f                       :	 1;
+		bdrkreg_t	i_m                       :	 2;
+		bdrkreg_t	i_rsvd_1                  :	 2;
+		bdrkreg_t	i_nb                      :	14;
+		bdrkreg_t	i_rsvd_2                  :	 2;
+		bdrkreg_t	i_na                      :	14;
+		bdrkreg_t	i_c                       :	 8;
+	} ii_iprbe_fld_s;
+} ii_iprbe_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of Bedrock and Crossbow.        *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register                                         *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ * .                                                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IPRBF: PRB (PIO request buffer) register for widget 0xF; fields LSB-first.
+ * FIX: the field-struct tag here was "ii_iprbe_fld_s" (copy-paste from the
+ * IPRBE register above), so code accessing the bitfields by name would have
+ * differed between the little- and big-endian builds.  Renamed to
+ * ii_iprbf_fld_s to match the big-endian variant below, and the stray
+ * space indentation was normalized to tabs like the sibling registers. */
+typedef union ii_iprbf_u {
+	bdrkreg_t	ii_iprbf_regval;
+	struct	{
+		bdrkreg_t	i_c			  :	 8;
+		bdrkreg_t	i_na			  :	14;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_nb			  :	14;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_m			  :	 2;
+		bdrkreg_t	i_f			  :	 1;
+		bdrkreg_t	i_of_cnt		  :	 5;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rd_to			  :	 1;
+		bdrkreg_t	i_spur_wr		  :	 1;
+		bdrkreg_t	i_spur_rd		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_mult_err		  :	 1;
+	} ii_iprbf_fld_s;
+} ii_iprbf_u_t;
+
+#else
+
+/* Big-endian view of IPRBF (same fields, MSB-first). */
+typedef union ii_iprbf_u {
+	bdrkreg_t	ii_iprbf_regval;
+	struct  {
+		bdrkreg_t	i_mult_err                :	 1;
+		bdrkreg_t	i_rsvd                    :	11;
+		bdrkreg_t	i_spur_rd                 :	 1;
+		bdrkreg_t	i_spur_wr                 :	 1;
+		bdrkreg_t	i_rd_to                   :	 1;
+		bdrkreg_t	i_error                   :	 1;
+		bdrkreg_t	i_of_cnt                  :	 5;
+		bdrkreg_t	i_f                       :	 1;
+		bdrkreg_t	i_m                       :	 2;
+		bdrkreg_t	i_rsvd_1                  :	 2;
+		bdrkreg_t	i_nb                      :	14;
+		bdrkreg_t	i_rsvd_2                  :	 2;
+		bdrkreg_t	i_na                      :	14;
+		bdrkreg_t	i_c                       :	 8;
+	} ii_iprbf_fld_s;
+} ii_iprbf_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register specifies the timeout value to use for monitoring     *
+ * Crosstalk credits which are used outbound to Crosstalk. An           *
+ * internal counter called the Crosstalk Credit Timeout Counter         *
+ * increments every 128 II clocks. The counter starts counting          *
+ * anytime the credit count drops below a threshold, and resets to      *
+ * zero (stops counting) anytime the credit count is at or above the    *
+ * threshold. The threshold is 1 credit in direct connect mode and 2    *
+ * in Crossbow connect mode. When the internal Crosstalk Credit         *
+ * Timeout Counter reaches the value programmed in this register, a     *
+ * Crosstalk Credit Timeout has occurred. The internal counter is not   *
+ * readable from software, and stops counting at its maximum value,     *
+ * so it cannot cause more than one interrupt.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IXCC: Crosstalk credit-timeout threshold (see description above);
+ * i_time_out is the 26-bit threshold for the internal credit timeout
+ * counter.  Fields LSB-first. */
+typedef union ii_ixcc_u {
+	bdrkreg_t	ii_ixcc_regval;
+	struct  {
+		bdrkreg_t	i_time_out                :	26;
+		bdrkreg_t	i_rsvd			  :	38;
+	} ii_ixcc_fld_s;
+} ii_ixcc_u_t;
+
+#else
+
+/* Big-endian view of IXCC (same fields, MSB-first). */
+typedef union ii_ixcc_u {
+	bdrkreg_t	ii_ixcc_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	38;
+		bdrkreg_t	i_time_out		  :	26;
+	} ii_ixcc_fld_s;
+} ii_ixcc_u_t;
+
+#endif
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register qualifies all the PIO and DMA            *
+ * operations launched from widget 0 towards the Bedrock. In            *
+ * addition, it also qualifies accesses by the BTE streams.             *
+ * The bits in each field of this register are cleared by the Bedrock   *
+ * upon detection of an error which requires widget 0 or the BTE        *
+ * streams to be terminated. Whether or not widget x has access         *
+ * rights to this Bedrock is determined by an AND of the device         *
+ * enable bit in the appropriate field of this register and bit 0 in    *
+ * the Wx_IAC field. The bits in this field are set by writing a 1 to   *
+ * them. Incoming replies from Crosstalk are not subject to this        *
+ * access control mechanism.                                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IMEM: widget-0 / BTE-stream access-enable register (see description
+ * above); the *_esd bits are cleared by hardware on errors requiring
+ * widget 0 or a BTE stream to be terminated.  Fields LSB-first. */
+typedef union ii_imem_u {
+	bdrkreg_t	ii_imem_regval;
+	struct  {
+		bdrkreg_t	i_w0_esd                  :	 1;
+		bdrkreg_t	i_rsvd_3		  :	 3;
+		bdrkreg_t	i_b0_esd		  :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 3;
+		bdrkreg_t	i_b1_esd		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_clr_precise		  :	 1;
+		bdrkreg_t       i_rsvd                    :     51;
+	} ii_imem_fld_s;
+} ii_imem_u_t;
+
+#else
+
+/* Big-endian view of IMEM (same fields, MSB-first). */
+typedef union ii_imem_u {
+	bdrkreg_t	ii_imem_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	51;
+		bdrkreg_t	i_clr_precise		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_b1_esd		  :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 3;
+		bdrkreg_t	i_b0_esd		  :	 1;
+		bdrkreg_t	i_rsvd_3		  :	 3;
+		bdrkreg_t	i_w0_esd		  :	 1;
+	} ii_imem_fld_s;
+} ii_imem_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register specifies the timeout value to use for   *
+ * monitoring Crosstalk tail flits coming into the Bedrock in the       *
+ * TAIL_TO field. An internal counter associated with this register     *
+ * is incremented every 128 II internal clocks (7 bits). The counter    *
+ * starts counting anytime a header micropacket is received and stops   *
+ * counting (and resets to zero) any time a micropacket with a Tail     *
+ * bit is received. Once the counter reaches the threshold value        *
+ * programmed in this register, it generates an interrupt to the        *
+ * processor that is programmed into the IIDSR. The counter saturates   *
+ * (does not roll over) at its maximum value, so it cannot cause        *
+ * another interrupt until after it is cleared.                         *
+ * The register also contains the Read Response Timeout values. The     *
+ * Prescalar is 23 bits, and counts II clocks. An internal counter      *
+ * increments on every II clock and when it reaches the value in the    *
+ * Prescalar field, all IPRTE registers with their valid bits set       *
+ * have their Read Response timers bumped. Whenever any of them match   *
+ * the value in the RRSP_TO field, a Read Response Timeout has          *
+ * occurred, and error handling occurs as described in the Error        *
+ * Handling section of this document.                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IXTT: Crosstalk timeout values (see description above): i_tail_to is
+ * the tail-flit timeout threshold, i_rrsp_ps the read-response prescalar
+ * (counts II clocks) and i_rrsp_to the read-response timeout.  Fields
+ * LSB-first. */
+typedef union ii_ixtt_u {
+	bdrkreg_t	ii_ixtt_regval;
+	struct  {
+		bdrkreg_t	i_tail_to                 :	26;
+		bdrkreg_t	i_rsvd_1		  :	 6;
+		bdrkreg_t	i_rrsp_ps		  :	23;
+		bdrkreg_t	i_rrsp_to		  :	 5;
+		bdrkreg_t	i_rsvd			  :	 4;
+	} ii_ixtt_fld_s;
+} ii_ixtt_u_t;
+
+#else
+
+/* Big-endian view of IXTT (same fields, MSB-first). */
+typedef union ii_ixtt_u {
+	bdrkreg_t	ii_ixtt_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	 4;
+		bdrkreg_t	i_rrsp_to		  :	 5;
+		bdrkreg_t	i_rrsp_ps		  :	23;
+		bdrkreg_t	i_rsvd_1		  :	 6;
+		bdrkreg_t	i_tail_to		  :	26;
+	} ii_ixtt_fld_s;
+} ii_ixtt_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Writing a 1 to the fields of this register clears the appropriate   *
+ * error bits in other areas of Bedrock_II. Note that when the          *
+ * E_PRB_x bits are used to clear error bits in PRB registers,          *
+ * SPUR_RD and SPUR_WR may persist, because they require additional     *
+ * action to clear them. See the IPRBx and IXSS Register                *
+ * specifications.                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IECLR: write-1-to-clear error-clear register (see description above);
+ * the i_e_prb_* bits clear the corresponding PRB error state.  Fields
+ * LSB-first. */
+typedef union ii_ieclr_u {
+	bdrkreg_t	ii_ieclr_regval;
+	struct  {
+		bdrkreg_t	i_e_prb_0                 :	 1;
+		bdrkreg_t	i_rsvd			  :	 7;
+		bdrkreg_t	i_e_prb_8		  :	 1;
+		bdrkreg_t	i_e_prb_9		  :	 1;
+		bdrkreg_t	i_e_prb_a		  :	 1;
+		bdrkreg_t	i_e_prb_b		  :	 1;
+		bdrkreg_t	i_e_prb_c		  :	 1;
+		bdrkreg_t	i_e_prb_d		  :	 1;
+		bdrkreg_t	i_e_prb_e		  :	 1;
+		bdrkreg_t	i_e_prb_f		  :	 1;
+		bdrkreg_t	i_e_crazy		  :	 1;
+		bdrkreg_t	i_e_bte_0		  :	 1;
+		bdrkreg_t	i_e_bte_1		  :	 1;
+		bdrkreg_t	i_reserved_1		  :	 9;
+		bdrkreg_t	i_ii_internal		  :	 1;
+		bdrkreg_t	i_spur_rd_hdr		  :	 1;
+		bdrkreg_t	i_pi0_forward_int	  :	 1;
+		bdrkreg_t	i_pi1_forward_int	  :	 1;
+		bdrkreg_t       i_reserved                :     32;
+	} ii_ieclr_fld_s;
+} ii_ieclr_u_t;
+
+#else
+
+/* Big-endian view of IECLR (same fields, MSB-first). */
+typedef union ii_ieclr_u {
+	bdrkreg_t	ii_ieclr_regval;
+	struct	{
+		bdrkreg_t	i_reserved		  :	32;
+		bdrkreg_t	i_pi1_forward_int	  :	 1;
+		bdrkreg_t	i_pi0_forward_int	  :	 1;
+		bdrkreg_t	i_spur_rd_hdr		  :	 1;
+		bdrkreg_t	i_ii_internal		  :	 1;
+		bdrkreg_t	i_reserved_1		  :	 9;
+		bdrkreg_t	i_e_bte_1		  :	 1;
+		bdrkreg_t	i_e_bte_0		  :	 1;
+		bdrkreg_t	i_e_crazy		  :	 1;
+		bdrkreg_t	i_e_prb_f		  :	 1;
+		bdrkreg_t	i_e_prb_e		  :	 1;
+		bdrkreg_t	i_e_prb_d		  :	 1;
+		bdrkreg_t	i_e_prb_c		  :	 1;
+		bdrkreg_t	i_e_prb_b		  :	 1;
+		bdrkreg_t	i_e_prb_a		  :	 1;
+		bdrkreg_t	i_e_prb_9		  :	 1;
+		bdrkreg_t	i_e_prb_8		  :	 1;
+		bdrkreg_t	i_rsvd			  :	 7;
+		bdrkreg_t	i_e_prb_0		  :	 1;
+	} ii_ieclr_fld_s;
+} ii_ieclr_u_t;
+
+#endif
+
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register controls both BTEs. SOFT_RESET is intended for        *
+ * recovery after an error. COUNT controls the total number of CRBs     *
+ * that both BTEs (combined) can use, which affects total BTE           *
+ * bandwidth.                                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IBCR: BTE control register (see description above); i_count sets the
+ * total CRBs both BTEs may use, i_soft_reset is for error recovery.
+ * Fields LSB-first. */
+typedef union ii_ibcr_u {
+	bdrkreg_t	ii_ibcr_regval;
+	struct  {
+		bdrkreg_t	i_count                   :	 4;
+		bdrkreg_t	i_rsvd_1		  :	 4;
+		bdrkreg_t	i_soft_reset		  :	 1;
+		bdrkreg_t	i_rsvd			  :	55;
+	} ii_ibcr_fld_s;
+} ii_ibcr_u_t;
+
+#else
+
+/* Big-endian view of IBCR (same fields, MSB-first). */
+typedef union ii_ibcr_u {
+	bdrkreg_t	ii_ibcr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	55;
+		bdrkreg_t	i_soft_reset		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 4;
+		bdrkreg_t	i_count			  :	 4;
+	} ii_ibcr_fld_s;
+} ii_ibcr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the header of a spurious read response       *
+ * received from Crosstalk. A spurious read response is defined as a    *
+ * read response received by II from a widget for which (1) the SIDN    *
+ * has a value between 1 and 7, inclusive (II never sends requests to   *
+ * these widgets), (2) there is no valid IPRTE register which           *
+ * corresponds to the TNUM, or (3) the widget indicated in SIDN is      *
+ * not the same as the widget recorded in the IPRTE register            *
+ * referenced by the TNUM. If this condition is true, and if the        *
+ * IXSS[VALID] bit is clear, then the header of the spurious read       *
+ * response is captured in IXSM and IXSS, and IXSS[VALID] is set. The   *
+ * errant header is thereby captured, and no further spurious read      *
+ * responses are captured until IXSS[VALID] is cleared by setting the   *
+ * appropriate bit in IECLR. Every time a spurious read response is     *
+ * detected, the SPUR_RD bit of the PRB corresponding to the incoming   *
+ * message's SIDN field is set. This always happens, regardless of      *
+ * whether a header is captured. The programmer should check            *
+ * IXSM[SIDN] to determine which widget sent the spurious response,     *
+ * because there may be more than one SPUR_RD bit set in the PRB        *
+ * registers. The widget indicated by IXSM[SIDN] was the first          *
+ * spurious read response to be received since the last time            *
+ * IXSS[VALID] was clear. The SPUR_RD bit of the corresponding PRB      *
+ * will be set. Any SPUR_RD bits in any other PRB registers indicate    *
+ * spurious messages from other widgets which were detected after the   *
+ * header was captured.                                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IXSM: captured header of a spurious Crosstalk read response (see
+ * description above); i_sidn identifies the widget that sent it.
+ * Fields LSB-first. */
+typedef union ii_ixsm_u {
+	bdrkreg_t	ii_ixsm_regval;
+	struct  {
+		bdrkreg_t	i_byte_en                 :	32;
+		bdrkreg_t	i_reserved		  :	 1;
+		bdrkreg_t	i_tag			  :	 3;
+		bdrkreg_t	i_alt_pactyp		  :	 4;
+		bdrkreg_t	i_bo			  :	 1;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_vbpm			  :	 1;
+		bdrkreg_t	i_gbr			  :	 1;
+		bdrkreg_t	i_ds			  :	 2;
+		bdrkreg_t	i_ct			  :	 1;
+		bdrkreg_t	i_tnum			  :	 5;
+		bdrkreg_t	i_pactyp		  :	 4;
+		bdrkreg_t	i_sidn			  :	 4;
+		bdrkreg_t	i_didn			  :	 4;
+	} ii_ixsm_fld_s;
+} ii_ixsm_u_t;
+
+#else
+
+/* Big-endian view of IXSM (same fields, MSB-first). */
+typedef union ii_ixsm_u {
+	bdrkreg_t	ii_ixsm_regval;
+	struct	{
+		bdrkreg_t	i_didn			  :	 4;
+		bdrkreg_t	i_sidn			  :	 4;
+		bdrkreg_t	i_pactyp		  :	 4;
+		bdrkreg_t	i_tnum			  :	 5;
+		bdrkreg_t	i_ct			  :	 1;
+		bdrkreg_t	i_ds			  :	 2;
+		bdrkreg_t	i_gbr			  :	 1;
+		bdrkreg_t	i_vbpm			  :	 1;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_bo			  :	 1;
+		bdrkreg_t	i_alt_pactyp		  :	 4;
+		bdrkreg_t	i_tag			  :	 3;
+		bdrkreg_t	i_reserved		  :	 1;
+		bdrkreg_t	i_byte_en		  :	32;
+	} ii_ixsm_fld_s;
+} ii_ixsm_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the sideband bits of a spurious read         *
+ * response received from Crosstalk.                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IXSS: sideband bits of the captured spurious read response; i_valid
+ * gates further capture until cleared via IECLR (see IXSM description
+ * above).  Fields LSB-first. */
+typedef union ii_ixss_u {
+	bdrkreg_t	ii_ixss_regval;
+	struct  {
+		bdrkreg_t	i_sideband                :	 8;
+		bdrkreg_t	i_rsvd			  :	55;
+		bdrkreg_t	i_valid			  :	 1;
+	} ii_ixss_fld_s;
+} ii_ixss_u_t;
+
+#else
+
+/* Big-endian view of IXSS (same fields, MSB-first). */
+typedef union ii_ixss_u {
+	bdrkreg_t	ii_ixss_regval;
+	struct	{
+		bdrkreg_t	i_valid			  :	 1;
+		bdrkreg_t	i_rsvd			  :	55;
+		bdrkreg_t	i_sideband		  :	 8;
+	} ii_ixss_fld_s;
+} ii_ixss_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register enables software to access the II LLP's test port.    *
+ * Refer to the LLP 2.5 documentation for an explanation of the test    *
+ * port. Software can write to this register to program the values      *
+ * for the control fields (TestErrCapture, TestClear, TestFlit,         *
+ * TestMask and TestSeed). Similarly, software can read from this       *
+ * register to obtain the values of the test port's status outputs      *
+ * (TestCBerr, TestValid and TestData).                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* ILCT: II LLP test-port register (see description above) — control
+ * inputs (err_capture/clear/flit/mask/seed) and status outputs
+ * (cberr/valid/data).  Fields LSB-first. */
+typedef union ii_ilct_u {
+	bdrkreg_t	ii_ilct_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	 9;
+		bdrkreg_t	i_test_err_capture        :	 1;
+		bdrkreg_t	i_test_clear              :	 1;
+		bdrkreg_t	i_test_flit               :	 3;
+		bdrkreg_t	i_test_cberr              :	 1;
+		bdrkreg_t	i_test_valid              :	 1;
+		bdrkreg_t	i_test_data               :	20;
+		bdrkreg_t	i_test_mask               :	 8;
+		bdrkreg_t	i_test_seed               :	20;
+	} ii_ilct_fld_s;
+} ii_ilct_u_t;
+
+#else
+
+/* Big-endian view of ILCT.
+ * FIX: this variant previously listed the fields in the same order as
+ * the little-endian one, unlike every other register in this header,
+ * whose big-endian variant reverses the field order.  The order is now
+ * reversed to match the file-wide convention, so both views map the
+ * same bits to the same field names. */
+typedef union ii_ilct_u {
+	bdrkreg_t	ii_ilct_regval;
+	struct	{
+		bdrkreg_t	i_test_seed		  :	20;
+		bdrkreg_t	i_test_mask		  :	 8;
+		bdrkreg_t	i_test_data		  :	20;
+		bdrkreg_t	i_test_valid		  :	 1;
+		bdrkreg_t	i_test_cberr		  :	 1;
+		bdrkreg_t	i_test_flit		  :	 3;
+		bdrkreg_t	i_test_clear		  :	 1;
+		bdrkreg_t	i_test_err_capture	  :	 1;
+		bdrkreg_t	i_rsvd			  :	 9;
+	} ii_ilct_fld_s;
+} ii_ilct_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  If the II detects an illegal incoming Duplonet packet (request or   *
+ * reply) when VALID==0 in the IIEPH1 register, then it saves the       *
+ * contents of the packet's header flit in the IIEPH1 and IIEPH2        *
+ * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit,     *
+ * and assigns a value to the ERR_TYPE field which indicates the        *
+ * specific nature of the error. The II recognizes four different       *
+ * types of errors: short request packets (ERR_TYPE==2), short reply    *
+ * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long   *
+ * reply packets (ERR_TYPE==5). The encodings for these types of        *
+ * errors were chosen to be consistent with the same types of errors    *
+ * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in    *
+ * the LB unit). If the II detects an illegal incoming Duplonet         *
+ * packet when VALID==1 in the IIEPH1 register, then it merely sets     *
+ * the OVERRUN bit to indicate that a subsequent error has happened,    *
+ * and does nothing further.                                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IIEPH1: first header-flit capture for an erroneous incoming Duplonet
+ * packet (see description above); i_err_type encodes short/long
+ * request/reply, i_overrun flags a subsequent error while i_valid is
+ * set.  Fields LSB-first. */
+typedef union ii_iieph1_u {
+	bdrkreg_t	ii_iieph1_regval;
+	struct	{
+		bdrkreg_t	i_command		  :	 7;
+		bdrkreg_t	i_rsvd_5		  :	 1;
+		bdrkreg_t	i_suppl			  :	11;
+		bdrkreg_t	i_rsvd_4		  :	 1;
+		bdrkreg_t	i_source		  :	11;
+		bdrkreg_t	i_rsvd_3		  :	 1;
+		bdrkreg_t	i_err_type		  :	 4;
+		bdrkreg_t	i_rsvd_2		  :	 4;
+		bdrkreg_t	i_overrun		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_valid			  :	 1;
+		bdrkreg_t	i_rsvd			  :	19;
+	} ii_iieph1_fld_s;
+} ii_iieph1_u_t;
+
+#else
+
+/* Big-endian view of IIEPH1 (same fields, MSB-first). */
+typedef union ii_iieph1_u {
+	bdrkreg_t	ii_iieph1_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	19;
+		bdrkreg_t	i_valid                   :	 1;
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_overrun                 :	 1;
+		bdrkreg_t	i_rsvd_2                  :	 4;
+		bdrkreg_t	i_err_type                :	 4;
+		bdrkreg_t	i_rsvd_3                  :	 1;
+		bdrkreg_t	i_source                  :	11;
+		bdrkreg_t	i_rsvd_4                  :	 1;
+		bdrkreg_t	i_suppl                   :	11;
+		bdrkreg_t	i_rsvd_5                  :	 1;
+		bdrkreg_t	i_command                 :	 7;
+	} ii_iieph1_fld_s;
+} ii_iieph1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register holds the Address field from the header flit of an    *
+ * incoming erroneous Duplonet packet, along with the tail bit which    *
+ * accompanied this header flit. This register is essentially an        *
+ * extension of IIEPH1. Two registers were necessary because the 64     *
+ * bits available in only a single register were insufficient to        *
+ * capture the entire header flit of an erroneous packet.               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IIEPH2: address + tail bit of the erroneous packet's header flit;
+ * extension of IIEPH1 (see description above).  Fields LSB-first. */
+typedef union ii_iieph2_u {
+	bdrkreg_t	ii_iieph2_regval;
+	struct  {
+		bdrkreg_t	i_address                 :	38;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_tail			  :	 1;
+		bdrkreg_t	i_rsvd			  :	23;
+	} ii_iieph2_fld_s;
+} ii_iieph2_u_t;
+
+#else
+
+/* Big-endian view of IIEPH2 (same fields, MSB-first). */
+typedef union ii_iieph2_u {
+	bdrkreg_t	ii_iieph2_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	23;
+		bdrkreg_t	i_tail			  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_address		  :	38;
+	} ii_iieph2_fld_s;
+} ii_iieph2_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  A write to this register causes a particular field in the           *
+ * corresponding widget's PRB entry to be adjusted up or down by 1.     *
+ * This counter should be used when recovering from error and reset     *
+ * conditions. Note that software would be capable of causing           *
+ * inadvertent overflow or underflow of these counters.                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+/* IPCA: PRB counter-adjust register (see description above) — a write
+ * bumps field i_field of widget i_wid's PRB entry up or down by one
+ * per i_adjust.  Fields LSB-first. */
+typedef union ii_ipca_u {
+	bdrkreg_t	ii_ipca_regval;
+	struct  {
+		bdrkreg_t	i_wid                     :	 4;
+		bdrkreg_t	i_adjust		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_field			  :	 2;
+		bdrkreg_t	i_rsvd			  :	54;
+	} ii_ipca_fld_s;
+} ii_ipca_u_t;
+
+#else
+
+/* Big-endian view of IPCA (same fields, MSB-first). */
+typedef union ii_ipca_u {
+	bdrkreg_t	ii_ipca_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	54;
+		bdrkreg_t	i_field			  :	 2;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_adjust		  :	 1;
+		bdrkreg_t	i_wid			  :	 4;
+	} ii_ipca_fld_s;
+} ii_ipca_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprte0_u {
+	bdrkreg_t	ii_iprte0_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t       i_vld                     :      1;
+	} ii_iprte0_fld_s;
+} ii_iprte0_u_t;
+
+#else
+
+typedef union ii_iprte0_u {
+	bdrkreg_t	ii_iprte0_regval;
+	struct	{
+		bdrkreg_t	i_vld			  :	 1;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+	} ii_iprte0_fld_s;
+} ii_iprte0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprte1_u {
+	bdrkreg_t	ii_iprte1_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t       i_vld                     :      1;
+	} ii_iprte1_fld_s;
+} ii_iprte1_u_t;
+
+#else
+
+typedef union ii_iprte1_u {
+	bdrkreg_t	ii_iprte1_regval;
+	struct	{
+		bdrkreg_t	i_vld			  :	 1;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+	} ii_iprte1_fld_s;
+} ii_iprte1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprte2_u {
+	bdrkreg_t	ii_iprte2_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t       i_vld                     :      1;
+	} ii_iprte2_fld_s;
+} ii_iprte2_u_t;
+
+#else
+
+typedef union ii_iprte2_u {
+	bdrkreg_t	ii_iprte2_regval;
+	struct	{
+		bdrkreg_t	i_vld			  :	 1;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+	} ii_iprte2_fld_s;
+} ii_iprte2_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprte3_u {
+	bdrkreg_t	ii_iprte3_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_vld			  :	 1;
+	} ii_iprte3_fld_s;
+} ii_iprte3_u_t;
+
+#else
+
+typedef union ii_iprte3_u {
+	bdrkreg_t	ii_iprte3_regval;
+	struct	{
+		bdrkreg_t	i_vld			  :	 1;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+	} ii_iprte3_fld_s;
+} ii_iprte3_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprte4_u {
+	bdrkreg_t	ii_iprte4_regval;
+	struct	{
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_vld			  :	 1;
+	} ii_iprte4_fld_s;
+} ii_iprte4_u_t;
+
+#else
+
+typedef union ii_iprte4_u {
+	bdrkreg_t	ii_iprte4_regval;
+	struct  {
+		bdrkreg_t	i_vld                     :	 1;
+		bdrkreg_t	i_to_cnt                  :	 5;
+		bdrkreg_t	i_widget                  :	 4;
+		bdrkreg_t	i_rsvd                    :	 2;
+		bdrkreg_t	i_source                  :	 8;
+		bdrkreg_t	i_init                    :	 3;
+		bdrkreg_t	i_addr                    :	38;
+		bdrkreg_t	i_rsvd_1                  :	 3;
+	} ii_iprte4_fld_s;
+} ii_iprte4_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprte5_u {
+	bdrkreg_t	ii_iprte5_regval;
+	struct	{
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_vld			  :	 1;
+	} ii_iprte5_fld_s;
+} ii_iprte5_u_t;
+
+#else
+
+typedef union ii_iprte5_u {
+	bdrkreg_t	ii_iprte5_regval;
+	struct  {
+		bdrkreg_t	i_vld                     :	 1;
+		bdrkreg_t	i_to_cnt                  :	 5;
+		bdrkreg_t	i_widget                  :	 4;
+		bdrkreg_t	i_rsvd                    :	 2;
+		bdrkreg_t	i_source                  :	 8;
+		bdrkreg_t	i_init                    :	 3;
+		bdrkreg_t	i_addr                    :	38;
+		bdrkreg_t	i_rsvd_1                  :	 3;
+	} ii_iprte5_fld_s;
+} ii_iprte5_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprte6_u {
+	bdrkreg_t	ii_iprte6_regval;
+	struct	{
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_vld			  :	 1;
+	} ii_iprte6_fld_s;
+} ii_iprte6_u_t;
+
+#else
+
+typedef union ii_iprte6_u {
+	bdrkreg_t	ii_iprte6_regval;
+	struct  {
+		bdrkreg_t	i_vld                     :	 1;
+		bdrkreg_t	i_to_cnt                  :	 5;
+		bdrkreg_t	i_widget                  :	 4;
+		bdrkreg_t	i_rsvd                    :	 2;
+		bdrkreg_t	i_source                  :	 8;
+		bdrkreg_t	i_init                    :	 3;
+		bdrkreg_t	i_addr                    :	38;
+		bdrkreg_t	i_rsvd_1                  :	 3;
+	} ii_iprte6_fld_s;
+} ii_iprte6_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iprte7_u {
+        bdrkreg_t       ii_iprte7_regval;
+        struct  {
+                bdrkreg_t       i_rsvd_1                  :      3;
+                bdrkreg_t       i_addr                    :     38;
+                bdrkreg_t       i_init                    :      3;
+                bdrkreg_t       i_source                  :      8;
+                bdrkreg_t       i_rsvd                    :      2;
+                bdrkreg_t       i_widget                  :      4;
+                bdrkreg_t       i_to_cnt                  :      5;
+                bdrkreg_t       i_vld                     :      1;
+        } ii_iprte7_fld_s;
+} ii_iprte7_u_t;
+
+#else
+
+typedef union ii_iprte7_u {
+	bdrkreg_t	ii_iprte7_regval;
+	struct  {
+		bdrkreg_t	i_vld                     :	 1;
+		bdrkreg_t	i_to_cnt                  :	 5;
+		bdrkreg_t	i_widget                  :	 4;
+		bdrkreg_t	i_rsvd                    :	 2;
+		bdrkreg_t	i_source                  :	 8;
+		bdrkreg_t	i_init                    :	 3;
+		bdrkreg_t	i_addr                    :	38;
+		bdrkreg_t	i_rsvd_1                  :	 3;
+	} ii_iprte7_fld_s;
+} ii_iprte7_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  Bedrock_II contains a feature which did not exist in   *
+ * the Hub which automatically cleans up after a Read Response          *
+ * timeout, including deallocation of the IPRTE and recovery of IBuf    *
+ * space. The inclusion of this register in Bedrock is for backward     *
+ * compatibility.                                                       *
+ * A write to this register causes an entry from the table of           *
+ * outstanding PIO Read Requests to be freed and returned to the        *
+ * stack of free entries. This register is used in handling the         *
+ * timeout errors that result in a PIO Reply never returning from       *
+ * Crosstalk.                                                           *
+ * Note that this register does not affect the contents of the IPRTE    *
+ * registers. The Valid bits in those registers have to be              *
+ * specifically turned off by software.                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ipdr_u {
+	bdrkreg_t	ii_ipdr_regval;
+	struct  {
+		bdrkreg_t	i_te                      :	 3;
+		bdrkreg_t	i_rsvd_1		  :	 1;
+		bdrkreg_t	i_pnd			  :	 1;
+		bdrkreg_t	i_init_rpcnt		  :	 1;
+		bdrkreg_t	i_rsvd			  :	58;
+	} ii_ipdr_fld_s;
+} ii_ipdr_u_t;
+
+#else
+
+typedef union ii_ipdr_u {
+	bdrkreg_t	ii_ipdr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	58;
+		bdrkreg_t	i_init_rpcnt		  :	 1;
+		bdrkreg_t	i_pnd			  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 1;
+		bdrkreg_t	i_te			  :	 3;
+	} ii_ipdr_fld_s;
+} ii_ipdr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  A write to this register causes a CRB entry to be returned to the   *
+ * queue of free CRBs. The entry should have previously been cleared    *
+ * (mark bit) via backdoor access to the pertinent CRB entry. This      *
+ * register is used in the last step of handling the errors that are    *
+ * captured and marked in CRB entries.  Briefly: 1) first error for     *
+ * DMA write from a particular device, and first error for a            *
+ * particular BTE stream, lead to a marked CRB entry, and processor     *
+ * interrupt, 2) software reads the error information captured in the   *
+ * CRB entry, and presumably takes some corrective action, 3)           *
+ * software clears the mark bit, and finally 4) software writes to      *
+ * the ICDR register to return the CRB entry to the list of free CRB    *
+ * entries.                                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icdr_u {
+	bdrkreg_t	ii_icdr_regval;
+	struct  {
+		bdrkreg_t	i_crb_num                 :	 4;
+		bdrkreg_t	i_pnd			  :	 1;
+		bdrkreg_t       i_rsvd                    :     59;
+	} ii_icdr_fld_s;
+} ii_icdr_u_t;
+
+#else
+
+typedef union ii_icdr_u {
+	bdrkreg_t	ii_icdr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	59;
+		bdrkreg_t	i_pnd			  :	 1;
+		bdrkreg_t	i_crb_num		  :	 4;
+	} ii_icdr_fld_s;
+} ii_icdr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register provides debug access to two FIFOs inside of II.      *
+ * Both IOQ_MAX* fields of this register contain the instantaneous      *
+ * depth (in units of the number of available entries) of the           *
+ * associated IOQ FIFO.  A read of this register will return the        *
+ * number of free entries on each FIFO at the time of the read.  So     *
+ * when a FIFO is idle, the associated field contains the maximum       *
+ * depth of the FIFO.  This register is writable for debug reasons      *
+ * and is intended to be written with the maximum desired FIFO depth    *
+ * while the FIFO is idle. Software must assure that II is idle when    *
+ * this register is written. If there are any active entries in any     *
+ * of these FIFOs when this register is written, the results are        *
+ * undefined.                                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ifdr_u {
+	bdrkreg_t	ii_ifdr_regval;
+	struct  {
+		bdrkreg_t	i_ioq_max_rq              :	 7;
+		bdrkreg_t	i_set_ioq_rq		  :	 1;
+		bdrkreg_t	i_ioq_max_rp		  :	 7;
+		bdrkreg_t	i_set_ioq_rp		  :	 1;
+		bdrkreg_t	i_rsvd			  :	48;
+	} ii_ifdr_fld_s;
+} ii_ifdr_u_t;
+
+#else
+
+typedef union ii_ifdr_u {
+	bdrkreg_t	ii_ifdr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	48;
+		bdrkreg_t	i_set_ioq_rp		  :	 1;
+		bdrkreg_t	i_ioq_max_rp		  :	 7;
+		bdrkreg_t	i_set_ioq_rq		  :	 1;
+		bdrkreg_t	i_ioq_max_rq		  :	 7;
+	} ii_ifdr_fld_s;
+} ii_ifdr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register allows the II to become sluggish in removing          *
+ * messages from its inbound queue (IIQ). This will cause messages to   *
+ * back up in either virtual channel. Disabling the "molasses" mode     *
+ * subsequently allows the II to be tested under stress. In the         *
+ * sluggish ("Molasses") mode, the localized effects of congestion      *
+ * can be observed.                                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iiap_u {
+        bdrkreg_t       ii_iiap_regval;
+        struct  {
+                bdrkreg_t       i_rq_mls                  :      6;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_rp_mls		  :	 6;
+		bdrkreg_t       i_rsvd                    :     50;
+        } ii_iiap_fld_s;
+} ii_iiap_u_t;
+
+#else
+
+typedef union ii_iiap_u {
+	bdrkreg_t	ii_iiap_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	50;
+		bdrkreg_t	i_rp_mls                  :	 6;
+		bdrkreg_t	i_rsvd_1                  :	 2;
+		bdrkreg_t	i_rq_mls                  :	 6;
+	} ii_iiap_fld_s;
+} ii_iiap_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register allows several parameters of CRB operation to be      *
+ * set. Note that writing to this register can have catastrophic side   *
+ * effects, if the CRB is not quiescent, i.e. if the CRB is             *
+ * processing protocol messages when the write occurs.                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icmr_u {
+	bdrkreg_t	ii_icmr_regval;
+	struct  {
+		bdrkreg_t	i_sp_msg                  :	 1;
+		bdrkreg_t	i_rd_hdr		  :	 1;
+		bdrkreg_t	i_rsvd_4		  :	 2;
+		bdrkreg_t	i_c_cnt			  :	 4;
+		bdrkreg_t	i_rsvd_3		  :	 4;
+		bdrkreg_t	i_clr_rqpd		  :	 1;
+		bdrkreg_t	i_clr_rppd		  :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_fc_cnt		  :	 4;
+		bdrkreg_t	i_crb_vld		  :	15;
+		bdrkreg_t	i_crb_mark		  :	15;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_precise		  :	 1;
+		bdrkreg_t	i_rsvd			  :	11;
+	} ii_icmr_fld_s;
+} ii_icmr_u_t;
+
+#else
+
+typedef union ii_icmr_u {
+	bdrkreg_t	ii_icmr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	11;
+		bdrkreg_t	i_precise		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 2;
+		bdrkreg_t	i_crb_mark		  :	15;
+		bdrkreg_t	i_crb_vld		  :	15;
+		bdrkreg_t	i_fc_cnt		  :	 4;
+		bdrkreg_t	i_rsvd_2		  :	 2;
+		bdrkreg_t	i_clr_rppd		  :	 1;
+		bdrkreg_t	i_clr_rqpd		  :	 1;
+		bdrkreg_t	i_rsvd_3		  :	 4;
+		bdrkreg_t	i_c_cnt			  :	 4;
+		bdrkreg_t	i_rsvd_4		  :	 2;
+		bdrkreg_t	i_rd_hdr		  :	 1;
+		bdrkreg_t	i_sp_msg		  :	 1;
+	} ii_icmr_fld_s;
+} ii_icmr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register allows control of the table portion of the CRB        *
+ * logic via software. Control operations from this register have       *
+ * priority over all incoming Crosstalk or BTE requests.                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_iccr_u {
+	bdrkreg_t	ii_iccr_regval;
+	struct  {
+		bdrkreg_t	i_crb_num                 :	 4;
+		bdrkreg_t	i_rsvd_1		  :	 4;
+		bdrkreg_t	i_cmd			  :	 8;
+		bdrkreg_t	i_pending		  :	 1;
+		bdrkreg_t	i_rsvd			  :	47;
+	} ii_iccr_fld_s;
+} ii_iccr_u_t;
+
+#else
+
+typedef union ii_iccr_u {
+	bdrkreg_t	ii_iccr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	47;
+		bdrkreg_t	i_pending		  :	 1;
+		bdrkreg_t	i_cmd			  :	 8;
+		bdrkreg_t	i_rsvd_1		  :	 4;
+		bdrkreg_t	i_crb_num		  :	 4;
+	} ii_iccr_fld_s;
+} ii_iccr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register allows the maximum timeout value to be programmed.    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icto_u {
+	bdrkreg_t	ii_icto_regval;
+	struct  {
+		bdrkreg_t	i_timeout                 :	 8;
+		bdrkreg_t	i_rsvd			  :	56;
+	} ii_icto_fld_s;
+} ii_icto_u_t;
+
+#else
+
+typedef union ii_icto_u {
+	bdrkreg_t	ii_icto_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	56;
+		bdrkreg_t	i_timeout		  :	 8;
+	} ii_icto_fld_s;
+} ii_icto_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register allows the timeout prescaler to be programmed. An     *
+ * internal counter is associated with this register. When the          *
+ * internal counter reaches the value of the PRESCALE field, the        *
+ * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT]   *
+ * field). The internal counter resets to zero, and then continues      *
+ * counting.                                                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ictp_u {
+	bdrkreg_t	ii_ictp_regval;
+	struct  {
+		bdrkreg_t	i_prescale                :	24;
+		bdrkreg_t	i_rsvd			  :	40;
+	} ii_ictp_fld_s;
+} ii_ictp_u_t;
+
+#else
+
+typedef union ii_ictp_u {
+	bdrkreg_t	ii_ictp_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	40;
+		bdrkreg_t	i_prescale		  :	24;
+	} ii_ictp_fld_s;
+} ii_ictp_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, four   *
+ * registers (_A to _D) are required to read and write each entry.      *
+ * The CRB Entry registers can be conceptualized as rows and columns    *
+ * (illustrated in the table above). Each row contains the 4            *
+ * registers required for a single CRB Entry. The first doubleword      *
+ * (column) for each entry is labeled A, and the second doubleword      *
+ * (higher address) is labeled B, the third doubleword is labeled C,    *
+ * and the fourth doubleword is labeled D. All CRB entries have their   *
+ * addresses on a quarter cacheline aligned boundary.                   *
+ * Upon reset, only the following fields are initialized: valid         *
+ * (VLD), priority count, timeout, timeout valid, and context valid.    *
+ * All other bits should be cleared by software before use (after       *
+ * recovering any potential error state from before the reset).         *
+ * The following four tables summarize the format for the four          *
+ * registers that are used for each ICRB# Entry.                        *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icrb0_a_u {
+	bdrkreg_t	ii_icrb0_a_regval;
+	struct  {
+		bdrkreg_t	ia_iow                    :	 1;
+		bdrkreg_t	ia_vld			  :	 1;
+		bdrkreg_t	ia_addr			  :	38;
+		bdrkreg_t	ia_tnum			  :	 5;
+		bdrkreg_t	ia_sidn			  :	 4;
+		bdrkreg_t	ia_xt_err		  :	 1;
+		bdrkreg_t	ia_mark			  :	 1;
+		bdrkreg_t	ia_ln_uce		  :	 1;
+		bdrkreg_t	ia_errcode		  :	 3;
+		bdrkreg_t	ia_error		  :	 1;
+		bdrkreg_t	ia_stall__bte_1		  :	 1;
+		bdrkreg_t	ia_stall__bte_0		  :	 1;
+		bdrkreg_t       ia_rsvd                   :      6;
+	} ii_icrb0_a_fld_s;
+} ii_icrb0_a_u_t;
+
+#else
+
+typedef union ii_icrb0_a_u {
+	bdrkreg_t	ii_icrb0_a_regval;
+	struct	{
+		bdrkreg_t	ia_rsvd			  :	 6;
+		bdrkreg_t	ia_stall__bte_0		  :	 1;
+		bdrkreg_t	ia_stall__bte_1		  :	 1;
+		bdrkreg_t	ia_error		  :	 1;
+		bdrkreg_t	ia_errcode		  :	 3;
+		bdrkreg_t	ia_ln_uce		  :	 1;
+		bdrkreg_t	ia_mark			  :	 1;
+		bdrkreg_t	ia_xt_err		  :	 1;
+		bdrkreg_t	ia_sidn			  :	 4;
+		bdrkreg_t	ia_tnum			  :	 5;
+		bdrkreg_t	ia_addr			  :	38;
+		bdrkreg_t	ia_vld			  :	 1;
+		bdrkreg_t	ia_iow			  :	 1;
+	} ii_icrb0_a_fld_s;
+} ii_icrb0_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, four   *
+ * registers (_A to _D) are required to read and write each entry.      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icrb0_b_u {
+	bdrkreg_t	ii_icrb0_b_regval;
+	struct	{
+		bdrkreg_t	ib_stall__intr		  :	 1;
+		bdrkreg_t	ib_stall_ib		  :	 1;
+		bdrkreg_t	ib_intvn		  :	 1;
+		bdrkreg_t	ib_wb			  :	 1;
+		bdrkreg_t	ib_hold			  :	 1;
+		bdrkreg_t	ib_ack			  :	 1;
+		bdrkreg_t	ib_resp			  :	 1;
+		bdrkreg_t	ib_ack_cnt		  :	11;
+		bdrkreg_t	ib_rsvd_1		  :	 7;
+		bdrkreg_t	ib_exc			  :	 5;
+		bdrkreg_t	ib_init			  :	 3;
+		bdrkreg_t	ib_imsg			  :	 8;
+		bdrkreg_t	ib_imsgtype		  :	 2;
+		bdrkreg_t	ib_use_old		  :	 1;
+		bdrkreg_t	ib_source		  :	12;
+		bdrkreg_t	ib_size			  :	 2;
+		bdrkreg_t	ib_ct			  :	 1;
+		bdrkreg_t	ib_bte_num		  :	 1;
+		bdrkreg_t	ib_rsvd			  :	 4;
+	} ii_icrb0_b_fld_s;
+} ii_icrb0_b_u_t;
+
+#else
+
+typedef union ii_icrb0_b_u {
+	bdrkreg_t	ii_icrb0_b_regval;
+	struct  {
+		bdrkreg_t	ib_rsvd                   :	 4;
+		bdrkreg_t	ib_bte_num                :	 1;
+		bdrkreg_t	ib_ct                     :	 1;
+		bdrkreg_t	ib_size                   :	 2;
+		bdrkreg_t	ib_source                 :	12;
+		bdrkreg_t	ib_use_old                :	 1;
+		bdrkreg_t	ib_imsgtype               :	 2;
+		bdrkreg_t	ib_imsg                   :	 8;
+		bdrkreg_t	ib_init                   :	 3;
+		bdrkreg_t	ib_exc                    :	 5;
+		bdrkreg_t	ib_rsvd_1                 :	 7;
+		bdrkreg_t	ib_ack_cnt                :	11;
+		bdrkreg_t	ib_resp                   :	 1;
+		bdrkreg_t	ib_ack                    :	 1;
+		bdrkreg_t	ib_hold                   :	 1;
+		bdrkreg_t	ib_wb                     :	 1;
+		bdrkreg_t	ib_intvn                  :	 1;
+		bdrkreg_t	ib_stall_ib               :	 1;
+		bdrkreg_t	ib_stall__intr            :	 1;
+	} ii_icrb0_b_fld_s;
+} ii_icrb0_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, four   *
+ * registers (_A to _D) are required to read and write each entry.      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icrb0_c_u {
+	bdrkreg_t	ii_icrb0_c_regval;
+	struct	{
+		bdrkreg_t	ic_gbr			  :	 1;
+		bdrkreg_t	ic_resprqd		  :	 1;
+		bdrkreg_t	ic_bo			  :	 1;
+		bdrkreg_t	ic_suppl		  :	12;
+		bdrkreg_t	ic_pa_be		  :	34;
+		bdrkreg_t	ic_bte_op		  :	 1;
+		bdrkreg_t	ic_pr_psc		  :	 4;
+		bdrkreg_t	ic_pr_cnt		  :	 4;
+		bdrkreg_t	ic_sleep		  :	 1;
+		bdrkreg_t	ic_rsvd			  :	 5;
+	} ii_icrb0_c_fld_s;
+} ii_icrb0_c_u_t;
+
+#else
+
+typedef union ii_icrb0_c_u {
+	bdrkreg_t	ii_icrb0_c_regval;
+	struct  {
+		bdrkreg_t	ic_rsvd                   :	 5;
+		bdrkreg_t	ic_sleep                  :	 1;
+		bdrkreg_t	ic_pr_cnt                 :	 4;
+		bdrkreg_t	ic_pr_psc                 :	 4;
+		bdrkreg_t	ic_bte_op                 :	 1;
+		bdrkreg_t	ic_pa_be                  :	34;
+		bdrkreg_t	ic_suppl                  :	12;
+		bdrkreg_t	ic_bo                     :	 1;
+		bdrkreg_t	ic_resprqd                :	 1;
+		bdrkreg_t	ic_gbr                    :	 1;
+	} ii_icrb0_c_fld_s;
+} ii_icrb0_c_u_t;
+
+#endif
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, four   *
+ * registers (_A to _D) are required to read and write each entry.      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icrb0_d_u {
+	bdrkreg_t	ii_icrb0_d_regval;
+	struct  {
+		bdrkreg_t	id_timeout                :	 8;
+		bdrkreg_t	id_context		  :	15;
+		bdrkreg_t	id_rsvd_1		  :	 1;
+		bdrkreg_t	id_tvld			  :	 1;
+		bdrkreg_t	id_cvld			  :	 1;
+		bdrkreg_t	id_rsvd			  :	38;
+	} ii_icrb0_d_fld_s;
+} ii_icrb0_d_u_t;
+
+#else
+
+typedef union ii_icrb0_d_u {
+	bdrkreg_t	ii_icrb0_d_regval;
+	struct	{
+		bdrkreg_t	id_rsvd			  :	38;
+		bdrkreg_t	id_cvld			  :	 1;
+		bdrkreg_t	id_tvld			  :	 1;
+		bdrkreg_t	id_rsvd_1		  :	 1;
+		bdrkreg_t	id_context		  :	15;
+		bdrkreg_t	id_timeout		  :	 8;
+	} ii_icrb0_d_fld_s;
+} ii_icrb0_d_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the lower 64 bits of the header of the       *
+ * spurious message captured by II. Valid when the SP_MSG bit in ICMR   *
+ * register is set.                                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icsml_u {
+	bdrkreg_t	ii_icsml_regval;
+	struct  {
+		bdrkreg_t	i_tt_addr                 :	38;
+		bdrkreg_t	i_tt_ack_cnt		  :	11;
+		bdrkreg_t	i_newsuppl_ex		  :	11;
+		bdrkreg_t	i_reserved		  :	 3;
+		bdrkreg_t       i_overflow                :      1;
+	} ii_icsml_fld_s;
+} ii_icsml_u_t;
+
+#else
+
+typedef union ii_icsml_u {
+	bdrkreg_t	ii_icsml_regval;
+	struct	{
+		bdrkreg_t	i_overflow		  :	 1;
+		bdrkreg_t	i_reserved		  :	 3;
+		bdrkreg_t	i_newsuppl_ex		  :	11;
+		bdrkreg_t	i_tt_ack_cnt		  :	11;
+		bdrkreg_t	i_tt_addr		  :	38;
+	} ii_icsml_fld_s;
+} ii_icsml_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the microscopic state, all the inputs to     *
+ * the protocol table, captured with the spurious message. Valid when   *
+ * the SP_MSG bit in the ICMR register is set.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_icsmh_u {
+	bdrkreg_t	ii_icsmh_regval;
+	struct  {
+		bdrkreg_t	i_tt_vld                  :	 1;
+		bdrkreg_t	i_xerr			  :	 1;
+		bdrkreg_t	i_ft_cwact_o		  :	 1;
+		bdrkreg_t	i_ft_wact_o		  :	 1;
+		bdrkreg_t       i_ft_active_o             :      1;
+		bdrkreg_t	i_sync			  :	 1;
+		bdrkreg_t	i_mnusg			  :	 1;
+		bdrkreg_t	i_mnusz			  :	 1;
+		bdrkreg_t	i_plusz			  :	 1;
+		bdrkreg_t	i_plusg			  :	 1;
+		bdrkreg_t	i_tt_exc		  :	 5;
+		bdrkreg_t	i_tt_wb			  :	 1;
+		bdrkreg_t	i_tt_hold		  :	 1;
+		bdrkreg_t	i_tt_ack		  :	 1;
+		bdrkreg_t	i_tt_resp		  :	 1;
+		bdrkreg_t	i_tt_intvn		  :	 1;
+		bdrkreg_t	i_g_stall_bte1		  :	 1;
+		bdrkreg_t	i_g_stall_bte0		  :	 1;
+		bdrkreg_t	i_g_stall_il		  :	 1;
+		bdrkreg_t	i_g_stall_ib		  :	 1;
+		bdrkreg_t	i_tt_imsg		  :	 8;
+		bdrkreg_t	i_tt_imsgtype		  :	 2;
+		bdrkreg_t	i_tt_use_old		  :	 1;
+		bdrkreg_t	i_tt_respreqd		  :	 1;
+		bdrkreg_t	i_tt_bte_num		  :	 1;
+		bdrkreg_t	i_cbn			  :	 1;
+		bdrkreg_t	i_match			  :	 1;
+		bdrkreg_t	i_rpcnt_lt_34		  :	 1;
+		bdrkreg_t	i_rpcnt_ge_34		  :	 1;
+		bdrkreg_t	i_rpcnt_lt_18		  :	 1;
+		bdrkreg_t	i_rpcnt_ge_18		  :	 1;
+		bdrkreg_t       i_rpcnt_lt_2              :      1;
+		bdrkreg_t	i_rpcnt_ge_2		  :	 1;
+		bdrkreg_t	i_rqcnt_lt_18		  :	 1;
+		bdrkreg_t	i_rqcnt_ge_18		  :	 1;
+		bdrkreg_t	i_rqcnt_lt_2		  :	 1;
+		bdrkreg_t	i_rqcnt_ge_2		  :	 1;
+		bdrkreg_t	i_tt_device		  :	 7;
+		bdrkreg_t	i_tt_init		  :	 3;
+		bdrkreg_t	i_reserved		  :	 5;
+	} ii_icsmh_fld_s;
+} ii_icsmh_u_t;
+
+#else
+
+typedef union ii_icsmh_u {
+	bdrkreg_t	ii_icsmh_regval;
+	struct	{
+		bdrkreg_t	i_reserved		  :	 5;
+		bdrkreg_t	i_tt_init		  :	 3;
+		bdrkreg_t	i_tt_device		  :	 7;
+		bdrkreg_t	i_rqcnt_ge_2		  :	 1;
+		bdrkreg_t	i_rqcnt_lt_2		  :	 1;
+		bdrkreg_t	i_rqcnt_ge_18		  :	 1;
+		bdrkreg_t	i_rqcnt_lt_18		  :	 1;
+		bdrkreg_t	i_rpcnt_ge_2		  :	 1;
+		bdrkreg_t	i_rpcnt_lt_2		  :	 1;
+		bdrkreg_t	i_rpcnt_ge_18		  :	 1;
+		bdrkreg_t	i_rpcnt_lt_18		  :	 1;
+		bdrkreg_t	i_rpcnt_ge_34		  :	 1;
+		bdrkreg_t	i_rpcnt_lt_34		  :	 1;
+		bdrkreg_t	i_match			  :	 1;
+		bdrkreg_t	i_cbn			  :	 1;
+		bdrkreg_t	i_tt_bte_num		  :	 1;
+		bdrkreg_t	i_tt_respreqd		  :	 1;
+		bdrkreg_t	i_tt_use_old		  :	 1;
+		bdrkreg_t	i_tt_imsgtype		  :	 2;
+		bdrkreg_t	i_tt_imsg		  :	 8;
+		bdrkreg_t	i_g_stall_ib		  :	 1;
+		bdrkreg_t	i_g_stall_il		  :	 1;
+		bdrkreg_t	i_g_stall_bte0		  :	 1;
+		bdrkreg_t	i_g_stall_bte1		  :	 1;
+		bdrkreg_t	i_tt_intvn		  :	 1;
+		bdrkreg_t	i_tt_resp		  :	 1;
+		bdrkreg_t	i_tt_ack		  :	 1;
+		bdrkreg_t	i_tt_hold		  :	 1;
+		bdrkreg_t	i_tt_wb			  :	 1;
+		bdrkreg_t	i_tt_exc		  :	 5;
+		bdrkreg_t	i_plusg			  :	 1;
+		bdrkreg_t	i_plusz			  :	 1;
+		bdrkreg_t	i_mnusz			  :	 1;
+		bdrkreg_t	i_mnusg			  :	 1;
+		bdrkreg_t	i_sync			  :	 1;
+		bdrkreg_t	i_ft_active_o		  :	 1;
+		bdrkreg_t	i_ft_wact_o		  :	 1;
+		bdrkreg_t	i_ft_cwact_o		  :	 1;
+		bdrkreg_t	i_xerr			  :	 1;
+		bdrkreg_t	i_tt_vld		  :	 1;
+	} ii_icsmh_fld_s;
+} ii_icsmh_u_t;
+
+#endif
+
+
+/************************************************************************
+ *                                                                      *
+ *  The Bedrock DEBUG unit provides a 3-bit selection signal to the     *
+ * II unit, thus allowing a choice of one set of debug signal outputs   *
+ * from a menu of 8 options. Each option is limited to 32 bits in       *
+ * size. There are more signals of interest than can be accommodated    *
+ * in this 8*32 framework, so the IDBSS register has been defined to    *
+ * extend the range of choices available. For each menu option          *
+ * available to the DEBUG unit, the II provides a "submenu" of          *
+ * several options. The value of the SUBMENU field in the IDBSS         *
+ * register selects the desired submenu. Hence, the particular debug    *
+ * signals provided by the II are determined by the 3-bit selection     *
+ * signal from the DEBUG unit and the value of the SUBMENU field        *
+ * within the IDBSS register. For a detailed description of the         *
+ * available menus and submenus for II debug signals, refer to the      *
+ * documentation in ii_interface.doc.                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_idbss_u {
+	bdrkreg_t	ii_idbss_regval;
+	struct  {
+		bdrkreg_t	i_submenu                 :	 3;
+		bdrkreg_t	i_rsvd			  :	61;
+	} ii_idbss_fld_s;
+} ii_idbss_u_t;
+
+#else
+
+typedef union ii_idbss_u {
+	bdrkreg_t	ii_idbss_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	61;
+		bdrkreg_t	i_submenu		  :	 3;
+	} ii_idbss_fld_s;
+} ii_idbss_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register is used to set up the length for a       *
+ * transfer and then to monitor the progress of that transfer. This     *
+ * register needs to be initialized before a transfer is started. A     *
+ * legitimate write to this register will set the Busy bit, clear the   *
+ * Error bit, and initialize the length to the value desired.           *
+ * While the transfer is in progress, hardware will decrement the       *
+ * length field with each successful block that is copied. Once the     *
+ * transfer completes, hardware will clear the Busy bit. The length     *
+ * field will also contain the number of cache lines left to be         *
+ * transferred.                                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibls0_u {
+	bdrkreg_t	ii_ibls0_regval;
+	struct	{
+		bdrkreg_t	i_length		  :	16;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_busy			  :	 1;
+		bdrkreg_t       i_rsvd                    :     43;
+	} ii_ibls0_fld_s;
+} ii_ibls0_u_t;
+
+#else
+
+typedef union ii_ibls0_u {
+	bdrkreg_t	ii_ibls0_regval;
+	struct  {
+		bdrkreg_t	i_rsvd                    :	43;
+		bdrkreg_t	i_busy                    :	 1;
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_error                   :	 1;
+		bdrkreg_t	i_length                  :	16;
+	} ii_ibls0_fld_s;
+} ii_ibls0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register should be loaded before a transfer is started. The    *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical       *
+ * address as described in Section 1.3, Figure2 and Figure3. Since      *
+ * the bottom 7 bits of the address are always taken to be zero, BTE    *
+ * transfers are always cacheline-aligned.                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibsa0_u {
+	bdrkreg_t	ii_ibsa0_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 7;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t       i_rsvd                    :     24;
+	} ii_ibsa0_fld_s;
+} ii_ibsa0_u_t;
+
+#else
+
+typedef union ii_ibsa0_u {
+	bdrkreg_t	ii_ibsa0_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	24;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+	} ii_ibsa0_fld_s;
+} ii_ibsa0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register should be loaded before a transfer is started. The    *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical       *
+ * address as described in Section 1.3, Figure2 and Figure3. Since      *
+ * the bottom 7 bits of the address are always taken to be zero, BTE    *
+ * transfers are always cacheline-aligned.                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibda0_u {
+	bdrkreg_t	ii_ibda0_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 7;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd			  :	24;
+	} ii_ibda0_fld_s;
+} ii_ibda0_u_t;
+
+#else
+
+typedef union ii_ibda0_u {
+	bdrkreg_t	ii_ibda0_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	24;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+	} ii_ibda0_fld_s;
+} ii_ibda0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Writing to this register sets up the attributes of the transfer     *
+ * and initiates the transfer operation. Reading this register has      *
+ * the side effect of terminating any transfer in progress. Note:       *
+ * stopping a transfer midstream could have an adverse impact on the    *
+ * other BTE. If a BTE stream has to be stopped (due to error           *
+ * handling for example), both BTE streams should be stopped and        *
+ * their transfers discarded.                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibct0_u {
+	bdrkreg_t	ii_ibct0_regval;
+	struct  {
+		bdrkreg_t	i_zerofill                :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 3;
+		bdrkreg_t	i_notify		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t       i_poison                  :      1;
+		bdrkreg_t       i_rsvd                    :     55;
+	} ii_ibct0_fld_s;
+} ii_ibct0_u_t;
+
+#else
+
+typedef union ii_ibct0_u {
+	bdrkreg_t	ii_ibct0_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	55;
+		bdrkreg_t	i_poison		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_notify		  :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 3;
+		bdrkreg_t	i_zerofill		  :	 1;
+	} ii_ibct0_fld_s;
+} ii_ibct0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the address to which the WINV is sent.       *
+ * This address has to be cache line aligned.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibna0_u {
+	bdrkreg_t	ii_ibna0_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 7;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd			  :	24;
+	} ii_ibna0_fld_s;
+} ii_ibna0_u_t;
+
+#else
+
+typedef union ii_ibna0_u {
+	bdrkreg_t	ii_ibna0_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	24;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+	} ii_ibna0_fld_s;
+} ii_ibna0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the programmable level as well as the node   *
+ * ID and PI unit of the processor to which the interrupt will be       *
+ * sent.                                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibia0_u {
+	bdrkreg_t	ii_ibia0_regval;
+	struct  {
+		bdrkreg_t	i_pi_id                   :	 1;
+		bdrkreg_t	i_node_id		  :	 8;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+		bdrkreg_t	i_level			  :	 7;
+		bdrkreg_t       i_rsvd                    :     41;
+	} ii_ibia0_fld_s;
+} ii_ibia0_u_t;
+
+#else
+
+typedef union ii_ibia0_u {
+	bdrkreg_t	ii_ibia0_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	41;
+		bdrkreg_t	i_level			  :	 7;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+		bdrkreg_t	i_node_id		  :	 8;
+		bdrkreg_t	i_pi_id			  :	 1;
+	} ii_ibia0_fld_s;
+} ii_ibia0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register is used to set up the length for a       *
+ * transfer and then to monitor the progress of that transfer. This     *
+ * register needs to be initialized before a transfer is started. A     *
+ * legitimate write to this register will set the Busy bit, clear the   *
+ * Error bit, and initialize the length to the value desired.           *
+ * While the transfer is in progress, hardware will decrement the       *
+ * length field with each successful block that is copied. Once the     *
+ * transfer completes, hardware will clear the Busy bit. The length     *
+ * field will also contain the number of cache lines left to be         *
+ * transferred.                                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibls1_u {
+	bdrkreg_t	ii_ibls1_regval;
+	struct  {
+		bdrkreg_t	i_length                  :	16;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_busy			  :	 1;
+		bdrkreg_t       i_rsvd                    :     43;
+	} ii_ibls1_fld_s;
+} ii_ibls1_u_t;
+
+#else
+
+typedef union ii_ibls1_u {
+	bdrkreg_t	ii_ibls1_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	43;
+		bdrkreg_t	i_busy			  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_error			  :	 1;
+		bdrkreg_t	i_length		  :	16;
+	} ii_ibls1_fld_s;
+} ii_ibls1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register should be loaded before a transfer is started. The    *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical       *
+ * address as described in Section 1.3, Figure2 and Figure3. Since      *
+ * the bottom 7 bits of the address are always taken to be zero, BTE    *
+ * transfers are always cacheline-aligned.                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibsa1_u {
+	bdrkreg_t	ii_ibsa1_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 7;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd			  :	24;
+	} ii_ibsa1_fld_s;
+} ii_ibsa1_u_t;
+
+#else
+
+typedef union ii_ibsa1_u {
+	bdrkreg_t	ii_ibsa1_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	24;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+	} ii_ibsa1_fld_s;
+} ii_ibsa1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register should be loaded before a transfer is started. The    *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical       *
+ * address as described in Section 1.3, Figure2 and Figure3. Since      *
+ * the bottom 7 bits of the address are always taken to be zero, BTE    *
+ * transfers are always cacheline-aligned.                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibda1_u {
+	bdrkreg_t	ii_ibda1_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 7;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd			  :	24;
+	} ii_ibda1_fld_s;
+} ii_ibda1_u_t;
+
+#else
+
+typedef union ii_ibda1_u {
+	bdrkreg_t	ii_ibda1_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	24;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+	} ii_ibda1_fld_s;
+} ii_ibda1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Writing to this register sets up the attributes of the transfer     *
+ * and initiates the transfer operation. Reading this register has      *
+ * the side effect of terminating any transfer in progress. Note:       *
+ * stopping a transfer midstream could have an adverse impact on the    *
+ * other BTE. If a BTE stream has to be stopped (due to error           *
+ * handling for example), both BTE streams should be stopped and        *
+ * their transfers discarded.                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibct1_u {
+	bdrkreg_t	ii_ibct1_regval;
+	struct  {
+		bdrkreg_t	i_zerofill                :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 3;
+		bdrkreg_t	i_notify		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_poison		  :	 1;
+		bdrkreg_t	i_rsvd			  :	55;
+	} ii_ibct1_fld_s;
+} ii_ibct1_u_t;
+
+#else
+
+typedef union ii_ibct1_u {
+	bdrkreg_t	ii_ibct1_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	55;
+		bdrkreg_t	i_poison		  :	 1;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+		bdrkreg_t	i_notify		  :	 1;
+		bdrkreg_t	i_rsvd_2		  :	 3;
+		bdrkreg_t	i_zerofill		  :	 1;
+	} ii_ibct1_fld_s;
+} ii_ibct1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the address to which the WINV is sent.       *
+ * This address has to be cache line aligned.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibna1_u {
+	bdrkreg_t	ii_ibna1_regval;
+	struct  {
+		bdrkreg_t	i_rsvd_1                  :	 7;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t       i_rsvd                    :     24;
+	} ii_ibna1_fld_s;
+} ii_ibna1_u_t;
+
+#else
+
+typedef union ii_ibna1_u {
+	bdrkreg_t	ii_ibna1_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	24;
+		bdrkreg_t	i_addr			  :	33;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+	} ii_ibna1_fld_s;
+} ii_ibna1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the programmable level as well as the node   *
+ * ID and PI unit of the processor to which the interrupt will be       *
+ * sent.                                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ibia1_u {
+	bdrkreg_t	ii_ibia1_regval;
+	struct  {
+		bdrkreg_t	i_pi_id                   :	 1;
+		bdrkreg_t	i_node_id		  :	 8;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+		bdrkreg_t	i_level			  :	 7;
+		bdrkreg_t	i_rsvd			  :	41;
+	} ii_ibia1_fld_s;
+} ii_ibia1_u_t;
+
+#else
+
+typedef union ii_ibia1_u {
+	bdrkreg_t	ii_ibia1_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	41;
+		bdrkreg_t	i_level			  :	 7;
+		bdrkreg_t	i_rsvd_1		  :	 7;
+		bdrkreg_t	i_node_id		  :	 8;
+		bdrkreg_t	i_pi_id			  :	 1;
+	} ii_ibia1_fld_s;
+} ii_ibia1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register defines the resources that feed information into      *
+ * the two performance counters located in the IO Performance           *
+ * Profiling Register. There are 17 different quantities that can be    *
+ * measured. Given these 17 different options, the two performance      *
+ * counters have 15 of them in common; menu selections 0 through 0xE    *
+ * are identical for each performance counter. As for the other two     *
+ * options, one is available from one performance counter and the       *
+ * other is available from the other performance counter. Hence, the    *
+ * II supports all 17*16=272 possible combinations of quantities to     *
+ * measure.                                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ipcr_u {
+	bdrkreg_t	ii_ipcr_regval;
+	struct  {
+		bdrkreg_t	i_ippr0_c                 :	 4;
+		bdrkreg_t	i_ippr1_c		  :	 4;
+		bdrkreg_t	i_icct			  :	 8;
+		bdrkreg_t       i_rsvd                    :     48;
+	} ii_ipcr_fld_s;
+} ii_ipcr_u_t;
+
+#else
+
+typedef union ii_ipcr_u {
+	bdrkreg_t	ii_ipcr_regval;
+	struct	{
+		bdrkreg_t	i_rsvd			  :	48;
+		bdrkreg_t	i_icct			  :	 8;
+		bdrkreg_t	i_ippr1_c		  :	 4;
+		bdrkreg_t	i_ippr0_c		  :	 4;
+	} ii_ipcr_fld_s;
+} ii_ipcr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ii_ippr_u {
+	bdrkreg_t	ii_ippr_regval;
+	struct  {
+		bdrkreg_t	i_ippr0                   :	32;
+		bdrkreg_t	i_ippr1			  :	32;
+	} ii_ippr_fld_s;
+} ii_ippr_u_t;
+
+#else
+
+typedef union ii_ippr_u {
+	bdrkreg_t	ii_ippr_regval;
+	struct	{
+		bdrkreg_t	i_ippr1			  :	32;
+		bdrkreg_t	i_ippr0			  :	32;
+	} ii_ippr_fld_s;
+} ii_ippr_u_t;
+
+#endif
+
+
+
+
+
+
+#endif /* _LANGUAGE_C */
+
+/************************************************************************
+ *                                                                      *
+ * The following defines which were not formed into structures are      *
+ * probably identical to another register, and the name of the          *
+ * register is provided against each of these registers. This           *
+ * information needs to be checked carefully                            *
+ *                                                                      *
+ *           IIO_ICRB1_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB1_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB1_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB1_D                IIO_ICRB0_D                       *
+ *           IIO_ICRB2_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB2_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB2_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB2_D                IIO_ICRB0_D                       *
+ *           IIO_ICRB3_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB3_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB3_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB3_D                IIO_ICRB0_D                       *
+ *           IIO_ICRB4_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB4_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB4_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB4_D                IIO_ICRB0_D                       *
+ *           IIO_ICRB5_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB5_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB5_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB5_D                IIO_ICRB0_D                       *
+ *           IIO_ICRB6_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB6_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB6_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB6_D                IIO_ICRB0_D                       *
+ *           IIO_ICRB7_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB7_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB7_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB7_D                IIO_ICRB0_D                       *
+ *           IIO_ICRB8_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB8_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB8_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB8_D                IIO_ICRB0_D                       *
+ *           IIO_ICRB9_A                IIO_ICRB0_A                       *
+ *           IIO_ICRB9_B                IIO_ICRB0_B                       *
+ *           IIO_ICRB9_C                IIO_ICRB0_C                       *
+ *           IIO_ICRB9_D                IIO_ICRB0_D                       *
+ *           IIO_ICRBA_A                IIO_ICRB0_A                       *
+ *           IIO_ICRBA_B                IIO_ICRB0_B                       *
+ *           IIO_ICRBA_C                IIO_ICRB0_C                       *
+ *           IIO_ICRBA_D                IIO_ICRB0_D                       *
+ *           IIO_ICRBB_A                IIO_ICRB0_A                       *
+ *           IIO_ICRBB_B                IIO_ICRB0_B                       *
+ *           IIO_ICRBB_C                IIO_ICRB0_C                       *
+ *           IIO_ICRBB_D                IIO_ICRB0_D                       *
+ *           IIO_ICRBC_A                IIO_ICRB0_A                       *
+ *           IIO_ICRBC_B                IIO_ICRB0_B                       *
+ *           IIO_ICRBC_C                IIO_ICRB0_C                       *
+ *           IIO_ICRBC_D                IIO_ICRB0_D                       *
+ *           IIO_ICRBD_A                IIO_ICRB0_A                       *
+ *           IIO_ICRBD_B                IIO_ICRB0_B                       *
+ *           IIO_ICRBD_C                IIO_ICRB0_C                       *
+ *           IIO_ICRBD_D                IIO_ICRB0_D                       *
+ *           IIO_ICRBE_A                IIO_ICRB0_A                       *
+ *           IIO_ICRBE_B                IIO_ICRB0_B                       *
+ *           IIO_ICRBE_C                IIO_ICRB0_C                       *
+ *           IIO_ICRBE_D                IIO_ICRB0_D                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+/************************************************************************
+ *                                                                      *
+ *               MAKE ALL ADDITIONS AFTER THIS LINE                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+
+#endif /* _ASM_SN_SN1_HUBIO_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubio_next.h linux/include/asm-ia64/sn/sn1/hubio_next.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubio_next.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubio_next.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,714 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBIO_NEXT_H
+#define _ASM_SN_SN1_HUBIO_NEXT_H
+
+/*
+ * Slightly friendlier names for some common registers.
+ */
+#define IIO_WIDGET              IIO_WID      /* Widget identification */
+#define IIO_WIDGET_STAT         IIO_WSTAT    /* Widget status register */
+#define IIO_WIDGET_CTRL         IIO_WCR      /* Widget control register */
+#define IIO_PROTECT             IIO_ILAPR    /* IO interface protection */
+#define IIO_PROTECT_OVRRD       IIO_ILAPO    /* IO protect override */
+#define IIO_OUTWIDGET_ACCESS    IIO_IOWA     /* Outbound widget access */
+#define IIO_INWIDGET_ACCESS     IIO_IIWA     /* Inbound widget access */
+#define IIO_INDEV_ERR_MASK      IIO_IIDEM    /* Inbound device error mask */
+#define IIO_LLP_CSR             IIO_ILCSR    /* LLP control and status */
+#define IIO_LLP_LOG             IIO_ILLR     /* LLP log */
+#define IIO_XTALKCC_TOUT        IIO_IXCC     /* Xtalk credit count timeout*/
+#define IIO_XTALKTT_TOUT        IIO_IXTT     /* Xtalk tail timeout */
+#define IIO_IO_ERR_CLR          IIO_IECLR    /* IO error clear */
+#define IIO_IGFX_0 		IIO_IGFX0
+#define IIO_IGFX_1 		IIO_IGFX1
+#define IIO_IBCT_0		IIO_IBCT0
+#define IIO_IBCT_1		IIO_IBCT1
+#define IIO_IBLS_0		IIO_IBLS0
+#define IIO_IBLS_1		IIO_IBLS1
+#define IIO_IBSA_0		IIO_IBSA0
+#define IIO_IBSA_1		IIO_IBSA1
+#define IIO_IBDA_0		IIO_IBDA0
+#define IIO_IBDA_1		IIO_IBDA1
+#define IIO_IBNA_0		IIO_IBNA0
+#define IIO_IBNA_1		IIO_IBNA1
+#define IIO_IBIA_0		IIO_IBIA0
+#define IIO_IBIA_1		IIO_IBIA1
+#define IIO_IOPRB_0		IIO_IPRB0
+#define IIO_PRTE_0      	IIO_IPRTE0        /* PIO Read address table entry 0 */
+#define IIO_PRTE(_x)    	(IIO_PRTE_0 + (8 * (_x)))
+
+#define IIO_LLP_CSR_IS_UP               0x00002000
+#define IIO_LLP_CSR_LLP_STAT_MASK       0x00003000
+#define IIO_LLP_CSR_LLP_STAT_SHFT       12
+
+#define IIO_LLP_CB_MAX  0xffff	/* in ILLR CB_CNT, Max Check Bit errors */
+#define IIO_LLP_SN_MAX  0xffff	/* in ILLR SN_CNT, Max Sequence Number errors */
+
+/* key to IIO_PROTECT_OVRRD */
+#define IIO_PROTECT_OVRRD_KEY   0x53474972756c6573ull   /* "SGIrules" */
+
+/* BTE register names */
+#define IIO_BTE_STAT_0          IIO_IBLS_0   /* Also BTE length/status 0 */
+#define IIO_BTE_SRC_0           IIO_IBSA_0   /* Also BTE source address  0 */
+#define IIO_BTE_DEST_0          IIO_IBDA_0   /* Also BTE dest. address 0 */
+#define IIO_BTE_CTRL_0          IIO_IBCT_0   /* Also BTE control/terminate 0 */
+#define IIO_BTE_NOTIFY_0        IIO_IBNA_0   /* Also BTE notification 0 */
+#define IIO_BTE_INT_0           IIO_IBIA_0   /* Also BTE interrupt 0 */
+#define IIO_BTE_OFF_0           0            /* Base offset from BTE 0 regs. */
+#define IIO_BTE_OFF_1   (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */
+
+/* BTE register offsets from base */
+#define BTEOFF_STAT             0
+#define BTEOFF_SRC              (IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
+#define BTEOFF_DEST             (IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
+#define BTEOFF_CTRL             (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
+#define BTEOFF_NOTIFY           (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
+#define BTEOFF_INT              (IIO_BTE_INT_0 - IIO_BTE_STAT_0)
+
+
+/* names used in hub_diags.c; carried over from SN0 */
+#define IIO_BASE_BTE0   IIO_IBLS_0		
+#define IIO_BASE_BTE1   IIO_IBLS_1		
+#if 0
+#define IIO_BASE        IIO_WID
+#define IIO_BASE_PERF   IIO_IPCR   /* IO Performance Control */
+#define IIO_PERF_CNT    IIO_IPPR   /* IO Performance Profiling */
+#endif
+
+
+/* GFX Flow Control Node/Widget Register */
+#define IIO_IGFX_W_NUM_BITS	4	/* size of widget num field */
+#define IIO_IGFX_W_NUM_MASK	((1<<IIO_IGFX_W_NUM_BITS)-1)
+#define IIO_IGFX_W_NUM_SHIFT	0
+#define IIO_IGFX_PI_NUM_BITS	1	/* size of PI num field */
+#define IIO_IGFX_PI_NUM_MASK	((1<<IIO_IGFX_PI_NUM_BITS)-1)
+#define IIO_IGFX_PI_NUM_SHIFT	4
+#define IIO_IGFX_N_NUM_BITS	8	/* size of node num field */
+#define IIO_IGFX_N_NUM_MASK	((1<<IIO_IGFX_N_NUM_BITS)-1)
+#define IIO_IGFX_N_NUM_SHIFT	5
+#define IIO_IGFX_P_NUM_BITS	1	/* size of processor num field */
+#define IIO_IGFX_P_NUM_MASK	((1<<IIO_IGFX_P_NUM_BITS)-1)
+#define IIO_IGFX_P_NUM_SHIFT	16
+#define IIO_IGFX_INIT(widget, pi, node, cpu)				(\
+	(((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) |	 \
+	(((pi)     & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)|	 \
+	(((node)   & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) |	 \
+	(((cpu)    & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
+
+
+/* Scratch registers (all bits available) */
+#define IIO_SCRATCH_REG0        IIO_ISCR0
+#define IIO_SCRATCH_REG1        IIO_ISCR1
+#define IIO_SCRATCH_MASK        0xffffffffffffffff
+
+#define IIO_SCRATCH_BIT0_0      0x0000000000000001
+#define IIO_SCRATCH_BIT0_1      0x0000000000000002
+#define IIO_SCRATCH_BIT0_2      0x0000000000000004
+#define IIO_SCRATCH_BIT0_3      0x0000000000000008
+#define IIO_SCRATCH_BIT0_4      0x0000000000000010
+#define IIO_SCRATCH_BIT0_5      0x0000000000000020
+#define IIO_SCRATCH_BIT0_6      0x0000000000000040
+#define IIO_SCRATCH_BIT0_7      0x0000000000000080
+#define IIO_SCRATCH_BIT0_8      0x0000000000000100
+#define IIO_SCRATCH_BIT0_9      0x0000000000000200
+#define IIO_SCRATCH_BIT0_A      0x0000000000000400
+
+#define IIO_SCRATCH_BIT1_0      0x0000000000000001
+#define IIO_SCRATCH_BIT1_1      0x0000000000000002
+/* IO Translation Table Entries */
+#define IIO_NUM_ITTES   7               /* ITTEs numbered 0..6 */
+                                        /* Hw manuals number them 1..7! */
+/*
+ * IIO_IMEM Register fields.
+ */
+#define IIO_IMEM_W0ESD  0x1             /* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD  (1 << 4)        /* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD  (1 << 8)        /* BTE 1 Shut down due to error */
+
+/*
+ * As a permanent workaround for a bug in the PI side of the hub, we've
+ * redefined big window 7 as small window 0.
+ XXX does this still apply for SN1??
+ */
+#define HUB_NUM_BIG_WINDOW      (IIO_NUM_ITTES - 1)
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN            HUB_NUM_BIG_WINDOW
+
+#define IIO_NUM_PRTES   8               /* Total number of PRB table entries */
+
+#define ILCSR_WARM_RESET        0x100
+
+/*
+ * CRB manipulation macros
+ *      The CRB macros are slightly complicated, since there are up to
+ *      four registers associated with each CRB entry.
+ */
+#define IIO_NUM_CRBS            15      /* Number of CRBs */
+#define IIO_NUM_NORMAL_CRBS     12      /* Number of regular CRB entries */
+#define IIO_NUM_PC_CRBS         4       /* Number of partial cache CRBs */
+#define IIO_ICRB_OFFSET         8
+#define IIO_ICRB_0              IIO_ICRB0_A
+#define IIO_ICRB_ADDR_SHFT	2	/* Shift to get proper address */
+/* XXX - This is now tuneable:
+        #define IIO_FIRST_PC_ENTRY 12
+ */
+
+#define IIO_ICRB_A(_x)  (IIO_ICRB_0 + (4 * IIO_ICRB_OFFSET * (_x)))
+#define IIO_ICRB_B(_x)  (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
+#define IIO_ICRB_C(_x)  (IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)
+#define IIO_ICRB_D(_x)  (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
+
+#define TNUM_TO_WIDGET_DEV(_tnum)	((_tnum) & 0x7)
+
+/*
+ * values for "ecode" field
+ */
+#define IIO_ICRB_ECODE_DERR     0       /* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR     1       /* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR     2       /* Write error by IIO access
+                                         * e.g. WINV to a Read only line. */
+#define IIO_ICRB_ECODE_AERR     3       /* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR    4       /* Error on partial write       */
+#define IIO_ICRB_ECODE_PRERR    5       /* Error on partial read        */
+#define IIO_ICRB_ECODE_TOUT     6       /* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR    7       /* Incoming xtalk pkt had error bit */
+
+/*
+ * Number of credits Hub widget has while sending req/response to
+ * xbow.
+ * Value of 3 is required by Xbow 1.1
+ * We may be able to increase this to 4 with Xbow 1.2.
+ */
+#define       HUBII_XBOW_CREDIT       3
+#define       HUBII_XBOW_REV2_CREDIT  4
+
+/*************************************************************************
+
+ Some of the IIO field masks and shifts are defined here.
+ This is in order to maintain compatibility in SN0 and SN1 code
+ 
+**************************************************************************/
+
+/*
+ * ICMR register fields
+ * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not
+ * present in Bedrock)
+ */
+
+#define IIO_ICMR_CRB_VLD_SHFT   20
+#define IIO_ICMR_CRB_VLD_MASK   (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
+
+#define IIO_ICMR_FC_CNT_SHFT    16
+#define IIO_ICMR_FC_CNT_MASK    (0xf << IIO_ICMR_FC_CNT_SHFT)
+
+#define IIO_ICMR_C_CNT_SHFT     4
+#define IIO_ICMR_C_CNT_MASK     (0xf << IIO_ICMR_C_CNT_SHFT)
+
+#define IIO_ICMR_PRECISE        (1UL << 52)
+#define IIO_ICMR_CLR_RPPD       (1UL << 13)
+#define IIO_ICMR_CLR_RQPD       (1UL << 12)
+
+/*
+ * IIO PIO Deallocation register field masks : (IIO_IPDR)
+ XXX present but not needed in bedrock?  See the manual.
+ */
+#define IIO_IPDR_PND    (1 << 4)
+
+/*
+ * IIO CRB deallocation register field masks: (IIO_ICDR)
+ */
+#define IIO_ICDR_PND    (1 << 4)
+
+/* 
+ * IO BTE Length/Status (IIO_IBLS) register bit field definitions
+ */
+#define IBLS_BUSY		(0x1 << 20)
+#define IBLS_ERROR_SHFT		16
+#define IBLS_ERROR		(0x1 << IBLS_ERROR_SHFT)
+#define IBLS_LENGTH_MASK	0xffff
+
+/*
+ * IO BTE Control/Terminate register (IBCT) register bit field definitions
+ */
+#define IBCT_POISON		(0x1 << 8)
+#define IBCT_NOTIFY		(0x1 << 4)
+#define IBCT_ZFIL_MODE		(0x1 << 0)
+
+/*
+ * IO Error Clear register bit field definitions
+ */
+#define IECLR_PI1_FWD_INT	(1 << 31)  /* clear PI1_FORWARD_INT in iidsr */
+#define IECLR_PI0_FWD_INT	(1 << 30)  /* clear PI0_FORWARD_INT in iidsr */
+#define IECLR_SPUR_RD_HDR	(1 << 29)  /* clear valid bit in ixss reg */
+#define IECLR_BTE1		(1 << 18)  /* clear bte error 1 */
+#define IECLR_BTE0		(1 << 17)  /* clear bte error 0 */
+#define IECLR_CRAZY		(1 << 16)  /* clear crazy bit in wstat reg */
+#define IECLR_PRB_F		(1 << 15)  /* clear err bit in PRB_F reg */
+#define IECLR_PRB_E		(1 << 14)  /* clear err bit in PRB_E reg */
+#define IECLR_PRB_D		(1 << 13)  /* clear err bit in PRB_D reg */
+#define IECLR_PRB_C		(1 << 12)  /* clear err bit in PRB_C reg */
+#define IECLR_PRB_B		(1 << 11)  /* clear err bit in PRB_B reg */
+#define IECLR_PRB_A		(1 << 10)  /* clear err bit in PRB_A reg */
+#define IECLR_PRB_9		(1 << 9)   /* clear err bit in PRB_9 reg */
+#define IECLR_PRB_8		(1 << 8)   /* clear err bit in PRB_8 reg */
+#define IECLR_PRB_0		(1 << 0)   /* clear err bit in PRB_0 reg */
+
+/*
+ * IIO CRB control register Fields: IIO_ICCR 
+ */
+#define	IIO_ICCR_PENDING	(0x10000)
+#define	IIO_ICCR_CMD_MASK	(0xFF)
+#define	IIO_ICCR_CMD_SHFT	(7)
+#define	IIO_ICCR_CMD_NOP	(0x0)	/* No Op */
+#define	IIO_ICCR_CMD_WAKE	(0x100) /* Reactivate CRB entry and process */
+#define	IIO_ICCR_CMD_TIMEOUT	(0x200)	/* Make CRB timeout & mark invalid */
+#define	IIO_ICCR_CMD_EJECT	(0x400)	/* Contents of entry written to memory 
+					 * via a WB
+					 */
+#define	IIO_ICCR_CMD_FLUSH	(0x800)
+
+/*
+ *
+ * CRB Register description.
+ *
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ *
+ * Many of the fields in CRB are status bits used by hardware
+ * for implementation of the protocol. It's very dangerous to
+ * mess around with the CRB registers.
+ *
+ * It's OK to read the CRB registers and try to make sense out of the
+ * fields in CRB.
+ *
+ * Updating CRB requires all activities in Hub IIO to be quiesced;
+ * otherwise, a write to CRB could corrupt other CRB entries.
+ * CRBs are here only as a back door peek to hub IIO's status.
+ * Quiescing implies no DMAs and no PIOs,
+ * either directly from the cpu or from sn0net.
+ * This is not something that can be done easily. So, AVOID updating
+ * CRBs.
+ */
+
+#ifdef _LANGUAGE_C
+
+/*
+ * Easy access macros for CRBs, all 4 registers (A-D)
+ */
+typedef ii_icrb0_a_u_t icrba_t;	/* what it was called on SN0/hub */
+#define a_error         ii_icrb0_a_fld_s.ia_error
+#define a_ecode         ii_icrb0_a_fld_s.ia_errcode
+#define a_lnetuce       ii_icrb0_a_fld_s.ia_ln_uce
+#define a_mark          ii_icrb0_a_fld_s.ia_mark
+#define a_xerr          ii_icrb0_a_fld_s.ia_xt_err
+#define a_sidn          ii_icrb0_a_fld_s.ia_sidn
+#define a_tnum          ii_icrb0_a_fld_s.ia_tnum
+#define a_addr          ii_icrb0_a_fld_s.ia_addr
+#define a_valid         ii_icrb0_a_fld_s.ia_vld
+#define a_iow           ii_icrb0_a_fld_s.ia_iow
+#define a_regvalue	ii_icrb0_a_regval
+
+typedef ii_icrb0_b_u_t icrbb_t;
+#define b_btenum        ii_icrb0_b_fld_s.ib_bte_num
+#define b_cohtrans      ii_icrb0_b_fld_s.ib_ct
+#define b_xtsize        ii_icrb0_b_fld_s.ib_size
+#define b_source        ii_icrb0_b_fld_s.ib_source
+#define b_imsgtype      ii_icrb0_b_fld_s.ib_imsgtype
+#define b_imsg          ii_icrb0_b_fld_s.ib_imsg
+#define b_initiator     ii_icrb0_b_fld_s.ib_init
+#define b_regvalue	ii_icrb0_b_regval
+
+typedef ii_icrb0_c_u_t icrbc_t;
+#define c_pricnt        ii_icrb0_c_fld_s.ic_pr_cnt
+#define c_pripsc        ii_icrb0_c_fld_s.ic_pr_psc
+#define c_bteop         ii_icrb0_c_fld_s.ic_bte_op
+#define c_bteaddr       ii_icrb0_c_fld_s.ic_pa_be /* ic_pa_be fld has 2 names*/
+#define c_benable       ii_icrb0_c_fld_s.ic_pa_be /* ic_pa_be fld has 2 names*/
+#define c_suppl         ii_icrb0_c_fld_s.ic_suppl
+#define c_barrop        ii_icrb0_c_fld_s.ic_bo
+#define c_doresp        ii_icrb0_c_fld_s.ic_resprqd
+#define c_gbr           ii_icrb0_c_fld_s.ic_gbr
+#define c_regvalue	ii_icrb0_c_regval
+
+typedef ii_icrb0_d_u_t icrbd_t;
+#define icrbd_ctxtvld   ii_icrb0_d_fld_s.id_cvld
+#define icrbd_toutvld   ii_icrb0_d_fld_s.id_tvld
+#define icrbd_context   ii_icrb0_d_fld_s.id_context
+#define d_regvalue	ii_icrb0_d_regval
+
+#endif /* _LANGUAGE_C */
+
+/* Number of widgets supported by hub */
+#define HUB_NUM_WIDGET          9
+#define HUB_WIDGET_ID_MIN       0x8
+#define HUB_WIDGET_ID_MAX       0xf
+
+#define HUB_WIDGET_PART_NUM     0xc110
+#define MAX_HUBS_PER_XBOW       2
+
+#ifdef _LANGUAGE_C
+/* A few more #defines for backwards compatibility */
+#define iprb_t          ii_iprb0_u_t
+#define iprb_regval     ii_iprb0_regval
+#define iprb_ovflow     ii_iprb0_fld_s.i_of_cnt
+#define iprb_error      ii_iprb0_fld_s.i_error
+#define iprb_ff         ii_iprb0_fld_s.i_f
+#define iprb_mode       ii_iprb0_fld_s.i_m
+#define iprb_bnakctr    ii_iprb0_fld_s.i_nb
+#define iprb_anakctr    ii_iprb0_fld_s.i_na
+#define iprb_xtalkctr   ii_iprb0_fld_s.i_c
+#endif
+
+#define LNK_STAT_WORKING        0x2
+
+#define IIO_WSTAT_ECRAZY        (1ULL << 32)    /* Hub gone crazy */
+#define IIO_WSTAT_TXRETRY       (1ULL << 9)     /* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK  (0x7F)   /* should be 0xFF?? */
+#define IIO_WSTAT_TXRETRY_SHFT  (16)
+#define IIO_WSTAT_TXRETRY_CNT(w)        (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+                                          IIO_WSTAT_TXRETRY_MASK)
+
+/* Number of II perf. counters we can multiplex at once */
+
+#define IO_PERF_SETS	32
+
+#ifdef BRINGUP
+#if __KERNEL__
+#if _LANGUAGE_C
+/* XXX moved over from SN/SN0/hubio.h -- each should be checked for SN1 */
+#include <asm/sn/alenlist.h>
+#include <asm/sn/dmamap.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/xtalk/xtalk.h>
+
+/* Bit for the widget in inbound access register */
+#define IIO_IIWA_WIDGET(_w)     ((uint64_t)(1ULL << (_w)))
+/* Bit for the widget in outbound access register */
+#define IIO_IOWA_WIDGET(_w)     ((uint64_t)(1ULL << (_w)))
+
+/* NOTE: The following define assumes that we are going to get
+ * widget numbers from 8 thru F and the device numbers within
+ * widget from 0 thru 7.
+ */
+#define IIO_IIDEM_WIDGETDEV_MASK(w, d)  ((uint64_t)(1ULL << (8 * ((w) - 8) + (d))))
+
+/* IO Interrupt Destination Register */
+#define IIO_IIDSR_SENT_SHIFT    28
+#define IIO_IIDSR_SENT_MASK     0x10000000
+#define IIO_IIDSR_ENB_SHIFT     24
+#define IIO_IIDSR_ENB_MASK      0x01000000
+#define IIO_IIDSR_NODE_SHIFT    8
+#define IIO_IIDSR_NODE_MASK     0x0000ff00
+#define IIO_IIDSR_PI_ID_SHIFT   8
+#define IIO_IIDSR_PI_ID_MASK    0x00000010
+#define IIO_IIDSR_LVL_SHIFT     0
+#define IIO_IIDSR_LVL_MASK      0x0000007f
+
+/* Xtalk timeout threshold register (IIO_IXTT) */
+#define IXTT_RRSP_TO_SHFT	55	   /* read response timeout */
+#define IXTT_RRSP_TO_MASK	(0x1FULL << IXTT_RRSP_TO_SHFT)
+#define IXTT_RRSP_PS_SHFT	32	   /* read response TO prescaler */
+#define IXTT_RRSP_PS_MASK	(0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
+#define IXTT_TAIL_TO_SHFT	0	   /* tail timeout counter threshold */
+#define IXTT_TAIL_TO_MASK	(0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
+
+/*
+ * The IO LLP control status register and widget control register
+ */
+
+#ifdef LITTLE_ENDIAN
+
+typedef union hubii_wcr_u {
+        uint64_t      wcr_reg_value;
+        struct {
+	  uint64_t	wcr_widget_id:   4,     /* Widget ID */
+			wcr_tag_mode:	 1,	/* Tag mode */
+			wcr_rsvd1:	 8,	/* Reserved */
+			wcr_xbar_crd:	 3,	/* LLP crossbar credit */
+			wcr_f_bad_pkt:	 1,	/* Force bad llp pkt enable */
+			wcr_dir_con:	 1,	/* widget direct connect */
+			wcr_e_thresh:	 5,	/* elasticity threshold */
+			wcr_rsvd:	41;	/* unused */
+        } wcr_fields_s;
+} hubii_wcr_t;
+
+#else
+
+typedef union hubii_wcr_u {
+	uint64_t	wcr_reg_value;
+	struct {
+	    uint64_t	wcr_rsvd:	41,	/* unused */
+			wcr_e_thresh:	 5,	/* elasticity threshold */
+			wcr_dir_con:	 1,	/* widget direct connect */
+			wcr_f_bad_pkt:	 1,	/* Force bad llp pkt enable */
+			wcr_xbar_crd:	 3,	/* LLP crossbar credit */
+			wcr_rsvd1:	 8,	/* Reserved */
+			wcr_tag_mode:	 1,	/* Tag mode */
+			wcr_widget_id:	 4;	/* Widget ID */
+	} wcr_fields_s;
+} hubii_wcr_t;
+
+#endif
+
+#define iwcr_dir_con    wcr_fields_s.wcr_dir_con
+
+/* The structures below are defined to extract and modify the ii
+performance registers */
+
+/* io_perf_sel allows the caller to specify what tests will be
+   performed */
+#ifdef LITTLE_ENDIAN
+
+typedef union io_perf_sel {
+        uint64_t perf_sel_reg;
+        struct {
+               uint64_t	perf_ippr0 :  4,
+				perf_ippr1 :  4,
+				perf_icct  :  8,
+				perf_rsvd  : 48;
+        } perf_sel_bits;
+} io_perf_sel_t;
+
+#else
+
+typedef union io_perf_sel {
+	uint64_t perf_sel_reg;
+	struct {
+		uint64_t	perf_rsvd  : 48,
+				perf_icct  :  8,
+				perf_ippr1 :  4,
+				perf_ippr0 :  4;
+	} perf_sel_bits;
+} io_perf_sel_t;
+
+#endif
+
+/* io_perf_cnt is to extract the count from the hub registers. Due to
+   hardware problems there is only one counter, not two. */
+
+#ifdef LITTLE_ENDIAN
+
+typedef union io_perf_cnt {
+        uint64_t      perf_cnt;
+        struct {
+               uint64_t	perf_cnt   : 20,
+				perf_rsvd2 : 12,
+				perf_rsvd1 : 32;
+        } perf_cnt_bits;
+
+} io_perf_cnt_t;
+
+#else
+
+typedef union io_perf_cnt {
+	uint64_t	perf_cnt;
+	struct {
+		uint64_t	perf_rsvd1 : 32,
+				perf_rsvd2 : 12,
+				perf_cnt   : 20;
+	} perf_cnt_bits;
+
+} io_perf_cnt_t;
+
+#endif
+
+#ifdef LITTLE_ENDIAN
+
+typedef union iprte_a {
+	bdrkreg_t	entry;
+	struct {
+		bdrkreg_t	i_rsvd_1                  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t       i_vld                     :      1;
+	} iprte_fields;
+} iprte_a_t;
+
+#else
+
+typedef union iprte_a {
+	bdrkreg_t	entry;
+	struct {
+		bdrkreg_t	i_vld			  :	 1;
+		bdrkreg_t	i_to_cnt		  :	 5;
+		bdrkreg_t	i_widget		  :	 4;
+		bdrkreg_t	i_rsvd			  :	 2;
+		bdrkreg_t	i_source		  :	 8;
+		bdrkreg_t	i_init			  :	 3;
+		bdrkreg_t	i_addr			  :	38;
+		bdrkreg_t	i_rsvd_1		  :	 3;
+	} iprte_fields;
+} iprte_a_t;
+
+#endif
+
+/* PIO MANAGEMENT */
+typedef struct hub_piomap_s *hub_piomap_t;
+
+extern hub_piomap_t
+hub_piomap_alloc(devfs_handle_t dev,      /* set up mapping for this device */
+                device_desc_t dev_desc, /* device descriptor */
+                iopaddr_t xtalk_addr,   /* map for this xtalk_addr range */
+                size_t byte_count,
+                size_t byte_count_max,  /* maximum size of a mapping */
+                unsigned flags);                /* defined in sys/pio.h */
+
+extern void hub_piomap_free(hub_piomap_t hub_piomap);
+
+extern caddr_t
+hub_piomap_addr(hub_piomap_t hub_piomap,        /* mapping resources */
+                iopaddr_t xtalk_addr,           /* map for this xtalk addr */
+                size_t byte_count);             /* map this many bytes */
+
+extern void
+hub_piomap_done(hub_piomap_t hub_piomap);
+
+extern caddr_t
+hub_piotrans_addr(      devfs_handle_t dev,       /* translate to this device */
+                        device_desc_t dev_desc, /* device descriptor */
+                        iopaddr_t xtalk_addr,   /* Crosstalk address */
+                        size_t byte_count,      /* map this many bytes */
+                        unsigned flags);        /* (currently unused) */
+
+/* DMA MANAGEMENT */
+typedef struct hub_dmamap_s *hub_dmamap_t;
+
+extern hub_dmamap_t
+hub_dmamap_alloc(       devfs_handle_t dev,       /* set up mappings for dev */
+                        device_desc_t dev_desc, /* device descriptor */
+                        size_t byte_count_max,  /* max size of a mapping */
+                        unsigned flags);        /* defined in dma.h */
+
+extern void
+hub_dmamap_free(hub_dmamap_t dmamap);
+
+extern iopaddr_t
+hub_dmamap_addr(        hub_dmamap_t dmamap,    /* use mapping resources */
+                        paddr_t paddr,          /* map for this address */
+                        size_t byte_count);     /* map this many bytes */
+
+extern alenlist_t
+hub_dmamap_list(        hub_dmamap_t dmamap,    /* use mapping resources */
+                        alenlist_t alenlist,    /* map this Addr/Length List */
+                        unsigned flags);
+
+extern void
+hub_dmamap_done(        hub_dmamap_t dmamap);   /* done w/ mapping resources */
+
+extern iopaddr_t
+hub_dmatrans_addr(      devfs_handle_t dev,       /* translate for this device */
+                        device_desc_t dev_desc, /* device descriptor */
+                        paddr_t paddr,          /* system physical address */
+                        size_t byte_count,      /* length */
+                        unsigned flags);                /* defined in dma.h */
+
+extern alenlist_t
+hub_dmatrans_list(      devfs_handle_t dev,       /* translate for this device */
+                        device_desc_t dev_desc, /* device descriptor */
+                        alenlist_t palenlist,   /* system addr/length list */
+                        unsigned flags);                /* defined in dma.h */
+
+extern void
+hub_dmamap_drain(       hub_dmamap_t map);
+
+extern void
+hub_dmaaddr_drain(      devfs_handle_t vhdl,
+                        paddr_t addr,
+                        size_t bytes);
+
+extern void
+hub_dmalist_drain(      devfs_handle_t vhdl,
+                        alenlist_t list);
+
+
+/* INTERRUPT MANAGEMENT */
+typedef struct hub_intr_s *hub_intr_t;
+
+extern hub_intr_t
+hub_intr_alloc( devfs_handle_t dev,               /* which device */
+                device_desc_t dev_desc,         /* device descriptor */
+                devfs_handle_t owner_dev);        /* owner of this interrupt */
+
+extern void
+hub_intr_free(hub_intr_t intr_hdl);
+
+extern int
+hub_intr_connect(       hub_intr_t intr_hdl,    /* xtalk intr resource hndl */
+                        intr_func_t intr_func,  /* xtalk intr handler */
+                        void *intr_arg,         /* arg to intr handler */
+                        xtalk_intr_setfunc_t setfunc,
+                                                /* func to set intr hw */
+                        void *setfunc_arg,      /* arg to setfunc */
+                        void *thread);          /* intr thread to use */
+
+extern void
+hub_intr_disconnect(hub_intr_t intr_hdl);
+
+extern devfs_handle_t
+hub_intr_cpu_get(hub_intr_t intr_hdl);
+
+/* CONFIGURATION MANAGEMENT */
+
+extern void
+hub_provider_startup(devfs_handle_t hub);
+
+extern void
+hub_provider_shutdown(devfs_handle_t hub);
+
+#define HUB_PIO_CONVEYOR        0x1     /* PIO in conveyor belt mode */
+#define HUB_PIO_FIRE_N_FORGET   0x2     /* PIO in fire-and-forget mode */
+
+/* Flags that make sense to hub_widget_flags_set */
+#define HUB_WIDGET_FLAGS        (                               \
+				 HUB_PIO_CONVEYOR       |       \
+				 HUB_PIO_FIRE_N_FORGET          \
+				)
+
+
+typedef int     hub_widget_flags_t;
+
+/* Set the PIO mode for a widget.  These two functions perform the
+ * same operation, but hub_device_flags_set() takes a hardware graph
+ * vertex while hub_widget_flags_set() takes a nasid and widget
+ * number.  In most cases, hub_device_flags_set() should be used.
+ */
+extern int      hub_widget_flags_set(nasid_t            nasid,
+                                     xwidgetnum_t       widget_num,
+                                     hub_widget_flags_t flags);
+
+/* Depending on the flags set take the appropriate actions */
+extern int      hub_device_flags_set(devfs_handle_t       widget_dev,
+                                     hub_widget_flags_t flags);
+                                                    
+
+/* Error Handling. */
+extern int hub_ioerror_handler(devfs_handle_t, int, int, struct io_error_s *);
+extern int kl_ioerror_handler(cnodeid_t, cnodeid_t, cpuid_t,
+                              int, paddr_t, caddr_t, ioerror_mode_t);
+extern void hub_widget_reset(devfs_handle_t, xwidgetnum_t);
+extern int hub_error_devenable(devfs_handle_t, int, int);
+extern void hub_widgetdev_enable(devfs_handle_t, int);
+extern void hub_widgetdev_shutdown(devfs_handle_t, int);
+extern int  hub_dma_enabled(devfs_handle_t);
+
+#endif /* _LANGUAGE_C */
+#endif /* __KERNEL__ */
+#endif /* BRINGUP */
+#endif  /* _ASM_SN_SN1_HUBIO_NEXT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hublb.h linux/include/asm-ia64/sn/sn1/hublb.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hublb.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hublb.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1608 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/************************************************************************
+ *                                                                      *
+ *      WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!      *
+ *                                                                      *
+ * This file is created by an automated script. Any (minimal) changes   *
+ * made manually to this  file should be made with care.                *
+ *                                                                      *
+ *               MAKE ALL ADDITIONS TO THE END OF THIS FILE             *
+ *                                                                      *
+ ************************************************************************/
+
+
+#ifndef _ASM_SN_SN1_HUBLB_H
+#define _ASM_SN_SN1_HUBLB_H
+
+
+#define    LB_REV_ID                 0x00600000    /*
+                                                    * Bedrock Revision
+                                                    * and ID
+                                                    */
+
+
+
+#define    LB_CPU_PERMISSION         0x00604000    /*
+                                                    * CPU PIO access
+                                                    * permission bits
+                                                    */
+
+
+
+#define    LB_CPU_PERM_OVRRD         0x00604008    /*
+                                                    * CPU PIO access
+                                                    * permission bit
+                                                    * override
+                                                    */
+
+
+
+#define    LB_IO_PERMISSION          0x00604010    /*
+                                                    * IO PIO access
+                                                    * permission bits
+                                                    */
+
+
+
+#define    LB_SOFT_RESET             0x00604018    /*
+                                                    * Soft reset the
+                                                    * Bedrock chip
+                                                    */
+
+
+
+#define    LB_REGION_PRESENT         0x00604020    /*
+                                                    * Regions Present for
+                                                    * Invalidates
+                                                    */
+
+
+
+#define    LB_NODES_ABSENT           0x00604028    /*
+                                                    * Nodes Absent for
+                                                    * Invalidates
+                                                    */
+
+
+
+#define    LB_MICROLAN_CTL           0x00604030    /*
+                                                    * Microlan Control
+                                                    * (NIC)
+                                                    */
+
+
+
+#define    LB_ERROR_BITS             0x00604040    /*
+                                                    * Local Block error
+                                                    * bits
+                                                    */
+
+
+
+#define    LB_ERROR_MASK_CLR         0x00604048    /*
+                                                    * Bit mask write to
+                                                    * clear error bits
+                                                    */
+
+
+
+#define    LB_ERROR_HDR1             0x00604050    /*
+                                                    * Source, Suppl and
+                                                    * Cmd fields
+                                                    */
+
+
+
+#define    LB_ERROR_HDR2             0x00604058    /*
+                                                    * Address field from
+                                                    * first error
+                                                    */
+
+
+
+#define    LB_ERROR_DATA             0x00604060    /*
+                                                    * Data flit (if any)
+                                                    * from first error
+                                                    */
+
+
+
+#define    LB_DEBUG_SELECT           0x00604100    /*
+                                                    * Choice of debug
+                                                    * signals from chip
+                                                    */
+
+
+
+#define    LB_DEBUG_PINS             0x00604108    /*
+                                                    * Value on the chip's
+                                                    * debug pins
+                                                    */
+
+
+
+#define    LB_RT_LOCAL_CTRL          0x00604200    /*
+                                                    * Local generation of
+                                                    * real-time clock
+                                                    */
+
+
+
+#define    LB_RT_FILTER_CTRL         0x00604208    /*
+                                                    * Control of
+                                                    * filtering of global
+                                                    * clock
+                                                    */
+
+
+
+#define    LB_SCRATCH_REG0           0x00608000    /* Scratch Register 0     */
+
+
+
+#define    LB_SCRATCH_REG1           0x00608008    /* Scratch Register 1     */
+
+
+
+#define    LB_SCRATCH_REG2           0x00608010    /* Scratch Register 2     */
+
+
+
+#define    LB_SCRATCH_REG3           0x00608018    /* Scratch Register 3     */
+
+
+
+#define    LB_SCRATCH_REG4           0x00608020    /* Scratch Register 4     */
+
+
+
+#define    LB_SCRATCH_REG0_WZ        0x00608040    /*
+                                                    * Scratch Register 0
+                                                    * (WZ alias)
+                                                    */
+
+
+
+#define    LB_SCRATCH_REG1_WZ        0x00608048    /*
+                                                    * Scratch Register 1
+                                                    * (WZ alias)
+                                                    */
+
+
+
+#define    LB_SCRATCH_REG2_WZ        0x00608050    /*
+                                                    * Scratch Register 2
+                                                    * (WZ alias)
+                                                    */
+
+
+
+#define    LB_SCRATCH_REG3_RZ        0x00608058    /*
+                                                    * Scratch Register 3
+                                                    * (RZ alias)
+                                                    */
+
+
+
+#define    LB_SCRATCH_REG4_RZ        0x00608060    /*
+                                                    * Scratch Register 4
+                                                    * (RZ alias)
+                                                    */
+
+
+
+#define    LB_VECTOR_PARMS           0x0060C000    /*
+                                                    * Vector PIO
+                                                    * parameters
+                                                    */
+
+
+
+#define    LB_VECTOR_ROUTE           0x0060C008    /*
+                                                    * Vector PIO Vector
+                                                    * Route
+                                                    */
+
+
+
+#define    LB_VECTOR_DATA            0x0060C010    /*
+                                                    * Vector PIO Write
+                                                    * Data
+                                                    */
+
+
+
+#define    LB_VECTOR_STATUS          0x0060C020    /*
+                                                    * Vector PIO Return
+                                                    * Status
+                                                    */
+
+
+
+#define    LB_VECTOR_RETURN          0x0060C028    /*
+                                                    * Vector PIO Return
+                                                    * Route
+                                                    */
+
+
+
+#define    LB_VECTOR_READ_DATA       0x0060C030    /*
+                                                    * Vector PIO Read
+                                                    * Data
+                                                    */
+
+
+
+#define    LB_VECTOR_STATUS_CLEAR    0x0060C038    /*
+                                                    * Clear Vector PIO
+                                                    * Return Status
+                                                    */
+
+
+
+
+
+#ifdef _LANGUAGE_C
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register contains information that allows         *
+ * exploratory software to probe for chip type. This is also the        *
+ * register that sets this node's ID and the size of each region        *
+ * (which affects the maximum possible system size). IBM assigns the    *
+ * values for the REVISION, PART_NUMBER and MANUFACTURER fields, in     *
+ * accordance with the IEEE 1149.1 standard; SGI is not at liberty to   *
+ * unilaterally change the values of these fields.                      *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_rev_id_u {
+	bdrkreg_t	lb_rev_id_regval;
+	struct  {
+		bdrkreg_t	ri_reserved_2             :	 1;
+		bdrkreg_t       ri_manufacturer           :     11;
+		bdrkreg_t       ri_part_number            :     16;
+		bdrkreg_t       ri_revision               :      4;
+		bdrkreg_t       ri_node_id                :      8;
+		bdrkreg_t       ri_reserved_1             :      6;
+		bdrkreg_t       ri_region_size            :      2;
+		bdrkreg_t       ri_reserved               :     16;
+	} lb_rev_id_fld_s;
+} lb_rev_id_u_t;
+
+#else
+
+typedef union lb_rev_id_u {
+        bdrkreg_t       lb_rev_id_regval;
+	struct	{
+		bdrkreg_t	ri_reserved		  :	16;
+		bdrkreg_t	ri_region_size		  :	 2;
+		bdrkreg_t	ri_reserved_1		  :	 6;
+		bdrkreg_t	ri_node_id		  :	 8;
+		bdrkreg_t	ri_revision		  :	 4;
+		bdrkreg_t	ri_part_number		  :	16;
+		bdrkreg_t	ri_manufacturer		  :	11;
+		bdrkreg_t	ri_reserved_2		  :	 1;
+	} lb_rev_id_fld_s;
+} lb_rev_id_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the PI-access-rights bit-vector for the      *
+ * LB, NI, XB and MD portions of the Bedrock local register space. If   *
+ * a bit in the bit-vector is set, the region corresponding to that     *
+ * bit has read/write permission on the LB, NI, XB and MD local         *
+ * registers. If the bit is clear, that region has no write access to   *
+ * the local registers and no read access if the read will cause any    *
+ * state change. If a write or a read with side effects is attempted    *
+ * by a PI in a region for which access is restricted, the LB will      *
+ * not perform the operation and will send back a reply which           *
+ * indicates an error.                                                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_cpu_permission_u {
+	bdrkreg_t	lb_cpu_permission_regval;
+	struct  {
+		bdrkreg_t	cp_cpu_access             :	64;
+	} lb_cpu_permission_fld_s;
+} lb_cpu_permission_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  A write to this register of the 64-bit value "SGIrules" will        *
+ * cause the bit in the LB_CPU_PROTECT register corresponding to the    *
+ * region of the requester to be set.                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_cpu_perm_ovrrd_u {
+	bdrkreg_t	lb_cpu_perm_ovrrd_regval;
+	struct  {
+		bdrkreg_t	cpo_cpu_perm_ovr          :	64;
+	} lb_cpu_perm_ovrrd_fld_s;
+} lb_cpu_perm_ovrrd_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the II-access-rights bit-vector for the      *
+ * LB, NI, XB and MD portions of the Bedrock local register space. If   *
+ * a bit in the bit-vector is set, the region corresponding to that     *
+ * bit has read/write permission on the LB, NI, XB and MD local         *
+ * registers. If the bit is clear, then that region has no write        *
+ * access to the local registers and no read access if the read         *
+ * results in any state change. If a write or a read with side          *
+ * effects is attempted by an II in a region for which access is        *
+ * restricted, the LB will not perform the operation and will send      *
+ * back a reply which indicates an error.                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_io_permission_u {
+	bdrkreg_t	lb_io_permission_regval;
+	struct  {
+		bdrkreg_t	ip_io_permission          :	64;
+	} lb_io_permission_fld_s;
+} lb_io_permission_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  A write to this bit resets the Bedrock chip with a soft reset.      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_soft_reset_u {
+	bdrkreg_t	lb_soft_reset_regval;
+	struct  {
+		bdrkreg_t	sr_soft_reset             :	 1;
+		bdrkreg_t	sr_reserved		  :	63;
+	} lb_soft_reset_fld_s;
+} lb_soft_reset_u_t;
+
+#else
+
+typedef union lb_soft_reset_u {
+	bdrkreg_t	lb_soft_reset_regval;
+	struct	{
+		bdrkreg_t	sr_reserved		  :	63;
+		bdrkreg_t	sr_soft_reset		  :	 1;
+	} lb_soft_reset_fld_s;
+} lb_soft_reset_u_t;
+
+#endif
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register indicates which regions are present and capable of    *
+ * receiving an invalidate (INVAL) request. The LB samples this         *
+ * register at the start of processing each LINVAL. When an LINVAL      *
+ * indicates that a particular PI unit might hold a shared copy of a    *
+ * cache block but this PI is in a region which is not present (i.e.,   *
+ * its bit in LB_REGION_PRESENT is clear), then the LB sends an IVACK   *
+ * reply packet on behalf of this PI. The REGION_SIZE field in the      *
+ * LB_REV_ID register determines the number of nodes per region (and    *
+ * hence, the number of PI units which share a common bit in the        *
+ * LB_REGION_PRESENT register).                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_region_present_u {
+	bdrkreg_t	lb_region_present_regval;
+	struct  {
+		bdrkreg_t	rp_present_bits           :	64;
+	} lb_region_present_fld_s;
+} lb_region_present_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register indicates which nodes are absent and     *
+ * not capable of receiving an invalidate (INVAL) request. The LB       *
+ * samples this register at the start of processing each LINVAL. When   *
+ * an LINVAL indicates that a particular PI unit might hold a shared    *
+ * copy of a cache block but this PI unit's node is not present         *
+ * (i.e., its node ID is listed in the LB_NODES_ABSENT register),       *
+ * then the LB sends an IVACK reply packet on behalf of this PI.        *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_nodes_absent_u {
+	bdrkreg_t	lb_nodes_absent_regval;
+	struct  {
+		bdrkreg_t	na_node_0                 :	 8;
+		bdrkreg_t       na_reserved_3             :      7;
+		bdrkreg_t       na_node_0_valid           :      1;
+		bdrkreg_t       na_node_1                 :      8;
+		bdrkreg_t       na_reserved_2             :      7;
+		bdrkreg_t       na_node_1_valid           :      1;
+		bdrkreg_t       na_node_2                 :      8;
+		bdrkreg_t       na_reserved_1             :      7;
+		bdrkreg_t       na_node_2_valid           :      1;
+		bdrkreg_t       na_node_3                 :      8;
+		bdrkreg_t       na_reserved               :      7;
+		bdrkreg_t       na_node_3_valid           :      1;
+	} lb_nodes_absent_fld_s;
+} lb_nodes_absent_u_t;
+
+#else
+
+typedef union lb_nodes_absent_u {
+	bdrkreg_t	lb_nodes_absent_regval;
+	struct	{
+		bdrkreg_t	na_node_3_valid		  :	 1;
+		bdrkreg_t	na_reserved		  :	 7;
+		bdrkreg_t	na_node_3		  :	 8;
+		bdrkreg_t	na_node_2_valid		  :	 1;
+		bdrkreg_t	na_reserved_1		  :	 7;
+		bdrkreg_t	na_node_2		  :	 8;
+		bdrkreg_t	na_node_1_valid		  :	 1;
+		bdrkreg_t	na_reserved_2		  :	 7;
+		bdrkreg_t	na_node_1		  :	 8;
+		bdrkreg_t	na_node_0_valid		  :	 1;
+		bdrkreg_t	na_reserved_3		  :	 7;
+		bdrkreg_t	na_node_0		  :	 8;
+	} lb_nodes_absent_fld_s;
+} lb_nodes_absent_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register provides access to the Number-In-a-Can add-only       *
+ * serial PROM that is used to store node board serial number and       *
+ * configuration information. (Refer to NIC datasheet Dallas 1990A      *
+ * that is viewable at                                                  *
+ * URL: http://www.dalsemi.com/DocControl/PDFs/pdfindex.html). Data     *
+ * comes from this interface LSB first.                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_microlan_ctl_u {
+	bdrkreg_t	lb_microlan_ctl_regval;
+	struct  {
+		bdrkreg_t	mc_rd_data                :	 1;
+		bdrkreg_t       mc_done                   :      1;
+		bdrkreg_t       mc_sample                 :      8;
+		bdrkreg_t       mc_pulse                  :     10;
+		bdrkreg_t       mc_clkdiv_phi0            :      7;
+		bdrkreg_t       mc_clkdiv_phi1            :      7;
+		bdrkreg_t       mc_reserved               :     30;
+	} lb_microlan_ctl_fld_s;
+} lb_microlan_ctl_u_t;
+
+#else
+
+typedef union lb_microlan_ctl_u {
+        bdrkreg_t       lb_microlan_ctl_regval;
+        struct  {
+                bdrkreg_t       mc_reserved               :     30;
+                bdrkreg_t       mc_clkdiv_phi1            :      7;
+                bdrkreg_t       mc_clkdiv_phi0            :      7;
+                bdrkreg_t       mc_pulse                  :     10;
+                bdrkreg_t       mc_sample                 :      8;
+                bdrkreg_t       mc_done                   :      1;
+                bdrkreg_t       mc_rd_data                :      1;
+        } lb_microlan_ctl_fld_s;
+} lb_microlan_ctl_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register contains the LB error status bits.       *
+ * Whenever a particular type of error occurs, the LB sets its bit in   *
+ * this register so that software will be aware that such an event      *
+ * has happened. Reads from this register are non-destructive and the   *
+ * contents of this register remain intact across reset operations.     *
+ * Whenever any of these bits is set, the LB will assert its            *
+ * interrupt request output signals that go to the PI units.            *
+ *  Software can simulate the occurrence of an error by first writing   *
+ * appropriate values into the LB_ERROR_HDR1, LB_ERROR_HDR2 and         *
+ * LB_ERROR_DATA registers, and then writing to the LB_ERROR_BITS       *
+ * register to set the error bits in a particular way. Setting one or   *
+ * more error bits will cause the LB to interrupt a processor and       *
+ * invoke error-handling software.                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_error_bits_u {
+	bdrkreg_t	lb_error_bits_regval;
+	struct  {
+		bdrkreg_t	eb_rq_bad_cmd             :	 1;
+		bdrkreg_t       eb_rp_bad_cmd             :      1;
+		bdrkreg_t       eb_rq_short               :      1;
+		bdrkreg_t       eb_rp_short               :      1;
+		bdrkreg_t       eb_rq_long                :      1;
+		bdrkreg_t       eb_rp_long                :      1;
+		bdrkreg_t       eb_rq_bad_data            :      1;
+		bdrkreg_t       eb_rp_bad_data            :      1;
+		bdrkreg_t       eb_rq_bad_addr            :      1;
+		bdrkreg_t       eb_rq_bad_linval          :      1;
+		bdrkreg_t       eb_gclk_drop              :      1;
+		bdrkreg_t       eb_reserved               :     53;
+	} lb_error_bits_fld_s;
+} lb_error_bits_u_t;
+
+#else
+
+typedef union lb_error_bits_u {
+	bdrkreg_t	lb_error_bits_regval;
+	struct	{
+		bdrkreg_t	eb_reserved		  :	53;
+		bdrkreg_t	eb_gclk_drop		  :	 1;
+		bdrkreg_t	eb_rq_bad_linval	  :	 1;
+		bdrkreg_t	eb_rq_bad_addr		  :	 1;
+		bdrkreg_t	eb_rp_bad_data		  :	 1;
+		bdrkreg_t	eb_rq_bad_data		  :	 1;
+		bdrkreg_t	eb_rp_long		  :	 1;
+		bdrkreg_t	eb_rq_long		  :	 1;
+		bdrkreg_t	eb_rp_short		  :	 1;
+		bdrkreg_t	eb_rq_short		  :	 1;
+		bdrkreg_t	eb_rp_bad_cmd		  :	 1;
+		bdrkreg_t	eb_rq_bad_cmd		  :	 1;
+	} lb_error_bits_fld_s;
+} lb_error_bits_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register lets software clear some of the bits in the           *
+ * LB_ERROR_BITS register without affecting other bits.  Essentially,   *
+ * it provides bit mask functionality. When software writes to the      *
+ * LB_ERROR_MASK_CLR register, the bits which are set in the data       *
+ * value indicate which bits are to be cleared in LB_ERROR_BITS. If a   *
+ * bit is clear in the data value written to the LB_ERROR_MASK_CLR      *
+ * register, then its corresponding bit in the LB_ERROR_BITS register   *
+ * is not affected. Hence, software can atomically clear any subset     *
+ * of the error bits in the LB_ERROR_BITS register.                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_error_mask_clr_u {
+	bdrkreg_t	lb_error_mask_clr_regval;
+	struct  {
+		bdrkreg_t	emc_clr_rq_bad_cmd        :	 1;
+		bdrkreg_t       emc_clr_rp_bad_cmd        :      1;
+		bdrkreg_t       emc_clr_rq_short          :      1;
+		bdrkreg_t       emc_clr_rp_short          :      1;
+		bdrkreg_t       emc_clr_rq_long           :      1;
+		bdrkreg_t       emc_clr_rp_long           :      1;
+		bdrkreg_t       emc_clr_rq_bad_data       :      1;
+		bdrkreg_t       emc_clr_rp_bad_data       :      1;
+		bdrkreg_t       emc_clr_rq_bad_addr       :      1;
+		bdrkreg_t       emc_clr_rq_bad_linval     :      1;
+		bdrkreg_t       emc_clr_gclk_drop         :      1;
+		bdrkreg_t       emc_reserved              :     53;
+	} lb_error_mask_clr_fld_s;
+} lb_error_mask_clr_u_t;
+
+#else
+
+typedef union lb_error_mask_clr_u {
+	bdrkreg_t	lb_error_mask_clr_regval;
+	struct	{
+		bdrkreg_t	emc_reserved		  :	53;
+		bdrkreg_t	emc_clr_gclk_drop	  :	 1;
+		bdrkreg_t	emc_clr_rq_bad_linval	  :	 1;
+		bdrkreg_t	emc_clr_rq_bad_addr	  :	 1;
+		bdrkreg_t	emc_clr_rp_bad_data	  :	 1;
+		bdrkreg_t	emc_clr_rq_bad_data	  :	 1;
+		bdrkreg_t	emc_clr_rp_long		  :	 1;
+		bdrkreg_t	emc_clr_rq_long		  :	 1;
+		bdrkreg_t	emc_clr_rp_short	  :	 1;
+		bdrkreg_t	emc_clr_rq_short	  :	 1;
+		bdrkreg_t	emc_clr_rp_bad_cmd	  :	 1;
+		bdrkreg_t	emc_clr_rq_bad_cmd	  :	 1;
+	} lb_error_mask_clr_fld_s;
+} lb_error_mask_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  If the LB detects an error when VALID==0 in the LB_ERROR_HDR1       *
+ * register, then it saves the contents of the offending packet's       *
+ * header flit in the LB_ERROR_HDR1 and LB_ERROR_HDR2 registers, sets   *
+ * the VALID bit in LB_ERROR_HDR1 and clears the OVERRUN bit in         *
+ * LB_ERROR_HDR1 (and it will also set the corresponding bit in the     *
+ * LB_ERROR_BITS register). The ERR_TYPE field indicates specifically   *
+ * what kind of error occurred.  Its encoding corresponds to the bit    *
+ * positions in the LB_ERROR_BITS register (e.g., ERR_TYPE==5           *
+ * indicates a RP_LONG error).  If an error (of any type except         *
+ * GCLK_DROP) subsequently happens while VALID==1, then the LB sets     *
+ * the OVERRUN bit in LB_ERROR_HDR1. This register is not relevant      *
+ * when a GCLK_DROP error occurs; the LB does not even attempt to       *
+ * change the ERR_TYPE, VALID or OVERRUN field when a GCLK_DROP error   *
+ * happens.                                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_error_hdr1_u {
+	bdrkreg_t	lb_error_hdr1_regval;
+	struct  {
+		bdrkreg_t	eh_command                :	 7;
+		bdrkreg_t       eh_reserved_5             :      1;
+		bdrkreg_t       eh_suppl                  :     11;
+		bdrkreg_t       eh_reserved_4             :      1;
+		bdrkreg_t       eh_source                 :     11;
+		bdrkreg_t       eh_reserved_3             :      1;
+		bdrkreg_t       eh_err_type               :      4;
+		bdrkreg_t       eh_reserved_2             :      4;
+		bdrkreg_t       eh_overrun                :      1;
+		bdrkreg_t       eh_reserved_1             :      3;
+		bdrkreg_t       eh_valid                  :      1;
+		bdrkreg_t       eh_reserved               :     19;
+	} lb_error_hdr1_fld_s;
+} lb_error_hdr1_u_t;
+
+#else
+
+typedef union lb_error_hdr1_u {
+	bdrkreg_t	lb_error_hdr1_regval;
+	struct	{
+		bdrkreg_t	eh_reserved		  :	19;
+		bdrkreg_t	eh_valid		  :	 1;
+		bdrkreg_t	eh_reserved_1		  :	 3;
+		bdrkreg_t	eh_overrun		  :	 1;
+		bdrkreg_t	eh_reserved_2		  :	 4;
+		bdrkreg_t	eh_err_type		  :	 4;
+		bdrkreg_t	eh_reserved_3		  :	 1;
+		bdrkreg_t	eh_source		  :	11;
+		bdrkreg_t	eh_reserved_4		  :	 1;
+		bdrkreg_t	eh_suppl		  :	11;
+		bdrkreg_t	eh_reserved_5		  :	 1;
+		bdrkreg_t	eh_command		  :	 7;
+	} lb_error_hdr1_fld_s;
+} lb_error_hdr1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contents of the Address field from header flit of first packet      *
+ * that causes an error. This register is not relevant when a           *
+ * GCLK_DROP error occurs.                                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_error_hdr2_u {
+	bdrkreg_t	lb_error_hdr2_regval;
+	struct  {
+		bdrkreg_t	eh_address                :	38;
+		bdrkreg_t       eh_reserved               :     26;
+	} lb_error_hdr2_fld_s;
+} lb_error_hdr2_u_t;
+
+#else
+
+typedef union lb_error_hdr2_u {
+	bdrkreg_t	lb_error_hdr2_regval;
+	struct	{
+		bdrkreg_t	eh_reserved		  :	26;
+		bdrkreg_t	eh_address		  :	38;
+	} lb_error_hdr2_fld_s;
+} lb_error_hdr2_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register accompanies the LB_ERROR_HDR1 and        *
+ * LB_ERROR_HDR2 registers.  The LB updates the value in this           *
+ * register when an incoming packet with a data flit causes an error    *
+ * while VALID==0 in the LB_ERROR_HDR1 register.  This register         *
+ * retains the contents of the data flit from the incoming packet       *
+ * that caused the error. This register is relevant for the following   *
+ * types of errors:                                                     *
+ *                                                                      *
+ *                                                                      *
+ *                                                                      *
+ *                                                                      *
+ *                                                                      *
+ *  - RQ_BAD_LINVAL for a LINVAL request.                               *
+ *  - RQ_BAD_ADDR for a normal or vector PIO request.                   *
+ *  - RP_BAD_DATA for a vector PIO reply.                               *
+ *  - RQ_BAD_DATA for an incoming request with data.                    *
+ *  - RP_LONG for a vector PIO reply.                                   *
+ *  - RQ_LONG for an incoming request with expected data.               *
+ *                                                                      *
+ * In the case of RQ_BAD_LINVAL, the register retains the 64-bit data   *
+ * value that followed the header flit.  In the case of RQ_BAD_ADDR     *
+ * or RQ_BAD_DATA, the register retains the incoming packet's 64-bit    *
+ * data value (i.e., 2nd flit in the packet for a normal PIO write or   *
+ * an LINVAL, 3rd flit for a vector PIO read or write). In the case     *
+ * of RP_BAD_DATA, the register retains the 64-bit data value in the    *
+ * 3rd flit of the packet. When a RP_LONG or RQ_LONG error occurs,      *
+ * the LB loads the LB_ERROR_DATA register with the contents of the     *
+ * expected data flit (i.e., the 3rd flit in the packet for a vector    *
+ * PIO request or reply, the 2nd flit for other packets), if any. The   *
+ * contents of the LB_ERROR_DATA register are undefined after a         *
+ * RP_SHORT, RQ_SHORT, RP_BAD_CMD or RQ_BAD_CMD error. The contents     *
+ * of the LB_ERROR_DATA register are also undefined after an incoming   *
+ * normal PIO read request which encounters a RQ_LONG error.            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_error_data_u {
+	bdrkreg_t	lb_error_data_regval;
+	struct  {
+		bdrkreg_t	ed_data                   :	64;
+	} lb_error_data_fld_s;
+} lb_error_data_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register enables software to control what internal Bedrock     *
+ * signals are visible on the chip's debug pins. The LB provides the    *
+ * 6-bit value in this register to Bedrock's DEBUG unit. The JTAG       *
+ * unit provides a similar 6-bit selection input to the DEBUG unit,     *
+ * along with another signal that tells the DEBUG unit whether to use   *
+ * the selection signal from the LB or the JTAG unit. For a             *
+ * description of the menu of choices for debug signals, refer to the   *
+ * documentation for the DEBUG unit.                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_debug_select_u {
+	bdrkreg_t	lb_debug_select_regval;
+	struct  {
+		bdrkreg_t	ds_debug_sel              :	 6;
+		bdrkreg_t       ds_reserved               :     58;
+	} lb_debug_select_fld_s;
+} lb_debug_select_u_t;
+
+#else
+
+typedef union lb_debug_select_u {
+	bdrkreg_t	lb_debug_select_regval;
+	struct	{
+		bdrkreg_t	ds_reserved		  :	58;
+		bdrkreg_t	ds_debug_sel		  :	 6;
+	} lb_debug_select_fld_s;
+} lb_debug_select_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  A PIO read from this register returns the 32-bit value that is      *
+ * currently on the Bedrock chip's debug pins. This register allows     *
+ * software to observe debug pin output values which do not change      *
+ * frequently (i.e., they remain constant over a period of many         *
+ * cycles).                                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_debug_pins_u {
+	bdrkreg_t	lb_debug_pins_regval;
+	struct  {
+		bdrkreg_t	dp_debug_pins             :	32;
+		bdrkreg_t       dp_reserved               :     32;
+	} lb_debug_pins_fld_s;
+} lb_debug_pins_u_t;
+
+#else
+
+typedef union lb_debug_pins_u {
+	bdrkreg_t	lb_debug_pins_regval;
+	struct	{
+		bdrkreg_t	dp_reserved		  :	32;
+		bdrkreg_t	dp_debug_pins		  :	32;
+	} lb_debug_pins_fld_s;
+} lb_debug_pins_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  The LB unit provides the PI0 and PI1 units with a real-time clock   *
+ * signal. The LB can generate this signal itself, based on the         *
+ * Bedrock chip's system clock which the LB receives as an input.       *
+ * Alternatively, the LB can filter a global clock signal which it      *
+ * receives as an input and provide the filtered version to PI0 and     *
+ * PI1. The user can program the LB_RT_LOCAL_CTRL register to choose    *
+ * the source of the real-time clock. If the user chooses to generate   *
+ * the real-time clock internally within the LB, then the user can      *
+ * specify the period for the real-time clock signal.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_rt_local_ctrl_u {
+	bdrkreg_t	lb_rt_local_ctrl_regval;
+	struct  {
+		bdrkreg_t	rlc_gclk_enable           :	 1;
+		bdrkreg_t       rlc_reserved_4            :      3;
+		bdrkreg_t       rlc_max_count             :     10;
+		bdrkreg_t       rlc_reserved_3            :      2;
+		bdrkreg_t       rlc_gclk_counter          :     10;
+		bdrkreg_t       rlc_reserved_2            :      2;
+		bdrkreg_t       rlc_gclk                  :      1;
+		bdrkreg_t       rlc_reserved_1            :      3;
+		bdrkreg_t       rlc_use_internal          :      1;
+		bdrkreg_t       rlc_reserved              :     31;
+	} lb_rt_local_ctrl_fld_s;
+} lb_rt_local_ctrl_u_t;
+
+#else
+
+typedef union lb_rt_local_ctrl_u {
+        bdrkreg_t       lb_rt_local_ctrl_regval;
+        struct  {
+                bdrkreg_t       rlc_reserved              :     31;
+                bdrkreg_t       rlc_use_internal          :      1;
+                bdrkreg_t       rlc_reserved_1            :      3;
+                bdrkreg_t       rlc_gclk                  :      1;
+                bdrkreg_t       rlc_reserved_2            :      2;
+                bdrkreg_t       rlc_gclk_counter          :     10;
+                bdrkreg_t       rlc_reserved_3            :      2;
+                bdrkreg_t       rlc_max_count             :     10;
+                bdrkreg_t       rlc_reserved_4            :      3;
+                bdrkreg_t       rlc_gclk_enable           :      1;
+        } lb_rt_local_ctrl_fld_s;
+} lb_rt_local_ctrl_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  When the value of the USE_INTERNAL field in the LB_RT_LOCAL_CTRL    *
+ * register is 0, the LB filters an incoming global clock signal and    *
+ * provides the result to PI0 and PI1 for their real-time clock         *
+ * inputs. The LB can perform either simple filtering or complex        *
+ * filtering, depending on the value of the MASK_ENABLE bit. For the    *
+ * simple filtering option, the LB merely removes glitches from the     *
+ * incoming global clock; if the global clock goes high (or low) for    *
+ * only a single cycle, the LB considers it to be a glitch and does     *
+ * not pass it through to PI0 and PI1. For the complex filtering        *
+ * option, the LB expects positive edges on the incoming global clock   *
+ * to be spaced at fairly regular intervals and it looks for them at    *
+ * these times; the LB keeps track of unexpected or missing positive    *
+ * edges, and it generates an edge itself whenever the incoming         *
+ * global clock apparently misses an edge. For each filtering option,   *
+ * the real-time clock which the LB provides to PI0 and PI1 is not      *
+ * necessarily a square wave; when a positive edge happens, the         *
+ * real-time clock stays high for (2*MAX_COUNT+1-OFFSET)/2 cycles of    *
+ * the LB's system clock, and then is low until the next positive       *
+ * edge.                                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_rt_filter_ctrl_u {
+	bdrkreg_t	lb_rt_filter_ctrl_regval;
+	struct  {
+		bdrkreg_t       rfc_offset                :      5;
+		bdrkreg_t       rfc_reserved_4            :      3;
+		bdrkreg_t       rfc_mask_counter          :     12;
+		bdrkreg_t       rfc_mask_enable           :      1;
+		bdrkreg_t       rfc_reserved_3            :      3;
+		bdrkreg_t       rfc_dropout_counter       :     10;
+		bdrkreg_t       rfc_reserved_2            :      2;
+		bdrkreg_t       rfc_dropout_thresh        :     10;
+		bdrkreg_t       rfc_reserved_1            :      2;
+		bdrkreg_t       rfc_error_counter         :     10;
+		bdrkreg_t       rfc_reserved              :      6;
+	} lb_rt_filter_ctrl_fld_s;
+} lb_rt_filter_ctrl_u_t;
+
+#else
+
+typedef union lb_rt_filter_ctrl_u {
+        bdrkreg_t       lb_rt_filter_ctrl_regval;
+        struct  {
+                bdrkreg_t       rfc_reserved              :      6;
+                bdrkreg_t       rfc_error_counter         :     10;
+                bdrkreg_t       rfc_reserved_1            :      2;
+                bdrkreg_t       rfc_dropout_thresh        :     10;
+                bdrkreg_t       rfc_reserved_2            :      2;
+                bdrkreg_t       rfc_dropout_counter       :     10;
+                bdrkreg_t       rfc_reserved_3            :      3;
+                bdrkreg_t       rfc_mask_enable           :      1;
+                bdrkreg_t       rfc_mask_counter          :     12;
+                bdrkreg_t       rfc_reserved_4            :      3;
+                bdrkreg_t       rfc_offset                :      5;
+        } lb_rt_filter_ctrl_fld_s;
+} lb_rt_filter_ctrl_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is a scratch register that is reset to 0x0. At the    *
+ * normal address, the register is a simple storage location. At the    *
+ * Write-If-Zero address, the register accepts a new value from a       *
+ * write operation only if the current value is zero.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_scratch_reg0_u {
+	bdrkreg_t	lb_scratch_reg0_regval;
+	struct  {
+		bdrkreg_t	sr_scratch_bits           :	64;
+	} lb_scratch_reg0_fld_s;
+} lb_scratch_reg0_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  These registers are scratch registers that are not reset. At a      *
+ * register's normal address, it is a simple storage location. At a     *
+ * register's Write-If-Zero address, it accepts a new value from a      *
+ * write operation only if the current value is zero.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_scratch_reg1_u {
+	bdrkreg_t	lb_scratch_reg1_regval;
+	struct  {
+		bdrkreg_t	sr_scratch_bits           :	64;
+	} lb_scratch_reg1_fld_s;
+} lb_scratch_reg1_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  These registers are scratch registers that are not reset. At a      *
+ * register's normal address, it is a simple storage location. At a     *
+ * register's Write-If-Zero address, it accepts a new value from a      *
+ * write operation only if the current value is zero.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_scratch_reg2_u {
+	bdrkreg_t	lb_scratch_reg2_regval;
+	struct  {
+		bdrkreg_t	sr_scratch_bits           :	64;
+	} lb_scratch_reg2_fld_s;
+} lb_scratch_reg2_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  These one-bit registers are scratch registers. At a register's      *
+ * normal address, it is a simple storage location. At a register's     *
+ * Read-Set-If-Zero address, it returns the original contents and       *
+ * sets the bit if the original value is zero.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_scratch_reg3_u {
+	bdrkreg_t	lb_scratch_reg3_regval;
+	struct  {
+		bdrkreg_t	sr_scratch_bit            :	 1;
+		bdrkreg_t	sr_reserved		  :	63;
+	} lb_scratch_reg3_fld_s;
+} lb_scratch_reg3_u_t;
+
+#else
+
+typedef union lb_scratch_reg3_u {
+	bdrkreg_t	lb_scratch_reg3_regval;
+	struct	{
+		bdrkreg_t	sr_reserved		  :	63;
+		bdrkreg_t	sr_scratch_bit		  :	 1;
+	} lb_scratch_reg3_fld_s;
+} lb_scratch_reg3_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  These one-bit registers are scratch registers. At a register's      *
+ * normal address, it is a simple storage location. At a register's     *
+ * Read-Set-If-Zero address, it returns the original contents and       *
+ * sets the bit if the original value is zero.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_scratch_reg4_u {
+	bdrkreg_t	lb_scratch_reg4_regval;
+	struct  {
+		bdrkreg_t	sr_scratch_bit            :	 1;
+		bdrkreg_t       sr_reserved               :     63;
+	} lb_scratch_reg4_fld_s;
+} lb_scratch_reg4_u_t;
+
+#else
+
+typedef union lb_scratch_reg4_u {
+	bdrkreg_t	lb_scratch_reg4_regval;
+	struct	{
+		bdrkreg_t	sr_reserved		  :	63;
+		bdrkreg_t	sr_scratch_bit		  :	 1;
+	} lb_scratch_reg4_fld_s;
+} lb_scratch_reg4_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is a scratch register that is reset to 0x0. At the    *
+ * normal address, the register is a simple storage location. At the    *
+ * Write-If-Zero address, the register accepts a new value from a       *
+ * write operation only if the current value is zero.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_scratch_reg0_wz_u {
+	bdrkreg_t	lb_scratch_reg0_wz_regval;
+	struct  {
+		bdrkreg_t	srw_scratch_bits          :	64;
+	} lb_scratch_reg0_wz_fld_s;
+} lb_scratch_reg0_wz_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  These registers are scratch registers that are not reset. At a      *
+ * register's normal address, it is a simple storage location. At a     *
+ * register's Write-If-Zero address, it accepts a new value from a      *
+ * write operation only if the current value is zero.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_scratch_reg1_wz_u {
+	bdrkreg_t	lb_scratch_reg1_wz_regval;
+	struct  {
+		bdrkreg_t	srw_scratch_bits          :	64;
+	} lb_scratch_reg1_wz_fld_s;
+} lb_scratch_reg1_wz_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  These registers are scratch registers that are not reset. At a      *
+ * register's normal address, it is a simple storage location. At a     *
+ * register's Write-If-Zero address, it accepts a new value from a      *
+ * write operation only if the current value is zero.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_scratch_reg2_wz_u {
+	bdrkreg_t	lb_scratch_reg2_wz_regval;
+	struct  {
+		bdrkreg_t	srw_scratch_bits          :	64;
+	} lb_scratch_reg2_wz_fld_s;
+} lb_scratch_reg2_wz_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  These one-bit registers are scratch registers. At a register's      *
+ * normal address, it is a simple storage location. At a register's     *
+ * Read-Set-If-Zero address, it returns the original contents and       *
+ * sets the bit if the original value is zero.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_scratch_reg3_rz_u {
+	bdrkreg_t	lb_scratch_reg3_rz_regval;
+	struct  {
+		bdrkreg_t	srr_scratch_bit           :	 1;
+		bdrkreg_t       srr_reserved              :     63;
+	} lb_scratch_reg3_rz_fld_s;
+} lb_scratch_reg3_rz_u_t;
+
+#else
+
+typedef union lb_scratch_reg3_rz_u {
+	bdrkreg_t	lb_scratch_reg3_rz_regval;
+	struct	{
+		bdrkreg_t	srr_reserved		  :	63;
+		bdrkreg_t	srr_scratch_bit		  :	 1;
+	} lb_scratch_reg3_rz_fld_s;
+} lb_scratch_reg3_rz_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  These one-bit registers are scratch registers. At a register's      *
+ * normal address, it is a simple storage location. At a register's     *
+ * Read-Set-If-Zero address, it returns the original contents and       *
+ * sets the bit if the original value is zero.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_scratch_reg4_rz_u {
+	bdrkreg_t	lb_scratch_reg4_rz_regval;
+	struct  {
+		bdrkreg_t	srr_scratch_bit           :	 1;
+		bdrkreg_t       srr_reserved              :     63;
+	} lb_scratch_reg4_rz_fld_s;
+} lb_scratch_reg4_rz_u_t;
+
+#else
+
+typedef union lb_scratch_reg4_rz_u {
+	bdrkreg_t	lb_scratch_reg4_rz_regval;
+	struct	{
+		bdrkreg_t	srr_reserved		  :	63;
+		bdrkreg_t	srr_scratch_bit		  :	 1;
+	} lb_scratch_reg4_rz_fld_s;
+} lb_scratch_reg4_rz_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register contains vector PIO parameters. A        *
+ * write to this register triggers the LB to send out a vector PIO      *
+ * request packet. Immediately after servicing a write request to the   *
+ * LB_VECTOR_PARMS register, the LB sends back a reply (i.e., the LB    *
+ * doesn't wait for the vector PIO operation to finish first). Three    *
+ * LB registers provide the contents for an outgoing vector PIO         *
+ * request packet. Software should wait until the BUSY bit in           *
+ * LB_VECTOR_PARMS is clear and then initialize all three of these      *
+ * registers before initiating a vector PIO operation. The three        *
+ * vector PIO registers are:                                            *
+ * LB_VECTOR_ROUTE                                                      *
+ * LB_VECTOR_DATA                                                       *
+ * LB_VECTOR_PARMS (should be written last)                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_vector_parms_u {
+	bdrkreg_t	lb_vector_parms_regval;
+	struct  {
+		bdrkreg_t	vp_type                   :	 1;
+		bdrkreg_t       vp_reserved_2             :      2;
+		bdrkreg_t       vp_address                :     21;
+		bdrkreg_t       vp_reserved_1             :      8;
+		bdrkreg_t       vp_write_id               :      8;
+		bdrkreg_t       vp_pio_id                 :     11;
+		bdrkreg_t       vp_reserved               :     12;
+		bdrkreg_t       vp_busy                   :      1;
+	} lb_vector_parms_fld_s;
+} lb_vector_parms_u_t;
+
+#else
+
+typedef union lb_vector_parms_u {
+	bdrkreg_t	lb_vector_parms_regval;
+	struct	{
+		bdrkreg_t	vp_busy			  :	 1;
+		bdrkreg_t	vp_reserved		  :	12;
+		bdrkreg_t	vp_pio_id		  :	11;
+		bdrkreg_t	vp_write_id		  :	 8;
+		bdrkreg_t	vp_reserved_1		  :	 8;
+		bdrkreg_t	vp_address		  :	21;
+		bdrkreg_t	vp_reserved_2		  :	 2;
+		bdrkreg_t	vp_type			  :	 1;
+	} lb_vector_parms_fld_s;
+} lb_vector_parms_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the vector PIO route. This is one of the 3   *
+ * vector PIO control registers.                                        *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_vector_route_u {
+	bdrkreg_t	lb_vector_route_regval;
+	struct  {
+		bdrkreg_t	vr_vector                 :	64;
+	} lb_vector_route_fld_s;
+} lb_vector_route_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the vector PIO write data. This is one of    *
+ * the 3 vector PIO control registers. The contents of this register    *
+ * also provide the data value to be sent in outgoing vector PIO read   *
+ * requests and vector PIO write replies.                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_vector_data_u {
+	bdrkreg_t	lb_vector_data_regval;
+	struct  {
+		bdrkreg_t	vd_write_data             :	64;
+	} lb_vector_data_fld_s;
+} lb_vector_data_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register contains the vector PIO return status.   *
+ * Software should clear this register before launching a vector PIO    *
+ * request from the LB. The LB will not modify this register's value    *
+ * if an incoming reply packet encounters any kind of error. If an      *
+ * incoming reply packet does not encounter an error but the            *
+ * STATUS_VALID bit is already set, then the LB sets the OVERRUN bit    *
+ * and leaves the other fields unchanged. The LB updates the values     *
+ * of the SOURCE, PIO_ID, WRITE_ID, ADDRESS and TYPE fields only if     *
+ * an incoming vector PIO reply packet does not encounter an error      *
+ * and the STATUS_VALID bit is clear; at the same time, the LB sets     *
+ * the STATUS_VALID bit and will also update the LB_VECTOR_RETURN and   *
+ * LB_VECTOR_READ_DATA registers.                                       *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_vector_status_u {
+	bdrkreg_t	lb_vector_status_regval;
+	struct  {
+		bdrkreg_t	vs_type                   :	 3;
+		bdrkreg_t       vs_address                :     21;
+		bdrkreg_t       vs_reserved               :      8;
+		bdrkreg_t       vs_write_id               :      8;
+		bdrkreg_t       vs_pio_id                 :     11;
+		bdrkreg_t       vs_source                 :     11;
+		bdrkreg_t       vs_overrun                :      1;
+		bdrkreg_t       vs_status_valid           :      1;
+	} lb_vector_status_fld_s;
+} lb_vector_status_u_t;
+
+#else
+
+typedef union lb_vector_status_u {
+	bdrkreg_t	lb_vector_status_regval;
+	struct	{
+		bdrkreg_t	vs_status_valid		  :	 1;
+		bdrkreg_t	vs_overrun		  :	 1;
+		bdrkreg_t	vs_source		  :	11;
+		bdrkreg_t	vs_pio_id		  :	11;
+		bdrkreg_t	vs_write_id		  :	 8;
+		bdrkreg_t	vs_reserved		  :	 8;
+		bdrkreg_t	vs_address		  :	21;
+		bdrkreg_t	vs_type			  :	 3;
+	} lb_vector_status_fld_s;
+} lb_vector_status_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the return vector PIO route. The LB will     *
+ * not modify this register's value if an incoming reply packet         *
+ * encounters any kind of error. The LB also will not modify this       *
+ * register's value if the STATUS_VALID bit in the LB_VECTOR_STATUS     *
+ * register is set when it receives an incoming vector PIO reply. The   *
+ * LB stores an incoming vector PIO reply packet's vector route flit    *
+ * in this register only if the packet does not encounter an error      *
+ * and the STATUS_VALID bit is clear.                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_vector_return_u {
+	bdrkreg_t	lb_vector_return_regval;
+	struct  {
+		bdrkreg_t	vr_return_vector          :	64;
+	} lb_vector_return_fld_s;
+} lb_vector_return_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the vector PIO read data, if any. The LB     *
+ * will not modify this register's value if an incoming reply packet    *
+ * encounters any kind of error. The LB also will not modify this       *
+ * register's value if the STATUS_VALID bit in the LB_VECTOR_STATUS     *
+ * register is set when it receives an incoming vector PIO reply. The   *
+ * LB stores an incoming vector PIO reply packet's data flit in this    *
+ * register only if the packet does not encounter an error and the      *
+ * STATUS_VALID bit is clear.                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union lb_vector_read_data_u {
+	bdrkreg_t	lb_vector_read_data_regval;
+	struct  {
+		bdrkreg_t	vrd_read_data             :	64;
+	} lb_vector_read_data_fld_s;
+} lb_vector_read_data_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register contains the vector PIO return status.   *
+ * Software should clear this register before launching a vector PIO    *
+ * request from the LB. The LB will not modify this register's value    *
+ * if an incoming reply packet encounters any kind of error. If an      *
+ * incoming reply packet does not encounter an error but the            *
+ * STATUS_VALID bit is already set, then the LB sets the OVERRUN bit    *
+ * and leaves the other fields unchanged. The LB updates the values     *
+ * of the SOURCE, PIO_ID, WRITE_ID, ADDRESS and TYPE fields only if     *
+ * an incoming vector PIO reply packet does not encounter an error      *
+ * and the STATUS_VALID bit is clear; at the same time, the LB sets     *
+ * the STATUS_VALID bit and will also update the LB_VECTOR_RETURN and   *
+ * LB_VECTOR_READ_DATA registers.                                       *
+ *                                                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union lb_vector_status_clear_u {
+	bdrkreg_t	lb_vector_status_clear_regval;
+	struct  {
+		bdrkreg_t	vsc_type                  :	 3;
+		bdrkreg_t       vsc_address               :     21;
+		bdrkreg_t       vsc_reserved              :      8;
+		bdrkreg_t       vsc_write_id              :      8;
+		bdrkreg_t       vsc_pio_id                :     11;
+		bdrkreg_t       vsc_source                :     11;
+		bdrkreg_t       vsc_overrun               :      1;
+		bdrkreg_t       vsc_status_valid          :      1;
+	} lb_vector_status_clear_fld_s;
+} lb_vector_status_clear_u_t;
+
+#else
+
+typedef union lb_vector_status_clear_u {
+	bdrkreg_t	lb_vector_status_clear_regval;
+	struct	{
+		bdrkreg_t	vsc_status_valid	  :	 1;
+		bdrkreg_t	vsc_overrun		  :	 1;
+		bdrkreg_t	vsc_source		  :	11;
+		bdrkreg_t	vsc_pio_id		  :	11;
+		bdrkreg_t	vsc_write_id		  :	 8;
+		bdrkreg_t	vsc_reserved		  :	 8;
+		bdrkreg_t	vsc_address		  :	21;
+		bdrkreg_t	vsc_type		  :	 3;
+	} lb_vector_status_clear_fld_s;
+} lb_vector_status_clear_u_t;
+
+#endif
+
+
+
+
+
+
+#endif /* _LANGUAGE_C */
+
+/************************************************************************
+ *                                                                      *
+ *               MAKE ALL ADDITIONS AFTER THIS LINE                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+
+#endif /* _ASM_SN_SN1_HUBLB_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hublb_next.h linux/include/asm-ia64/sn/sn1/hublb_next.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hublb_next.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hublb_next.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,110 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBLB_NEXT_H
+#define _ASM_SN_SN1_HUBLB_NEXT_H
+
+/**********************************************************************
+
+ This contains some mask and shift values for LB defined as required
+ for compatibility.
+
+ **********************************************************************/
+
+#define LRI_SYSTEM_SIZE_SHFT        46
+#define LRI_SYSTEM_SIZE_MASK        (UINT64_CAST 0x3 << LRI_SYSTEM_SIZE_SHFT)
+#define LRI_NODEID_SHFT        32
+#define LRI_NODEID_MASK        (UINT64_CAST 0xff << LRI_NODEID_SHFT)/* Node ID    */
+#define LRI_CHIPID_SHFT		12
+#define LRI_CHIPID_MASK		(UINT64_CAST 0xffff << LRI_CHIPID_SHFT) /* should be 0x3012 */
+#define LRI_REV_SHFT        28
+#define LRI_REV_MASK        (UINT64_CAST 0xf << LRI_REV_SHFT)/* Chip revision    */
+
+/* Values for LRI_SYSTEM_SIZE */
+#define SYSTEM_SIZE_INVALID	0x3
+#define SYSTEM_SIZE_NMODE	0x2
+#define SYSTEM_SIZE_COARSE 	0x1
+#define SYSTEM_SIZE_SMALL	0x0
+
+/* In fine mode, each node is a region.  In coarse mode, there are
+ * 2 nodes per region.  In N-mode, there are 4 nodes per region. */
+#define NASID_TO_FINEREG_SHFT   0
+#define NASID_TO_COARSEREG_SHFT 1
+#define NASID_TO_NMODEREG_SHFT  2
+
+#define LR_LOCALRESET               (UINT64_CAST 1)
+/*
+ * LB_VECTOR_PARMS mask and shift definitions.
+ * TYPE may be any of the first four PIOTYPEs defined under NI_VECTOR_STATUS.
+ */
+
+#define LVP_BUSY		(UINT64_CAST 1 << 63)
+#define LVP_PIOID_SHFT          40
+#define LVP_PIOID_MASK          (UINT64_CAST 0x7ff << 40)
+#define LVP_WRITEID_SHFT        32
+#define LVP_WRITEID_MASK        (UINT64_CAST 0xff << 32)
+#define LVP_ADDRESS_MASK        (UINT64_CAST 0xfffff8)   /* Bits 23:3        */
+#define LVP_TYPE_SHFT           0
+#define LVP_TYPE_MASK           (UINT64_CAST 0x3)
+
+/* LB_VECTOR_STATUS mask and shift definitions */
+
+#define LVS_VALID               (UINT64_CAST 1 << 63)
+#define LVS_OVERRUN             (UINT64_CAST 1 << 62)
+#define LVS_TARGET_SHFT         51
+#define LVS_TARGET_MASK         (UINT64_CAST 0x7ff << 51)
+#define LVS_PIOID_SHFT          40
+#define LVS_PIOID_MASK          (UINT64_CAST 0x7ff << 40)
+#define LVS_WRITEID_SHFT        32
+#define LVS_WRITEID_MASK        (UINT64_CAST 0xff << 32)
+#define LVS_ADDRESS_MASK        (UINT64_CAST 0xfffff8)   /* Bits 23:3     */
+#define LVS_TYPE_SHFT           0
+#define LVS_TYPE_MASK           (UINT64_CAST 0x7)
+#define LVS_ERROR_MASK          (UINT64_CAST 0x4)  /* bit set means error */
+
+/* LB_RT_LOCAL_CTRL mask and shift definitions */
+
+#define LRLC_USE_INT_SHFT       32
+#define LRLC_USE_INT_MASK       (UINT64_CAST 1 << 32)
+#define LRLC_USE_INT            (UINT64_CAST 1 << 32)
+#define LRLC_GCLK_SHFT          28
+#define LRLC_GCLK_MASK          (UINT64_CAST 1 << 28)
+#define LRLC_GCLK               (UINT64_CAST 1 << 28)
+#define LRLC_GCLK_COUNT_SHFT    16
+#define LRLC_GCLK_COUNT_MASK    (UINT64_CAST 0x3ff << 16)
+#define LRLC_MAX_COUNT_SHFT     4
+#define LRLC_MAX_COUNT_MASK     (UINT64_CAST 0x3ff << 4)
+#define LRLC_GCLK_EN_SHFT       0
+#define LRLC_GCLK_EN_MASK       (UINT64_CAST 1)
+#define LRLC_GCLK_EN            (UINT64_CAST 1)
+
+/* LB_NODES_ABSENT mask and shift definitions */
+#define LNA_VALID_SHFT		15
+#define LNA_VALID_MASK		(UINT64_CAST 1 << LNA_VALID_SHFT)
+#define LNA_VALID		(UINT64_CAST 1 << LNA_VALID_SHFT)
+#define LNA_NODE_SHFT		0
+#define LNA_NODE_MASK		(UINT64_CAST 0xff << LNA_NODE_SHFT)
+
+/* LB_NODES_ABSENT has 4 identical sub-registers, on 16-bit boundaries */
+#define LNA_ENTRY_SHFT		16
+#define LNA_MAX_ENTRIES		4
+#define LNA_ADD(_reg, _n)	((_reg) = (_reg) << LNA_ENTRY_SHFT | \
+				 	LNA_VALID | (_n) << LNA_NODE_SHFT)
+
+#define  PIOTYPE_READ           0       /* VECTOR_PARMS and VECTOR_STATUS   */
+#define  PIOTYPE_WRITE          1       /* VECTOR_PARMS and VECTOR_STATUS   */
+#define  PIOTYPE_UNDEFINED      2       /* VECTOR_PARMS and VECTOR_STATUS   */
+/* XXX IP35 doesn't support vector exchange:  scr. regs. do locks directly */
+#define  PIOTYPE_EXCHANGE       3       /* VECTOR_PARMS and VECTOR_STATUS   */
+#define  PIOTYPE_ADDR_ERR       4       /* VECTOR_STATUS only               */
+#define  PIOTYPE_CMD_ERR        5       /* VECTOR_STATUS only               */
+#define  PIOTYPE_PROT_ERR       6       /* VECTOR_STATUS only               */
+#define  PIOTYPE_UNKNOWN        7       /* VECTOR_STATUS only               */
+
+#endif	/* _ASM_SN_SN1_HUBLB_NEXT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubmd.h linux/include/asm-ia64/sn/sn1/hubmd.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubmd.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubmd.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,2477 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBMD_H
+#define _ASM_SN_SN1_HUBMD_H
+
+
+/************************************************************************
+ *                                                                      *
+ *      WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!      *
+ *                                                                      *
+ * This file is created by an automated script. Any (minimal) changes   *
+ * made manually to this  file should be made with care.                *
+ *                                                                      *
+ *               MAKE ALL ADDITIONS TO THE END OF THIS FILE             *
+ *                                                                      *
+ ************************************************************************/
+
+
+#define    MD_CURRENT_CELL           0x00780000    /*
+                                                    * BDDIR, LREG, LBOOT,
+                                                    * RREG, RBOOT
+                                                    * protection and mask
+                                                    * for using Local
+                                                    * Access protection.
+                                                    */
+
+
+
+#define    MD_MEMORY_CONFIG          0x00780008    /*
+                                                    * Memory/Directory
+                                                    * DIMM control
+                                                    */
+
+
+
+#define    MD_ARBITRATION_CONTROL    0x00780010    /*
+                                                    * Arbitration
+                                                    * Parameters
+                                                    */
+
+
+
+#define    MD_MIG_CONFIG             0x00780018    /*
+                                                    * Page Migration
+                                                    * control
+                                                    */
+
+
+
+#define    MD_FANDOP_CAC_STAT0       0x00780020    /*
+                                                    * Fetch-and-op cache
+                                                    * 0 status
+                                                    */
+
+
+
+#define    MD_FANDOP_CAC_STAT1       0x00780028    /*
+                                                    * Fetch-and-op cache
+                                                    * 1 status
+                                                    */
+
+
+
+#define    MD_MISC0_ERROR            0x00780040    /*
+                                                    * Miscellaneous MD
+                                                    * error
+                                                    */
+
+
+
+#define    MD_MISC1_ERROR            0x00780048    /*
+                                                    * Miscellaneous MD
+                                                    * error
+                                                    */
+
+
+
+#define    MD_MISC1_ERROR_CLR        0x00780058    /*
+                                                    * Miscellaneous MD
+                                                    * error clear
+                                                    */
+
+
+
+#define    MD_OUTGOING_RP_QUEUE_SIZE 0x00780060    /*
+                                                    * MD outgoing reply
+                                                    * queues sizing
+                                                    */
+
+
+
+#define    MD_PERF_SEL0              0x00790000    /*
+                                                    * Selects events
+                                                    * monitored by
+                                                    * MD_PERF_CNT0
+                                                    */
+
+
+
+#define    MD_PERF_SEL1              0x00790008    /*
+                                                    * Selects events
+                                                    * monitored by
+                                                    * MD_PERF_CNT1
+                                                    */
+
+
+
+#define    MD_PERF_CNT0              0x00790010    /*
+                                                    * Performance counter
+                                                    * 0
+                                                    */
+
+
+
+#define    MD_PERF_CNT1              0x00790018    /*
+                                                    * Performance counter
+                                                    * 1
+                                                    */
+
+
+
+#define    MD_REFRESH_CONTROL        0x007A0000    /*
+                                                    * Memory/Directory
+                                                    * refresh control
+                                                    */
+
+
+
+#define    MD_JUNK_BUS_TIMING        0x007A0008    /* Junk Bus Timing        */
+
+
+
+#define    MD_LED0                   0x007A0010    /* Reads of 8-bit LED0    */
+
+
+
+#define    MD_LED1                   0x007A0018    /* Reads of 8-bit LED1    */
+
+
+
+#define    MD_LED2                   0x007A0020    /* Reads of 8-bit LED2    */
+
+
+
+#define    MD_LED3                   0x007A0028    /* Reads of 8-bit LED3    */
+
+
+
+#define    MD_BIST_CTL               0x007A0030    /*
+                                                    * BIST general
+                                                    * control
+                                                    */
+
+
+
+#define    MD_BIST_DATA              0x007A0038    /*
+                                                    * BIST initial data
+                                                    * pattern and
+                                                    * variation control
+                                                    */
+
+
+
+#define    MD_BIST_AB_ERR_ADDR       0x007A0040    /* BIST error address     */
+
+
+
+#define    MD_BIST_STATUS            0x007A0048    /* BIST status            */
+
+
+
+#define    MD_IB_DEBUG               0x007A0060    /* IB debug select        */
+
+
+
+#define    MD_DIR_CONFIG             0x007C0000    /*
+                                                    * Directory mode
+                                                    * control
+                                                    */
+
+
+
+#define    MD_DIR_ERROR              0x007C0010    /*
+                                                    * Directory DIMM
+                                                    * error
+                                                    */
+
+
+
+#define    MD_DIR_ERROR_CLR          0x007C0018    /*
+                                                    * Directory DIMM
+                                                    * error clear
+                                                    */
+
+
+
+#define    MD_PROTOCOL_ERROR         0x007C0020    /*
+                                                    * Directory protocol
+                                                    * error
+                                                    */
+
+
+
+#define    MD_PROTOCOL_ERR_CLR       0x007C0028    /*
+                                                    * Directory protocol
+                                                    * error clear
+                                                    */
+
+
+
+#define    MD_MIG_CANDIDATE          0x007C0030    /*
+                                                    * Page migration
+                                                    * candidate
+                                                    */
+
+
+
+#define    MD_MIG_CANDIDATE_CLR      0x007C0038    /*
+                                                    * Page migration
+                                                    * candidate clear
+                                                    */
+
+
+
+#define    MD_MIG_DIFF_THRESH        0x007C0040    /*
+                                                    * Page migration
+                                                    * count difference
+                                                    * threshold
+                                                    */
+
+
+
+#define    MD_MIG_VALUE_THRESH       0x007C0048    /*
+                                                    * Page migration
+                                                    * count absolute
+                                                    * threshold
+                                                    */
+
+
+
+#define    MD_OUTGOING_RQ_QUEUE_SIZE 0x007C0050    /*
+                                                    * MD outgoing request
+                                                    * queues sizing
+                                                    */
+
+
+
+#define    MD_BIST_DB_ERR_DATA       0x007C0058    /*
+                                                    * BIST directory
+                                                    * error data
+                                                    */
+
+
+
+#define    MD_DB_DEBUG               0x007C0060    /* DB debug select        */
+
+
+
+#define    MD_MB_ECC_CONFIG          0x007E0000    /*
+                                                    * Data ECC
+                                                    * Configuration
+                                                    */
+
+
+
+#define    MD_MEM_ERROR              0x007E0010    /* Memory DIMM error      */
+
+
+
+#define    MD_MEM_ERROR_CLR          0x007E0018    /*
+                                                    * Memory DIMM error
+                                                    * clear
+                                                    */
+
+
+
+#define    MD_BIST_MB_ERR_DATA_0     0x007E0020    /*
+                                                    * BIST memory error
+                                                    * data
+                                                    */
+
+
+
+#define    MD_BIST_MB_ERR_DATA_1     0x007E0028    /*
+                                                    * BIST memory error
+                                                    * data
+                                                    */
+
+
+
+#define    MD_BIST_MB_ERR_DATA_2     0x007E0030    /*
+                                                    * BIST memory error
+                                                    * data
+                                                    */
+
+
+
+#define    MD_BIST_MB_ERR_DATA_3     0x007E0038    /*
+                                                    * BIST memory error
+                                                    * data
+                                                    */
+
+
+
+#define    MD_MB_DEBUG               0x007E0040    /* MB debug select        */
+
+
+
+
+
+#ifdef _LANGUAGE_C
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register shows which regions are in the current   *
+ * cell. If a region has its bit set in this register, then it uses     *
+ * the Local Access protection in the directory instead of the          *
+ * separate per-region protection (which would cause a small            *
+ * performance penalty). In addition, writeback and write reply         *
+ * commands from outside the current cell will always check the         *
+ * directory protection before writing data to memory. Writeback and    *
+ * write reply commands from inside the current cell will write         *
+ * memory regardless of the protection value.                           *
+ * This register is also used as the access-rights bit-vector for       *
+ * most of the ASIC-special (HSpec) portion of the address space. It    *
+ * covers the BDDIR, LREG, LBOOT, RREG, and RBOOT spaces. It does not   *
+ * cover the UALIAS and BDECC spaces, as they are covered by the        *
+ * protection in the directory. If a bit in the bit-vector is set,      *
+ * the region corresponding to that bit has read/write permission on    *
+ * these spaces. If the bit is clear, then that region has read-only    *
+ * access to these spaces (except for LREG/RREG which have no access    *
+ * when the bit is clear).                                              *
+ * The granularity of a region is set by the REGION_SIZE register in    *
+ * the NI local register space.                                         *
+ * NOTE: This means that no processor outside the current cell can      *
+ * write into the BDDIR, LREG, LBOOT, RREG, or RBOOT spaces.            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union md_current_cell_u {
+	bdrkreg_t	md_current_cell_regval;
+	struct  {
+		bdrkreg_t	cc_hspec_prot             :	64;
+	} md_current_cell_fld_s;
+} md_current_cell_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register contains three sets of information.      *
+ * The first set describes the size and configuration of DIMMs that     *
+ * are plugged into a system, the second set controls which set of      *
+ * protection checks are performed on each access and the third set     *
+ * controls various DDR SDRAM timing parameters.                        *
+ * In order to configure a DIMM bank, three fields must be initialized: *
+ * BANK_SIZE, DRAM_WIDTH, and BANK_ENABLE. The BANK_SIZE field sets     *
+ * the address range that the MD unit will accept for that DIMM bank.   *
+ * All addresses larger than the specified size will return errors on   *
+ * access. In order to read from a DIMM bank, Bedrock must know         *
+ * whether or not the bank contains x4 or x8/x16 DRAM. The operating    *
+ * system must query the System Controller for this information and     *
+ * then set the DRAM_WIDTH field accordingly. The BANK_ENABLE field     *
+ * can be used to individually enable the two physical banks located    *
+ * on each DIMM bank.                                                   *
+ * The contents of this register are preserved through soft-resets.     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_memory_config_u {
+	bdrkreg_t	md_memory_config_regval;
+	struct  {
+		bdrkreg_t	mc_dimm0_bank_enable      :	 2;
+		bdrkreg_t       mc_reserved_7             :      1;
+		bdrkreg_t       mc_dimm0_dram_width       :      1;
+		bdrkreg_t       mc_dimm0_bank_size        :      4;
+		bdrkreg_t       mc_dimm1_bank_enable      :      2;
+		bdrkreg_t       mc_reserved_6             :      1;
+		bdrkreg_t       mc_dimm1_dram_width       :      1;
+		bdrkreg_t       mc_dimm1_bank_size        :      4;
+                bdrkreg_t       mc_dimm2_bank_enable      :      2;
+                bdrkreg_t       mc_reserved_5             :      1;
+                bdrkreg_t       mc_dimm2_dram_width       :      1;
+                bdrkreg_t       mc_dimm2_bank_size        :      4;
+                bdrkreg_t       mc_dimm3_bank_enable      :      2;
+                bdrkreg_t       mc_reserved_4             :      1;
+                bdrkreg_t       mc_dimm3_dram_width       :      1;
+                bdrkreg_t       mc_dimm3_bank_size        :      4;
+                bdrkreg_t       mc_dimm0_sel              :      2;
+                bdrkreg_t       mc_reserved_3             :     10;
+                bdrkreg_t       mc_cc_enable              :      1;
+                bdrkreg_t       mc_io_prot_en             :      1;
+                bdrkreg_t       mc_io_prot_ignore         :      1;
+                bdrkreg_t       mc_cpu_prot_ignore        :      1;
+                bdrkreg_t       mc_db_neg_edge            :      1;
+                bdrkreg_t       mc_phase_delay            :      1;
+                bdrkreg_t       mc_delay_mux_sel          :      2;
+                bdrkreg_t       mc_sample_time            :      2;
+                bdrkreg_t       mc_reserved_2             :      2;
+                bdrkreg_t       mc_mb_neg_edge            :      3;
+                bdrkreg_t       mc_reserved_1             :      1;
+                bdrkreg_t       mc_rcd_config             :      1;
+                bdrkreg_t       mc_rp_config              :      1;
+                bdrkreg_t       mc_reserved               :      2;
+	} md_memory_config_fld_s;
+} md_memory_config_u_t;
+
+#else
+
+typedef union md_memory_config_u {
+	bdrkreg_t	md_memory_config_regval;
+	struct	{
+		bdrkreg_t	mc_reserved		  :	 2;
+		bdrkreg_t	mc_rp_config		  :	 1;
+		bdrkreg_t	mc_rcd_config		  :	 1;
+		bdrkreg_t	mc_reserved_1		  :	 1;
+		bdrkreg_t	mc_mb_neg_edge		  :	 3;
+		bdrkreg_t	mc_reserved_2		  :	 2;
+		bdrkreg_t	mc_sample_time		  :	 2;
+		bdrkreg_t	mc_delay_mux_sel	  :	 2;
+		bdrkreg_t	mc_phase_delay		  :	 1;
+		bdrkreg_t	mc_db_neg_edge		  :	 1;
+		bdrkreg_t	mc_cpu_prot_ignore	  :	 1;
+		bdrkreg_t	mc_io_prot_ignore	  :	 1;
+		bdrkreg_t	mc_io_prot_en		  :	 1;
+		bdrkreg_t	mc_cc_enable		  :	 1;
+		bdrkreg_t	mc_reserved_3		  :	10;
+		bdrkreg_t	mc_dimm0_sel		  :	 2;
+		bdrkreg_t	mc_dimm3_bank_size	  :	 4;
+		bdrkreg_t	mc_dimm3_dram_width	  :	 1;
+		bdrkreg_t	mc_reserved_4		  :	 1;
+		bdrkreg_t	mc_dimm3_bank_enable	  :	 2;
+		bdrkreg_t	mc_dimm2_bank_size	  :	 4;
+		bdrkreg_t	mc_dimm2_dram_width	  :	 1;
+		bdrkreg_t	mc_reserved_5		  :	 1;
+		bdrkreg_t	mc_dimm2_bank_enable	  :	 2;
+		bdrkreg_t	mc_dimm1_bank_size	  :	 4;
+		bdrkreg_t	mc_dimm1_dram_width	  :	 1;
+		bdrkreg_t	mc_reserved_6		  :	 1;
+		bdrkreg_t	mc_dimm1_bank_enable	  :	 2;
+		bdrkreg_t	mc_dimm0_bank_size	  :	 4;
+		bdrkreg_t	mc_dimm0_dram_width	  :	 1;
+		bdrkreg_t	mc_reserved_7		  :	 1;
+		bdrkreg_t	mc_dimm0_bank_enable	  :	 2;
+	} md_memory_config_fld_s;
+} md_memory_config_u_t;
+
+#endif
+
+
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_arbitration_control_u {
+	bdrkreg_t	md_arbitration_control_regval;
+	struct  {
+		bdrkreg_t	ac_reply_guar             :	 4;
+		bdrkreg_t       ac_write_guar             :      4;
+		bdrkreg_t       ac_reserved               :     56;
+	} md_arbitration_control_fld_s;
+} md_arbitration_control_u_t;
+
+#else
+
+typedef union md_arbitration_control_u {
+	bdrkreg_t	md_arbitration_control_regval;
+	struct	{
+		bdrkreg_t	ac_reserved		  :	56;
+		bdrkreg_t	ac_write_guar		  :	 4;
+		bdrkreg_t	ac_reply_guar		  :	 4;
+	} md_arbitration_control_fld_s;
+} md_arbitration_control_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains page migration control fields.                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mig_config_u {
+	bdrkreg_t	md_mig_config_regval;
+	struct  {
+		bdrkreg_t	mc_mig_interval           :	10;
+		bdrkreg_t       mc_reserved_2             :      6;
+		bdrkreg_t       mc_mig_node_mask          :      8;
+		bdrkreg_t       mc_reserved_1             :      8;
+		bdrkreg_t       mc_mig_enable             :      1;
+		bdrkreg_t       mc_reserved               :     31;
+	} md_mig_config_fld_s;
+} md_mig_config_u_t;
+
+#else
+
+typedef union md_mig_config_u {
+	bdrkreg_t	md_mig_config_regval;
+	struct	{
+		bdrkreg_t	mc_reserved		  :	31;
+		bdrkreg_t	mc_mig_enable		  :	 1;
+		bdrkreg_t	mc_reserved_1		  :	 8;
+		bdrkreg_t	mc_mig_node_mask	  :	 8;
+		bdrkreg_t	mc_reserved_2		  :	 6;
+		bdrkreg_t	mc_mig_interval		  :	10;
+	} md_mig_config_fld_s;
+} md_mig_config_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Each register contains the valid bit and address of the entry in    *
+ * the fetch-and-op for cache 0 (or 1).                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_fandop_cac_stat0_u {
+	bdrkreg_t	md_fandop_cac_stat0_regval;
+	struct  {
+		bdrkreg_t	fcs_reserved_1            :	 6;
+		bdrkreg_t       fcs_addr                  :     27;
+		bdrkreg_t       fcs_reserved              :     30;
+		bdrkreg_t       fcs_valid                 :      1;
+	} md_fandop_cac_stat0_fld_s;
+} md_fandop_cac_stat0_u_t;
+
+#else
+
+typedef union md_fandop_cac_stat0_u {
+	bdrkreg_t	md_fandop_cac_stat0_regval;
+	struct	{
+		bdrkreg_t	fcs_valid		  :	 1;
+		bdrkreg_t	fcs_reserved		  :	30;
+		bdrkreg_t	fcs_addr		  :	27;
+		bdrkreg_t	fcs_reserved_1		  :	 6;
+	} md_fandop_cac_stat0_fld_s;
+} md_fandop_cac_stat0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Each register contains the valid bit and address of the entry in    *
+ * the fetch-and-op for cache 0 (or 1).                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_fandop_cac_stat1_u {
+	bdrkreg_t	md_fandop_cac_stat1_regval;
+	struct  {
+		bdrkreg_t	fcs_reserved_1            :	 6;
+		bdrkreg_t       fcs_addr                  :     27;
+		bdrkreg_t       fcs_reserved              :     30;
+		bdrkreg_t       fcs_valid                 :      1;
+	} md_fandop_cac_stat1_fld_s;
+} md_fandop_cac_stat1_u_t;
+
+#else
+
+typedef union md_fandop_cac_stat1_u {
+	bdrkreg_t	md_fandop_cac_stat1_regval;
+	struct	{
+		bdrkreg_t	fcs_valid		  :	 1;
+		bdrkreg_t	fcs_reserved		  :	30;
+		bdrkreg_t	fcs_addr		  :	27;
+		bdrkreg_t	fcs_reserved_1		  :	 6;
+	} md_fandop_cac_stat1_fld_s;
+} md_fandop_cac_stat1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  Contains a number of fields to capture various         *
+ * random memory/directory errors. For each 2-bit field, the LSB        *
+ * indicates that additional information has been captured for the      *
+ * error and the MSB indicates overrun, thus:                           *
+ *  x1: bits 51...0 of this register contain additional information     *
+ * for the message that caused this error                               *
+ *  1x: overrun occurred                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_misc0_error_u {
+	bdrkreg_t	md_misc0_error_regval;
+	struct	{
+		bdrkreg_t	me_command		  :	 7;
+                bdrkreg_t       me_reserved_4             :      1;
+                bdrkreg_t       me_source                 :     11;
+                bdrkreg_t       me_reserved_3             :      1;
+                bdrkreg_t       me_suppl                  :     11;
+                bdrkreg_t       me_reserved_2             :      1;
+                bdrkreg_t       me_virtual_channel        :      2;
+                bdrkreg_t       me_reserved_1             :      2;
+                bdrkreg_t       me_tail                   :      1;
+                bdrkreg_t       me_reserved               :     11;
+                bdrkreg_t       me_xb_error               :      4;
+                bdrkreg_t       me_bad_partial_data       :      2;
+                bdrkreg_t       me_missing_dv             :      2;
+                bdrkreg_t       me_short_pack             :      2;
+                bdrkreg_t       me_long_pack              :      2;
+                bdrkreg_t       me_ill_msg                :      2;
+                bdrkreg_t       me_ill_revision           :      2;
+	} md_misc0_error_fld_s;
+} md_misc0_error_u_t;
+
+#else
+
+typedef union md_misc0_error_u {
+	bdrkreg_t	md_misc0_error_regval;
+	struct  {
+		bdrkreg_t	me_ill_revision           :	 2;
+		bdrkreg_t	me_ill_msg                :	 2;
+		bdrkreg_t	me_long_pack              :	 2;
+		bdrkreg_t	me_short_pack             :	 2;
+		bdrkreg_t	me_missing_dv             :	 2;
+		bdrkreg_t	me_bad_partial_data       :	 2;
+		bdrkreg_t	me_xb_error               :	 4;
+		bdrkreg_t	me_reserved               :	11;
+		bdrkreg_t	me_tail                   :	 1;
+		bdrkreg_t	me_reserved_1             :	 2;
+		bdrkreg_t	me_virtual_channel        :	 2;
+		bdrkreg_t	me_reserved_2             :	 1;
+		bdrkreg_t	me_suppl                  :	11;
+		bdrkreg_t	me_reserved_3             :	 1;
+		bdrkreg_t	me_source                 :	11;
+		bdrkreg_t	me_reserved_4             :	 1;
+		bdrkreg_t	me_command                :	 7;
+	} md_misc0_error_fld_s;
+} md_misc0_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Address for error captured in MISC0_ERROR. Error valid bits are     *
+ * repeated in both MISC0_ERROR and MISC1_ERROR (allowing them to be    *
+ * read sequentially without missing any errors).                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_misc1_error_u {
+	bdrkreg_t	md_misc1_error_regval;
+	struct  {
+		bdrkreg_t	me_reserved_1             :	 3;
+		bdrkreg_t       me_address                :     38;
+		bdrkreg_t       me_reserved               :      7;
+		bdrkreg_t       me_xb_error               :      4;
+		bdrkreg_t       me_bad_partial_data       :      2;
+		bdrkreg_t       me_missing_dv             :      2;
+		bdrkreg_t       me_short_pack             :      2;
+		bdrkreg_t       me_long_pack              :      2;
+		bdrkreg_t       me_ill_msg                :      2;
+		bdrkreg_t       me_ill_revision           :      2;
+	} md_misc1_error_fld_s;
+} md_misc1_error_u_t;
+
+#else
+
+typedef union md_misc1_error_u {
+	bdrkreg_t	md_misc1_error_regval;
+	struct	{
+		bdrkreg_t	me_ill_revision		  :	 2;
+		bdrkreg_t	me_ill_msg		  :	 2;
+		bdrkreg_t	me_long_pack		  :	 2;
+		bdrkreg_t	me_short_pack		  :	 2;
+		bdrkreg_t	me_missing_dv		  :	 2;
+		bdrkreg_t	me_bad_partial_data	  :	 2;
+		bdrkreg_t	me_xb_error		  :	 4;
+		bdrkreg_t	me_reserved		  :	 7;
+		bdrkreg_t	me_address		  :	38;
+		bdrkreg_t	me_reserved_1		  :	 3;
+	} md_misc1_error_fld_s;
+} md_misc1_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Address for error captured in MISC0_ERROR. Error valid bits are     *
+ * repeated in both MISC0_ERROR and MISC1_ERROR (allowing them to be    *
+ * read sequentially without missing any errors).                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_misc1_error_clr_u {
+	bdrkreg_t	md_misc1_error_clr_regval;
+	struct  {
+		bdrkreg_t	mec_reserved_1            :	 3;
+		bdrkreg_t       mec_address               :     38;
+		bdrkreg_t       mec_reserved              :      7;
+		bdrkreg_t       mec_xb_error              :      4;
+		bdrkreg_t       mec_bad_partial_data      :      2;
+		bdrkreg_t       mec_missing_dv            :      2;
+		bdrkreg_t       mec_short_pack            :      2;
+		bdrkreg_t       mec_long_pack             :      2;
+		bdrkreg_t       mec_ill_msg               :      2;
+		bdrkreg_t       mec_ill_revision          :      2;
+	} md_misc1_error_clr_fld_s;
+} md_misc1_error_clr_u_t;
+
+#else
+
+typedef union md_misc1_error_clr_u {
+	bdrkreg_t	md_misc1_error_clr_regval;
+	struct	{
+		bdrkreg_t	mec_ill_revision	  :	 2;
+		bdrkreg_t	mec_ill_msg		  :	 2;
+		bdrkreg_t	mec_long_pack		  :	 2;
+		bdrkreg_t	mec_short_pack		  :	 2;
+		bdrkreg_t	mec_missing_dv		  :	 2;
+		bdrkreg_t	mec_bad_partial_data	  :	 2;
+		bdrkreg_t	mec_xb_error		  :	 4;
+		bdrkreg_t	mec_reserved		  :	 7;
+		bdrkreg_t	mec_address		  :	38;
+		bdrkreg_t	mec_reserved_1		  :	 3;
+	} md_misc1_error_clr_fld_s;
+} md_misc1_error_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  The MD no longer allows for arbitrarily sizing the     *
+ * reply queues, so all of the fields in this register are read-only    *
+ * and contain the reset default value of 12 for the MOQHs (for         *
+ * headers) and 24 for the MOQDs (for data).                            *
+ * Reading from this register returns the values currently held in      *
+ * the MD's credit counters. Writing to the register resets the         *
+ * counters to the default reset values specified in the table below.   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_outgoing_rp_queue_size_u {
+	bdrkreg_t	md_outgoing_rp_queue_size_regval;
+	struct  {
+		bdrkreg_t	orqs_reserved_6           :	 8;
+		bdrkreg_t       orqs_moqh_p0_rp_size      :      4;
+		bdrkreg_t       orqs_reserved_5           :      4;
+		bdrkreg_t       orqs_moqh_p1_rp_size      :      4;
+		bdrkreg_t       orqs_reserved_4           :      4;
+		bdrkreg_t       orqs_moqh_np_rp_size      :      4;
+		bdrkreg_t       orqs_reserved_3           :      4;
+		bdrkreg_t       orqs_moqd_pi0_rp_size     :      5;
+		bdrkreg_t       orqs_reserved_2           :      3;
+		bdrkreg_t       orqs_moqd_pi1_rp_size     :      5;
+		bdrkreg_t       orqs_reserved_1           :      3;
+		bdrkreg_t       orqs_moqd_np_rp_size      :      5;
+		bdrkreg_t       orqs_reserved             :     11;
+	} md_outgoing_rp_queue_size_fld_s;
+} md_outgoing_rp_queue_size_u_t;
+
+#else
+
+typedef union md_outgoing_rp_queue_size_u {
+	bdrkreg_t	md_outgoing_rp_queue_size_regval;
+	struct	{
+		bdrkreg_t	orqs_reserved		  :	11;
+		bdrkreg_t	orqs_moqd_np_rp_size	  :	 5;
+		bdrkreg_t	orqs_reserved_1		  :	 3;
+		bdrkreg_t	orqs_moqd_pi1_rp_size	  :	 5;
+		bdrkreg_t	orqs_reserved_2		  :	 3;
+		bdrkreg_t	orqs_moqd_pi0_rp_size	  :	 5;
+		bdrkreg_t	orqs_reserved_3		  :	 4;
+		bdrkreg_t	orqs_moqh_np_rp_size	  :	 4;
+		bdrkreg_t	orqs_reserved_4		  :	 4;
+		bdrkreg_t	orqs_moqh_p1_rp_size	  :	 4;
+		bdrkreg_t	orqs_reserved_5		  :	 4;
+		bdrkreg_t	orqs_moqh_p0_rp_size	  :	 4;
+		bdrkreg_t	orqs_reserved_6		  :	 8;
+	} md_outgoing_rp_queue_size_fld_s;
+} md_outgoing_rp_queue_size_u_t;
+
+#endif
+
+
+
+
+
+
+/* MD_PERF_SEL0: event-select register for performance counter 0 (count
+ * mode, activity/source/channel/command match, interrupt enable).  Endian
+ * variants mirror the same 64 bits of fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_perf_sel0_u {
+	bdrkreg_t	md_perf_sel0_regval;
+	struct  {
+		bdrkreg_t	ps_cnt_mode               :	 2;
+		bdrkreg_t       ps_reserved_2             :      2;
+		bdrkreg_t       ps_activity               :      4;
+		bdrkreg_t       ps_source                 :      7;
+		bdrkreg_t       ps_reserved_1             :      1;
+		bdrkreg_t       ps_channel                :      4;
+		bdrkreg_t       ps_command                :     40;
+		bdrkreg_t       ps_reserved               :      3;
+		bdrkreg_t       ps_interrupt              :      1;
+	} md_perf_sel0_fld_s;
+} md_perf_sel0_u_t;
+
+#else
+
+typedef union md_perf_sel0_u {
+	bdrkreg_t	md_perf_sel0_regval;
+	struct	{
+		bdrkreg_t	ps_interrupt		  :	 1;
+		bdrkreg_t	ps_reserved		  :	 3;
+		bdrkreg_t	ps_command		  :	40;
+		bdrkreg_t	ps_channel		  :	 4;
+		bdrkreg_t	ps_reserved_1		  :	 1;
+		bdrkreg_t	ps_source		  :	 7;
+		bdrkreg_t	ps_activity		  :	 4;
+		bdrkreg_t	ps_reserved_2		  :	 2;
+		bdrkreg_t	ps_cnt_mode		  :	 2;
+	} md_perf_sel0_fld_s;
+} md_perf_sel0_u_t;
+
+#endif
+
+
+
+
+/* MD_PERF_SEL1: event-select register for performance counter 1; field
+ * layout is identical to MD_PERF_SEL0 (same ps_* names, same widths).
+ * Endian variants mirror the same 64 bits of fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_perf_sel1_u {
+	bdrkreg_t	md_perf_sel1_regval;
+	struct  {
+		bdrkreg_t	ps_cnt_mode               :	 2;
+		bdrkreg_t       ps_reserved_2             :      2;
+		bdrkreg_t       ps_activity               :      4;
+		bdrkreg_t       ps_source                 :      7;
+		bdrkreg_t       ps_reserved_1             :      1;
+		bdrkreg_t       ps_channel                :      4;
+		bdrkreg_t       ps_command                :     40;
+		bdrkreg_t       ps_reserved               :      3;
+		bdrkreg_t       ps_interrupt              :      1;
+	} md_perf_sel1_fld_s;
+} md_perf_sel1_u_t;
+
+#else
+
+typedef union md_perf_sel1_u {
+	bdrkreg_t	md_perf_sel1_regval;
+	struct	{
+		bdrkreg_t	ps_interrupt		  :	 1;
+		bdrkreg_t	ps_reserved		  :	 3;
+		bdrkreg_t	ps_command		  :	40;
+		bdrkreg_t	ps_channel		  :	 4;
+		bdrkreg_t	ps_reserved_1		  :	 1;
+		bdrkreg_t	ps_source		  :	 7;
+		bdrkreg_t	ps_activity		  :	 4;
+		bdrkreg_t	ps_reserved_2		  :	 2;
+		bdrkreg_t	ps_cnt_mode		  :	 2;
+	} md_perf_sel1_fld_s;
+} md_perf_sel1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Performance counter.                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_PERF_CNT0: performance counter 0 — a 41-bit count plus 23 reserved
+ * bits.  Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_perf_cnt0_u {
+	bdrkreg_t	md_perf_cnt0_regval;
+	struct  {
+		bdrkreg_t	pc_perf_cnt               :	41;
+		bdrkreg_t	pc_reserved		  :	23;
+	} md_perf_cnt0_fld_s;
+} md_perf_cnt0_u_t;
+
+#else
+
+typedef union md_perf_cnt0_u {
+	bdrkreg_t	md_perf_cnt0_regval;
+	struct	{
+		bdrkreg_t	pc_reserved		  :	23;
+		bdrkreg_t	pc_perf_cnt		  :	41;
+	} md_perf_cnt0_fld_s;
+} md_perf_cnt0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Performance counter.                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_PERF_CNT1: performance counter 1; layout identical to MD_PERF_CNT0
+ * (41-bit count, 23 reserved bits), mirrored between endian variants. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_perf_cnt1_u {
+	bdrkreg_t	md_perf_cnt1_regval;
+	struct  {
+		bdrkreg_t	pc_perf_cnt               :	41;
+		bdrkreg_t	pc_reserved		  :	23;
+	} md_perf_cnt1_fld_s;
+} md_perf_cnt1_u_t;
+
+#else
+
+typedef union md_perf_cnt1_u {
+	bdrkreg_t	md_perf_cnt1_regval;
+	struct	{
+		bdrkreg_t	pc_reserved		  :	23;
+		bdrkreg_t	pc_perf_cnt		  :	41;
+	} md_perf_cnt1_fld_s;
+} md_perf_cnt1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register contains the control for                 *
+ * memory/directory refresh. Once the MEMORY_CONFIG register contains   *
+ * the correct DIMM information, the hardware takes care of             *
+ * refreshing all the banks in the system. Therefore, the value in      *
+ * the counter threshold corresponds exactly to the refresh value       *
+ * required by the SDRAM parts (expressed in Bedrock clock cycles).     *
+ * The refresh will execute whenever there is a free cycle and there    *
+ * are still banks that have not been refreshed in the current          *
+ * window. If the window expires with banks still waiting to be         *
+ * refreshed, all other transactions are halted until the banks are     *
+ * refreshed.                                                           *
+ * The upper order bit contains an enable, which may be needed for      *
+ * correct initialization of the DIMMs (according to the specs, the     *
+ * first operation to the DIMMs should be a mode register write, not    *
+ * a refresh, so this bit is cleared on reset) and is also useful for   *
+ * diagnostic purposes.                                                 *
+ * For the SDRAM parts used by Bedrock, 4096 refreshes need to be       *
+ * issued during every 64 ms window, resulting in a refresh threshold   *
+ * of 3125 Bedrock cycles.                                              *
+ * The ENABLE and CNT_THRESH fields of this register are preserved      *
+ * through soft-resets.                                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_REFRESH_CONTROL: refresh threshold/counter and the top-bit enable
+ * described in the banner above (ENABLE and CNT_THRESH survive soft
+ * reset).  Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_refresh_control_u {
+	bdrkreg_t	md_refresh_control_regval;
+	struct  {
+		bdrkreg_t	rc_cnt_thresh             :	12;
+		bdrkreg_t       rc_counter                :     12;
+		bdrkreg_t       rc_reserved               :     39;
+		bdrkreg_t       rc_enable                 :      1;
+	} md_refresh_control_fld_s;
+} md_refresh_control_u_t;
+
+#else
+
+typedef union md_refresh_control_u {
+	bdrkreg_t	md_refresh_control_regval;
+	struct	{
+		bdrkreg_t	rc_enable		  :	 1;
+		bdrkreg_t	rc_reserved		  :	39;
+		bdrkreg_t	rc_counter		  :	12;
+		bdrkreg_t	rc_cnt_thresh		  :	12;
+	} md_refresh_control_fld_s;
+} md_refresh_control_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register controls the read and write timing for Flash PROM,    *
+ * UART and Synergy junk bus devices.                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_JUNK_BUS_TIMING: 8-bit setup/hold and enable timing values for the
+ * Flash PROM, UART and Synergy junk-bus devices; 16 bits reserved.
+ * Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_junk_bus_timing_u {
+	bdrkreg_t	md_junk_bus_timing_regval;
+	struct  {
+		bdrkreg_t	jbt_fprom_setup_hold      :	 8;
+		bdrkreg_t       jbt_fprom_enable          :      8;
+		bdrkreg_t       jbt_uart_setup_hold       :      8;
+		bdrkreg_t       jbt_uart_enable           :      8;
+		bdrkreg_t       jbt_synergy_setup_hold    :      8;
+		bdrkreg_t       jbt_synergy_enable        :      8;
+		bdrkreg_t       jbt_reserved              :     16;
+	} md_junk_bus_timing_fld_s;
+} md_junk_bus_timing_u_t;
+
+#else
+
+typedef union md_junk_bus_timing_u {
+	bdrkreg_t	md_junk_bus_timing_regval;
+	struct	{
+		bdrkreg_t	jbt_reserved		  :	16;
+		bdrkreg_t	jbt_synergy_enable	  :	 8;
+		bdrkreg_t	jbt_synergy_setup_hold	  :	 8;
+		bdrkreg_t	jbt_uart_enable		  :	 8;
+		bdrkreg_t	jbt_uart_setup_hold	  :	 8;
+		bdrkreg_t	jbt_fprom_enable	  :	 8;
+		bdrkreg_t	jbt_fprom_setup_hold	  :	 8;
+	} md_junk_bus_timing_fld_s;
+} md_junk_bus_timing_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Each of these addresses allows the value on one 8-bit bank of       *
+ * LEDs to be read.                                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_LED0: 8-bit data value of LED bank 0; upper 56 bits reserved.
+ * Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_led0_u {
+	bdrkreg_t	md_led0_regval;
+	struct  {
+		bdrkreg_t	l_data                    :	 8;
+		bdrkreg_t       l_reserved                :     56;
+	} md_led0_fld_s;
+} md_led0_u_t;
+
+#else
+
+typedef union md_led0_u {
+	bdrkreg_t	md_led0_regval;
+	struct	{
+		bdrkreg_t	l_reserved		  :	56;
+		bdrkreg_t	l_data			  :	 8;
+	} md_led0_fld_s;
+} md_led0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Each of these addresses allows the value on one 8-bit bank of       *
+ * LEDs to be read.                                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_LED1: 8-bit data value of LED bank 1; layout identical to MD_LED0.
+ * Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_led1_u {
+	bdrkreg_t	md_led1_regval;
+	struct  {
+		bdrkreg_t	l_data                    :	 8;
+		bdrkreg_t       l_reserved                :     56;
+	} md_led1_fld_s;
+} md_led1_u_t;
+
+#else
+
+typedef union md_led1_u {
+	bdrkreg_t	md_led1_regval;
+	struct	{
+		bdrkreg_t	l_reserved		  :	56;
+		bdrkreg_t	l_data			  :	 8;
+	} md_led1_fld_s;
+} md_led1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Each of these addresses allows the value on one 8-bit bank of       *
+ * LEDs to be read.                                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_LED2: 8-bit data value of LED bank 2; layout identical to MD_LED0.
+ * Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_led2_u {
+	bdrkreg_t	md_led2_regval;
+	struct  {
+		bdrkreg_t	l_data                    :	 8;
+		bdrkreg_t       l_reserved                :     56;
+	} md_led2_fld_s;
+} md_led2_u_t;
+
+#else
+
+typedef union md_led2_u {
+	bdrkreg_t	md_led2_regval;
+	struct	{
+		bdrkreg_t	l_reserved		  :	56;
+		bdrkreg_t	l_data			  :	 8;
+	} md_led2_fld_s;
+} md_led2_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Each of these addresses allows the value on one 8-bit bank of       *
+ * LEDs to be read.                                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_LED3: 8-bit data value of LED bank 3; layout identical to MD_LED0.
+ * Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_led3_u {
+	bdrkreg_t	md_led3_regval;
+	struct  {
+		bdrkreg_t	l_data                    :	 8;
+		bdrkreg_t       l_reserved                :     56;
+	} md_led3_fld_s;
+} md_led3_u_t;
+
+#else
+
+typedef union md_led3_u {
+	bdrkreg_t	md_led3_regval;
+	struct	{
+		bdrkreg_t	l_reserved		  :	56;
+		bdrkreg_t	l_data			  :	 8;
+	} md_led3_fld_s;
+} md_led3_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Core control for the BIST function. Start and stop BIST at any      *
+ * time.                                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_BIST_CTL: BIST start/stop/reset controls plus the bank and DIMM
+ * selection bits.  Endian variants mirror the same fields in opposite
+ * order so each name maps to the same physical bits. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_ctl_u {
+	bdrkreg_t	md_bist_ctl_regval;
+	struct  {
+		bdrkreg_t	bc_bist_start             :	 1;
+		bdrkreg_t       bc_bist_stop              :      1;
+		bdrkreg_t       bc_bist_reset             :      1;
+		bdrkreg_t       bc_reserved_1             :      1;
+		bdrkreg_t       bc_bank_num               :      1;
+		bdrkreg_t       bc_dimm_num               :      2;
+		bdrkreg_t       bc_reserved               :     57;
+	} md_bist_ctl_fld_s;
+} md_bist_ctl_u_t;
+
+#else
+
+typedef union md_bist_ctl_u {
+	bdrkreg_t	md_bist_ctl_regval;
+	struct	{
+		bdrkreg_t	bc_reserved		  :	57;
+		bdrkreg_t	bc_dimm_num		  :	 2;
+		bdrkreg_t	bc_bank_num		  :	 1;
+		bdrkreg_t	bc_reserved_1		  :	 1;
+		bdrkreg_t	bc_bist_reset		  :	 1;
+		bdrkreg_t	bc_bist_stop		  :	 1;
+		bdrkreg_t	bc_bist_start		  :	 1;
+	} md_bist_ctl_fld_s;
+} md_bist_ctl_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contain the initial BIST data nibble and the 4-bit data control     *
+ * field.                                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+/* MD_BIST_DATA: initial 4-bit BIST data value plus the four single-bit
+ * data-control flags (nibble/byte/cycle/write).  Endian variants mirror
+ * the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_data_u {
+	bdrkreg_t	md_bist_data_regval;
+	struct  {
+		bdrkreg_t	bd_bist_data              :	 4;
+		bdrkreg_t	bd_bist_nibble		  :	 1;
+		bdrkreg_t       bd_bist_byte              :      1;
+		bdrkreg_t       bd_bist_cycle             :      1;
+		bdrkreg_t       bd_bist_write             :      1;
+		bdrkreg_t       bd_reserved               :     56;
+	} md_bist_data_fld_s;
+} md_bist_data_u_t;
+
+#else
+
+typedef union md_bist_data_u {
+	bdrkreg_t	md_bist_data_regval;
+	struct	{
+		bdrkreg_t	bd_reserved		  :	56;
+		bdrkreg_t	bd_bist_write		  :	 1;
+		bdrkreg_t	bd_bist_cycle		  :	 1;
+		bdrkreg_t	bd_bist_byte		  :	 1;
+		bdrkreg_t	bd_bist_nibble		  :	 1;
+		bdrkreg_t	bd_bist_data		  :	 4;
+	} md_bist_data_fld_s;
+} md_bist_data_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Captures the BIST error address and indicates whether it is an MB   *
+ * error or DB error.                                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_BIST_AB_ERR_ADDR: RAS address plus separate MB/DB CAS addresses of
+ * a BIST error, with mb_error/db_error flag bits.  Endian variants
+ * mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_ab_err_addr_u {
+	bdrkreg_t	md_bist_ab_err_addr_regval;
+	struct  {
+		bdrkreg_t	baea_be_db_cas_addr       :	15;
+		bdrkreg_t       baea_reserved_3           :      1;
+		bdrkreg_t       baea_be_mb_cas_addr       :     15;
+		bdrkreg_t       baea_reserved_2           :      1;
+		bdrkreg_t       baea_be_ras_addr          :     15;
+		bdrkreg_t       baea_reserved_1           :      1;
+		bdrkreg_t       baea_bist_mb_error        :      1;
+		bdrkreg_t       baea_bist_db_error        :      1;
+		bdrkreg_t       baea_reserved             :     14;
+	} md_bist_ab_err_addr_fld_s;
+} md_bist_ab_err_addr_u_t;
+
+#else
+
+typedef union md_bist_ab_err_addr_u {
+	bdrkreg_t	md_bist_ab_err_addr_regval;
+	struct	{
+		bdrkreg_t	baea_reserved		  :	14;
+		bdrkreg_t	baea_bist_db_error	  :	 1;
+		bdrkreg_t	baea_bist_mb_error	  :	 1;
+		bdrkreg_t	baea_reserved_1		  :	 1;
+		bdrkreg_t	baea_be_ras_addr	  :	15;
+		bdrkreg_t	baea_reserved_2		  :	 1;
+		bdrkreg_t	baea_be_mb_cas_addr	  :	15;
+		bdrkreg_t	baea_reserved_3		  :	 1;
+		bdrkreg_t	baea_be_db_cas_addr	  :	15;
+	} md_bist_ab_err_addr_fld_s;
+} md_bist_ab_err_addr_u_t;
+
+#endif
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains information on BIST progress and memory bank currently     *
+ * under BIST.                                                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_BIST_STATUS: two status bits — bist_passed and bist_done — with the
+ * remaining 62 bits reserved.  Endian variants mirror field order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_status_u {
+	bdrkreg_t	md_bist_status_regval;
+	struct  {
+		bdrkreg_t	bs_bist_passed            :	 1;
+		bdrkreg_t       bs_bist_done              :      1;
+		bdrkreg_t       bs_reserved               :     62;
+	} md_bist_status_fld_s;
+} md_bist_status_u_t;
+
+#else
+
+typedef union md_bist_status_u {
+	bdrkreg_t	md_bist_status_regval;
+	struct	{
+		bdrkreg_t	bs_reserved		  :	62;
+		bdrkreg_t	bs_bist_done		  :	 1;
+		bdrkreg_t	bs_bist_passed		  :	 1;
+	} md_bist_status_fld_s;
+} md_bist_status_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains 2 bits that allow the selection of IB debug information    *
+ * at the debug port (see design specification for available debug      *
+ * information).                                                        *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_IB_DEBUG: IB debug-information select (a 2-bit field here, although
+ * the banner above says 3 bits — the declared width is authoritative for
+ * software).  Endian variants mirror field order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_ib_debug_u {
+	bdrkreg_t	md_ib_debug_regval;
+	struct  {
+		bdrkreg_t	id_ib_debug_sel           :	 2;
+		bdrkreg_t       id_reserved               :     62;
+	} md_ib_debug_fld_s;
+} md_ib_debug_u_t;
+
+#else
+
+typedef union md_ib_debug_u {
+	bdrkreg_t	md_ib_debug_regval;
+	struct	{
+		bdrkreg_t	id_reserved		  :	62;
+		bdrkreg_t	id_ib_debug_sel		  :	 2;
+	} md_ib_debug_fld_s;
+} md_ib_debug_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains the directory specific mode bits. The contents of this     *
+ * register are preserved through soft-resets.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_DIR_CONFIG: directory mode bits (dir_flavor, ignore_dir_ecc);
+ * preserved through soft-resets per the banner above.  Endian variants
+ * mirror field order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_dir_config_u {
+	bdrkreg_t	md_dir_config_regval;
+	struct  {
+		bdrkreg_t	dc_dir_flavor             :	 1;
+		bdrkreg_t       dc_ignore_dir_ecc         :      1;
+		bdrkreg_t       dc_reserved               :     62;
+	} md_dir_config_fld_s;
+} md_dir_config_u_t;
+
+#else
+
+typedef union md_dir_config_u {
+	bdrkreg_t	md_dir_config_regval;
+	struct	{
+		bdrkreg_t	dc_reserved		  :	62;
+		bdrkreg_t	dc_ignore_dir_ecc	  :	 1;
+		bdrkreg_t	dc_dir_flavor		  :	 1;
+	} md_dir_config_fld_s;
+} md_dir_config_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  Contains information on uncorrectable and              *
+ * correctable directory ECC errors, along with protection ECC          *
+ * errors. The priority of ECC errors latched is: uncorrectable         *
+ * directory, protection error, correctable directory. Thus the valid   *
+ * bits signal:                                                         *
+ * 1xxx: uncorrectable directory ECC error (UCE)                        *
+ * 01xx: access protection double bit error (AE)                        *
+ * 001x: correctable directory ECC error (CE)                           *
+ * 0001: access protection correctable error (ACE)                      *
+ * If the UCE valid bit is set, the address field contains a pointer    *
+ * to the Hspec address of the offending directory entry, the           *
+ * syndrome field contains the bad syndrome, and the UCE overrun bit    *
+ * indicates whether multiple double-bit errors were received.          *
+ * If the UCE valid bit is clear but the AE valid bit is set, the       *
+ * address field contains a pointer to the Hspec address of the         *
+ * offending protection entry, the Bad Protection field contains the    *
+ * 4-bit bad protection value, the PROT_INDEX field shows which of      *
+ * the 8 protection values in the word was bad and the AE overrun bit   *
+ * indicates whether multiple AE errors were received.                  *
+ * If the UCE and AE valid bits are clear, but the CE valid bit is      *
+ * set, the address field contains a pointer to the Hspec address of    *
+ * the offending directory entry, the syndrome field contains the bad   *
+ * syndrome, and the CE overrun bit indicates whether multiple          *
+ * single-bit errors were received.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_DIR_ERROR: latched directory/protection ECC error state — Hspec
+ * address, bad syndrome, bad-protection value and prot index, plus the
+ * per-class overrun and valid bits described in the banner above.
+ * Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_dir_error_u {
+	bdrkreg_t	md_dir_error_regval;
+	struct  {
+		bdrkreg_t	de_reserved_3             :	 3;
+		bdrkreg_t       de_hspec_addr             :     30;
+		bdrkreg_t       de_reserved_2             :      7;
+		bdrkreg_t       de_bad_syn                :      7;
+		bdrkreg_t       de_reserved_1             :      1;
+                bdrkreg_t       de_bad_protect            :      4;
+                bdrkreg_t       de_prot_index             :      3;
+                bdrkreg_t       de_reserved               :      1;
+                bdrkreg_t       de_ace_overrun            :      1;
+                bdrkreg_t       de_ce_overrun             :      1;
+                bdrkreg_t       de_ae_overrun             :      1;
+                bdrkreg_t       de_uce_overrun            :      1;
+                bdrkreg_t       de_ace_valid              :      1;
+                bdrkreg_t       de_ce_valid               :      1;
+                bdrkreg_t       de_ae_valid               :      1;
+                bdrkreg_t       de_uce_valid              :      1;
+	} md_dir_error_fld_s;
+} md_dir_error_u_t;
+
+#else
+
+typedef union md_dir_error_u {
+	bdrkreg_t	md_dir_error_regval;
+	struct	{
+		bdrkreg_t	de_uce_valid		  :	 1;
+		bdrkreg_t	de_ae_valid		  :	 1;
+		bdrkreg_t	de_ce_valid		  :	 1;
+		bdrkreg_t	de_ace_valid		  :	 1;
+		bdrkreg_t	de_uce_overrun		  :	 1;
+		bdrkreg_t	de_ae_overrun		  :	 1;
+		bdrkreg_t	de_ce_overrun		  :	 1;
+		bdrkreg_t	de_ace_overrun		  :	 1;
+		bdrkreg_t	de_reserved		  :	 1;
+		bdrkreg_t	de_prot_index		  :	 3;
+		bdrkreg_t	de_bad_protect		  :	 4;
+		bdrkreg_t	de_reserved_1		  :	 1;
+		bdrkreg_t	de_bad_syn		  :	 7;
+		bdrkreg_t	de_reserved_2		  :	 7;
+		bdrkreg_t	de_hspec_addr		  :	30;
+		bdrkreg_t	de_reserved_3		  :	 3;
+	} md_dir_error_fld_s;
+} md_dir_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  Contains information on uncorrectable and              *
+ * correctable directory ECC errors, along with protection ECC          *
+ * errors. The priority of ECC errors latched is: uncorrectable         *
+ * directory, protection error, correctable directory. Thus the valid   *
+ * bits signal:                                                         *
+ * 1xxx: uncorrectable directory ECC error (UCE)                        *
+ * 01xx: access protection double bit error (AE)                        *
+ * 001x: correctable directory ECC error (CE)                           *
+ * 0001: access protection correctable error (ACE)                      *
+ * If the UCE valid bit is set, the address field contains a pointer    *
+ * to the Hspec address of the offending directory entry, the           *
+ * syndrome field contains the bad syndrome, and the UCE overrun bit    *
+ * indicates whether multiple double-bit errors were received.          *
+ * If the UCE valid bit is clear but the AE valid bit is set, the       *
+ * address field contains a pointer to the Hspec address of the         *
+ * offending protection entry, the Bad Protection field contains the    *
+ * 4-bit bad protection value, the PROT_INDEX field shows which of      *
+ * the 8 protection values in the word was bad and the AE overrun bit   *
+ * indicates whether multiple AE errors were received.                  *
+ * If the UCE and AE valid bits are clear, but the CE valid bit is      *
+ * set, the address field contains a pointer to the Hspec address of    *
+ * the offending directory entry, the syndrome field contains the bad   *
+ * syndrome, and the CE overrun bit indicates whether multiple          *
+ * single-bit errors were received.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_DIR_ERROR_CLR: same field layout as MD_DIR_ERROR (dec_* names);
+ * the banner above is duplicated from that register, so presumably this
+ * is the read-and-clear alias of it — TODO confirm against the Bedrock
+ * spec.  Endian variants mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_dir_error_clr_u {
+	bdrkreg_t	md_dir_error_clr_regval;
+	struct  {
+		bdrkreg_t	dec_reserved_3            :	 3;
+                bdrkreg_t       dec_hspec_addr            :     30;
+                bdrkreg_t       dec_reserved_2            :      7;
+                bdrkreg_t       dec_bad_syn               :      7;
+                bdrkreg_t       dec_reserved_1            :      1;
+                bdrkreg_t       dec_bad_protect           :      4;
+                bdrkreg_t       dec_prot_index            :      3;
+                bdrkreg_t       dec_reserved              :      1;
+                bdrkreg_t       dec_ace_overrun           :      1;
+                bdrkreg_t       dec_ce_overrun            :      1;
+                bdrkreg_t       dec_ae_overrun            :      1;
+                bdrkreg_t       dec_uce_overrun           :      1;
+                bdrkreg_t       dec_ace_valid             :      1;
+                bdrkreg_t       dec_ce_valid              :      1;
+                bdrkreg_t       dec_ae_valid              :      1;
+                bdrkreg_t       dec_uce_valid             :      1;
+	} md_dir_error_clr_fld_s;
+} md_dir_error_clr_u_t;
+
+#else
+
+typedef union md_dir_error_clr_u {
+	bdrkreg_t	md_dir_error_clr_regval;
+	struct	{
+		bdrkreg_t	dec_uce_valid		  :	 1;
+		bdrkreg_t	dec_ae_valid		  :	 1;
+		bdrkreg_t	dec_ce_valid		  :	 1;
+		bdrkreg_t	dec_ace_valid		  :	 1;
+		bdrkreg_t	dec_uce_overrun		  :	 1;
+		bdrkreg_t	dec_ae_overrun		  :	 1;
+		bdrkreg_t	dec_ce_overrun		  :	 1;
+		bdrkreg_t	dec_ace_overrun		  :	 1;
+		bdrkreg_t	dec_reserved		  :	 1;
+		bdrkreg_t	dec_prot_index		  :	 3;
+		bdrkreg_t	dec_bad_protect		  :	 4;
+		bdrkreg_t	dec_reserved_1		  :	 1;
+		bdrkreg_t	dec_bad_syn		  :	 7;
+		bdrkreg_t	dec_reserved_2		  :	 7;
+		bdrkreg_t	dec_hspec_addr		  :	30;
+		bdrkreg_t	dec_reserved_3		  :	 3;
+	} md_dir_error_clr_fld_s;
+} md_dir_error_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains information on requests that encounter no valid protocol   *
+ * table entry.                                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_PROTOCOL_ERROR: details of a request that hit no valid protocol
+ * table entry — initiator, message type, access/priority, directory
+ * state/format, address, and overrun/valid flags.  Endian variants
+ * mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_protocol_error_u {
+	bdrkreg_t	md_protocol_error_regval;
+	struct  {
+		bdrkreg_t	pe_overrun                :	 1;
+                bdrkreg_t       pe_pointer_me             :      1;
+                bdrkreg_t       pe_reserved_1             :      1;
+                bdrkreg_t       pe_address                :     30;
+                bdrkreg_t       pe_reserved               :      1;
+                bdrkreg_t       pe_ptr1_btmbits           :      3;
+                bdrkreg_t       pe_dir_format             :      2;
+                bdrkreg_t       pe_dir_state              :      3;
+                bdrkreg_t       pe_priority               :      1;
+                bdrkreg_t       pe_access                 :      1;
+                bdrkreg_t       pe_msg_type               :      8;
+                bdrkreg_t       pe_initiator              :     11;
+                bdrkreg_t       pe_valid                  :      1;
+	} md_protocol_error_fld_s;
+} md_protocol_error_u_t;
+
+#else
+
+typedef union md_protocol_error_u {
+	bdrkreg_t	md_protocol_error_regval;
+	struct	{
+		bdrkreg_t	pe_valid		  :	 1;
+		bdrkreg_t	pe_initiator		  :	11;
+		bdrkreg_t	pe_msg_type		  :	 8;
+		bdrkreg_t	pe_access		  :	 1;
+		bdrkreg_t	pe_priority		  :	 1;
+		bdrkreg_t	pe_dir_state		  :	 3;
+		bdrkreg_t	pe_dir_format		  :	 2;
+		bdrkreg_t	pe_ptr1_btmbits		  :	 3;
+		bdrkreg_t	pe_reserved		  :	 1;
+		bdrkreg_t	pe_address		  :	30;
+		bdrkreg_t	pe_reserved_1		  :	 1;
+		bdrkreg_t	pe_pointer_me		  :	 1;
+		bdrkreg_t	pe_overrun		  :	 1;
+	} md_protocol_error_fld_s;
+} md_protocol_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains information on requests that encounter no valid protocol   *
+ * table entry.                                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_PROTOCOL_ERR_CLR: same field layout as MD_PROTOCOL_ERROR (pec_*
+ * names); the duplicated banner suggests a read-and-clear alias of that
+ * register — TODO confirm against the Bedrock spec.  Endian variants
+ * mirror the same fields in opposite order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_protocol_err_clr_u {
+	bdrkreg_t	md_protocol_err_clr_regval;
+	struct  {
+		bdrkreg_t	pec_overrun               :	 1;
+                bdrkreg_t       pec_pointer_me            :      1;
+                bdrkreg_t       pec_reserved_1            :      1;
+                bdrkreg_t       pec_address               :     30;
+                bdrkreg_t       pec_reserved              :      1;
+                bdrkreg_t       pec_ptr1_btmbits          :      3;
+                bdrkreg_t       pec_dir_format            :      2;
+                bdrkreg_t       pec_dir_state             :      3;
+                bdrkreg_t       pec_priority              :      1;
+                bdrkreg_t       pec_access                :      1;
+                bdrkreg_t       pec_msg_type              :      8;
+                bdrkreg_t       pec_initiator             :     11;
+                bdrkreg_t       pec_valid                 :      1;
+	} md_protocol_err_clr_fld_s;
+} md_protocol_err_clr_u_t;
+
+#else
+
+typedef union md_protocol_err_clr_u {
+	bdrkreg_t	md_protocol_err_clr_regval;
+	struct	{
+		bdrkreg_t	pec_valid		  :	 1;
+		bdrkreg_t	pec_initiator		  :	11;
+		bdrkreg_t	pec_msg_type		  :	 8;
+		bdrkreg_t	pec_access		  :	 1;
+		bdrkreg_t	pec_priority		  :	 1;
+		bdrkreg_t	pec_dir_state		  :	 3;
+		bdrkreg_t	pec_dir_format		  :	 2;
+		bdrkreg_t	pec_ptr1_btmbits	  :	 3;
+		bdrkreg_t	pec_reserved		  :	 1;
+		bdrkreg_t	pec_address		  :	30;
+		bdrkreg_t	pec_reserved_1		  :	 1;
+		bdrkreg_t	pec_pointer_me		  :	 1;
+		bdrkreg_t	pec_overrun		  :	 1;
+	} md_protocol_err_clr_fld_s;
+} md_protocol_err_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains the address of the page and the requestor which caused a   *
+ * migration threshold to be exceeded. Also contains the type of        *
+ * threshold exceeded and an overrun bit. For Value mode type           *
+ * interrupts, it indicates whether the local or the remote counter     *
+ * triggered the interrupt. Unlike most registers, when the overrun     *
+ * bit is set the register contains information on the most recent      *
+ * (the last) migration candidate.                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+/* MD_MIG_CANDIDATE: page address and initiator that exceeded a migration
+ * threshold, with type/local indicators and overrun/valid flags (see the
+ * banner above).  Endian variants mirror field order. */
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mig_candidate_u {
+	bdrkreg_t	md_mig_candidate_regval;
+	struct  {
+		bdrkreg_t	mc_address                :	21;
+                bdrkreg_t       mc_initiator              :     11;
+                bdrkreg_t       mc_overrun                :      1;
+                bdrkreg_t       mc_type                   :      1;
+                bdrkreg_t       mc_local                  :      1;
+                bdrkreg_t       mc_reserved               :     28;
+                bdrkreg_t       mc_valid                  :      1;
+	} md_mig_candidate_fld_s;
+} md_mig_candidate_u_t;
+
+#else
+
+typedef union md_mig_candidate_u {
+	bdrkreg_t	md_mig_candidate_regval;
+	struct	{
+		bdrkreg_t	mc_valid		  :	 1;
+		bdrkreg_t	mc_reserved		  :	28;
+		bdrkreg_t	mc_local		  :	 1;
+		bdrkreg_t	mc_type			  :	 1;
+		bdrkreg_t	mc_overrun		  :	 1;
+		bdrkreg_t	mc_initiator		  :	11;
+		bdrkreg_t	mc_address		  :	21;
+	} md_mig_candidate_fld_s;
+} md_mig_candidate_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains the address of the page and the requestor which caused a   *
+ * migration threshold to be exceeded. Also contains the type of        *
+ * threshold exceeded and an overrun bit. For Value mode type           *
+ * interrupts, it indicates whether the local or the remote counter     *
+ * triggered the interrupt. Unlike most registers, when the overrun     *
+ * bit is set the register contains information on the most recent      *
+ * (the last) migration candidate.                                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mig_candidate_clr_u {
+	bdrkreg_t	md_mig_candidate_clr_regval;
+	struct  {
+		bdrkreg_t	mcc_address               :	21;
+                bdrkreg_t       mcc_initiator             :     11;
+                bdrkreg_t       mcc_overrun               :      1;
+                bdrkreg_t       mcc_type                  :      1;
+                bdrkreg_t       mcc_local                 :      1;
+                bdrkreg_t       mcc_reserved              :     28;
+                bdrkreg_t       mcc_valid                 :      1;
+	} md_mig_candidate_clr_fld_s;
+} md_mig_candidate_clr_u_t;
+
+#else
+
+typedef union md_mig_candidate_clr_u {
+	bdrkreg_t	md_mig_candidate_clr_regval;
+	struct	{
+		bdrkreg_t	mcc_valid		  :	 1;
+		bdrkreg_t	mcc_reserved		  :	28;
+		bdrkreg_t	mcc_local		  :	 1;
+		bdrkreg_t	mcc_type		  :	 1;
+		bdrkreg_t	mcc_overrun		  :	 1;
+		bdrkreg_t	mcc_initiator		  :	11;
+		bdrkreg_t	mcc_address		  :	21;
+	} md_mig_candidate_clr_fld_s;
+} md_mig_candidate_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Controls the generation of page-migration interrupts and loading    *
+ * of the MIGRATION_CANDIDATE register for pages which are using the    *
+ * difference between the requestor and home counts. If the             *
+ * difference is greater-than or equal to the threshold                 *
+ * contained in the register, and the valid bit is set, the migration   *
+ * candidate is loaded (and an interrupt generated if enabled by the    *
+ * page migration mode).                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mig_diff_thresh_u {
+	bdrkreg_t	md_mig_diff_thresh_regval;
+	struct  {
+		bdrkreg_t	mdt_threshold             :	15;
+                bdrkreg_t       mdt_reserved_1            :     17;
+                bdrkreg_t       mdt_th_action             :      3;
+                bdrkreg_t       mdt_sat_action            :      3;
+                bdrkreg_t       mdt_reserved              :     25;
+                bdrkreg_t       mdt_valid                 :      1;
+	} md_mig_diff_thresh_fld_s;
+} md_mig_diff_thresh_u_t;
+
+#else
+
+typedef union md_mig_diff_thresh_u {
+	bdrkreg_t	md_mig_diff_thresh_regval;
+	struct	{
+		bdrkreg_t	mdt_valid		  :	 1;
+		bdrkreg_t	mdt_reserved		  :	25;
+		bdrkreg_t	mdt_sat_action		  :	 3;
+		bdrkreg_t	mdt_th_action		  :	 3;
+		bdrkreg_t	mdt_reserved_1		  :	17;
+		bdrkreg_t	mdt_threshold		  :	15;
+	} md_mig_diff_thresh_fld_s;
+} md_mig_diff_thresh_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Controls the generation of page-migration interrupts and loading    *
+ * of the MIGRATION_CANDIDATE register for pages that are using the     *
+ * absolute value of the requestor count. If the value is               *
+ * greater-than or equal to the threshold contained in the register,    *
+ * and the register valid bit is set, the migration candidate is        *
+ * loaded and an interrupt generated. For the value mode of page        *
+ * migration, there are two variations. In the first variation,         *
+ * interrupts are only generated when the remote counter reaches the    *
+ * threshold, not when the local counter reaches the threshold. In      *
+ * the second mode, both the local counter and the remote counter       *
+ * generate interrupts if they reach the threshold. This second mode    *
+ * is useful for performance monitoring, to track the number of local   *
+ * and remote references to a page. LOCAL_INT determines whether we     *
+ * will generate interrupts when the local counter reaches the          *
+ * threshold.                                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mig_value_thresh_u {
+	bdrkreg_t	md_mig_value_thresh_regval;
+	struct  {
+		bdrkreg_t	mvt_threshold             :	15;
+                bdrkreg_t       mvt_reserved_1            :     17;
+                bdrkreg_t       mvt_th_action             :      3;
+                bdrkreg_t       mvt_sat_action            :      3;
+                bdrkreg_t       mvt_reserved              :     24;
+                bdrkreg_t       mvt_local_int             :      1;
+                bdrkreg_t       mvt_valid                 :      1;
+	} md_mig_value_thresh_fld_s;
+} md_mig_value_thresh_u_t;
+
+#else
+
+typedef union md_mig_value_thresh_u {
+        bdrkreg_t       md_mig_value_thresh_regval;
+        struct  {
+                bdrkreg_t       mvt_valid                 :      1;
+                bdrkreg_t       mvt_local_int             :      1;
+                bdrkreg_t       mvt_reserved              :     24;
+                bdrkreg_t       mvt_sat_action            :      3;
+                bdrkreg_t       mvt_th_action             :      3;
+                bdrkreg_t       mvt_reserved_1            :     17;
+                bdrkreg_t       mvt_threshold             :     15;
+        } md_mig_value_thresh_fld_s;
+} md_mig_value_thresh_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains the controls for the sizing of the three MOQH request      *
+ * queues. The maximum (and default) value is 4. Queue sizes are in     *
+ * flits. One header equals one flit.                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_outgoing_rq_queue_size_u {
+	bdrkreg_t	md_outgoing_rq_queue_size_regval;
+	struct  {
+		bdrkreg_t	orqs_reserved_3           :	 8;
+                bdrkreg_t       orqs_moqh_p0_rq_size      :      3;
+                bdrkreg_t       orqs_reserved_2           :      5;
+                bdrkreg_t       orqs_moqh_p1_rq_size      :      3;
+                bdrkreg_t       orqs_reserved_1           :      5;
+                bdrkreg_t       orqs_moqh_np_rq_size      :      3;
+                bdrkreg_t       orqs_reserved             :     37;
+	} md_outgoing_rq_queue_size_fld_s;
+} md_outgoing_rq_queue_size_u_t;
+
+#else
+
+typedef union md_outgoing_rq_queue_size_u {
+	bdrkreg_t	md_outgoing_rq_queue_size_regval;
+	struct	{
+		bdrkreg_t	orqs_reserved		  :	37;
+		bdrkreg_t	orqs_moqh_np_rq_size	  :	 3;
+		bdrkreg_t	orqs_reserved_1		  :	 5;
+		bdrkreg_t	orqs_moqh_p1_rq_size	  :	 3;
+		bdrkreg_t	orqs_reserved_2		  :	 5;
+		bdrkreg_t	orqs_moqh_p0_rq_size	  :	 3;
+		bdrkreg_t	orqs_reserved_3		  :	 8;
+	} md_outgoing_rq_queue_size_fld_s;
+} md_outgoing_rq_queue_size_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains the 32-bit directory word failing BIST.                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_db_err_data_u {
+	bdrkreg_t	md_bist_db_err_data_regval;
+	struct  {
+		bdrkreg_t	bded_db_er_d              :	32;
+		bdrkreg_t       bded_reserved             :     32;
+	} md_bist_db_err_data_fld_s;
+} md_bist_db_err_data_u_t;
+
+#else
+
+typedef union md_bist_db_err_data_u {
+	bdrkreg_t	md_bist_db_err_data_regval;
+	struct	{
+		bdrkreg_t	bded_reserved		  :	32;
+		bdrkreg_t	bded_db_er_d		  :	32;
+	} md_bist_db_err_data_fld_s;
+} md_bist_db_err_data_u_t;
+
+#endif
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains 2 bits that allow the selection of DB debug information    *
+ * at the debug port (see the design specification for description of  *
+ * the available debug information).                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_db_debug_u {
+	bdrkreg_t	md_db_debug_regval;
+	struct  {
+		bdrkreg_t	dd_db_debug_sel           :	 2;
+		bdrkreg_t       dd_reserved               :     62;
+	} md_db_debug_fld_s;
+} md_db_debug_u_t;
+
+#else
+
+typedef union md_db_debug_u {
+	bdrkreg_t	md_db_debug_regval;
+	struct	{
+		bdrkreg_t	dd_reserved		  :	62;
+		bdrkreg_t	dd_db_debug_sel		  :	 2;
+	} md_db_debug_fld_s;
+} md_db_debug_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains the IgnoreECC bit. When this bit is set, all ECC errors    *
+ * are ignored. ECC bits will still be generated on writebacks.         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mb_ecc_config_u {
+	bdrkreg_t	md_mb_ecc_config_regval;
+	struct  {
+		bdrkreg_t	mec_ignore_dataecc        :	 1;
+		bdrkreg_t       mec_reserved              :     63;
+	} md_mb_ecc_config_fld_s;
+} md_mb_ecc_config_u_t;
+
+#else
+
+typedef union md_mb_ecc_config_u {
+	bdrkreg_t	md_mb_ecc_config_regval;
+	struct	{
+		bdrkreg_t	mec_reserved		  :	63;
+		bdrkreg_t	mec_ignore_dataecc	  :	 1;
+	} md_mb_ecc_config_fld_s;
+} md_mb_ecc_config_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  Contains information on read memory errors (both       *
+ * correctable and uncorrectable) and write memory errors (always       *
+ * uncorrectable). The errors are prioritized as follows:               *
+ *  highest: uncorrectable read error (READ_UCE)                        *
+ *  middle: write error (WRITE_UCE)                                     *
+ *  lowest: correctable read error (READ_CE)                            *
+ * Each type of error maintains a two-bit valid/overrun field           *
+ * (READ_UCE, WRITE_UCE, or READ_CE). Bit 0 of each two-bit field       *
+ * corresponds to the valid bit, and bit 1 of each two-bit field        *
+ * corresponds to the overrun bit.                                      *
+ * The rule for the valid bit is that it gets set whenever that error   *
+ * occurs, regardless of whether a higher priority error has occurred.  *
+ * The rule for the overrun bit is that it gets set whenever we are     *
+ * unable to record the address information for this particular         *
+ * error, due to a previous error of the same or higher priority.       *
+ * Note that the syndrome and address information always corresponds    *
+ * to the earliest, highest priority error.                             *
+ *  Finally, the UCE_DIFF_ADDR bit is set whenever there have been      *
+ * several uncorrectable errors, to different cache line addresses.     *
+ * If all the UCEs were to the same cache line address, then            *
+ * UCE_DIFF_ADDR will be 0. This allows the operating system to         *
+ * detect the case where a UCE error is read exclusively, and then      *
+ * written back by the processor. If the bit is 0, it indicates that    *
+ * no information has been lost about UCEs on other cache lines. In     *
+ * particular, partial writes do a read modify write of the cache       *
+ * line. A UCE read error will be set when the cache line is read,      *
+ * and a UCE write error will occur when the cache line is written      *
+ * back, but the UCE_DIFF_ADDR will not be set.                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mem_error_u {
+	bdrkreg_t	md_mem_error_regval;
+	struct  {
+		bdrkreg_t	me_reserved_5             :	 3;
+                bdrkreg_t       me_address                :     30;
+                bdrkreg_t       me_reserved_4             :      7;
+                bdrkreg_t       me_bad_syn                :      8;
+                bdrkreg_t       me_reserved_3             :      4;
+                bdrkreg_t       me_read_ce                :      2;
+                bdrkreg_t       me_reserved_2             :      2;
+                bdrkreg_t       me_write_uce              :      2;
+                bdrkreg_t       me_reserved_1             :      2;
+                bdrkreg_t       me_read_uce               :      2;
+                bdrkreg_t       me_reserved               :      1;
+                bdrkreg_t       me_uce_diff_addr          :      1;
+	} md_mem_error_fld_s;
+} md_mem_error_u_t;
+
+#else
+
+typedef union md_mem_error_u {
+	bdrkreg_t	md_mem_error_regval;
+	struct	{
+		bdrkreg_t	me_uce_diff_addr	  :	 1;
+		bdrkreg_t	me_reserved		  :	 1;
+		bdrkreg_t	me_read_uce		  :	 2;
+		bdrkreg_t	me_reserved_1		  :	 2;
+		bdrkreg_t	me_write_uce		  :	 2;
+		bdrkreg_t	me_reserved_2		  :	 2;
+		bdrkreg_t	me_read_ce		  :	 2;
+		bdrkreg_t	me_reserved_3		  :	 4;
+		bdrkreg_t	me_bad_syn		  :	 8;
+		bdrkreg_t	me_reserved_4		  :	 7;
+		bdrkreg_t	me_address		  :	30;
+		bdrkreg_t	me_reserved_5		  :	 3;
+	} md_mem_error_fld_s;
+} md_mem_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  Contains information on read memory errors (both       *
+ * correctable and uncorrectable) and write memory errors (always       *
+ * uncorrectable). The errors are prioritized as follows:               *
+ *  highest: uncorrectable read error (READ_UCE)                        *
+ *  middle: write error (WRITE_UCE)                                     *
+ *  lowest: correctable read error (READ_CE)                            *
+ * Each type of error maintains a two-bit valid/overrun field           *
+ * (READ_UCE, WRITE_UCE, or READ_CE). Bit 0 of each two-bit field       *
+ * corresponds to the valid bit, and bit 1 of each two-bit field        *
+ * corresponds to the overrun bit.                                      *
+ * The rule for the valid bit is that it gets set whenever that error   *
+ * occurs, regardless of whether a higher priority error has occurred.  *
+ * The rule for the overrun bit is that it gets set whenever we are     *
+ * unable to record the address information for this particular         *
+ * error, due to a previous error of the same or higher priority.       *
+ * Note that the syndrome and address information always corresponds    *
+ * to the earliest, highest priority error.                             *
+ *  Finally, the UCE_DIFF_ADDR bit is set whenever there have been      *
+ * several uncorrectable errors, to different cache line addresses.     *
+ * If all the UCEs were to the same cache line address, then            *
+ * UCE_DIFF_ADDR will be 0. This allows the operating system to         *
+ * detect the case where a UCE error is read exclusively, and then      *
+ * written back by the processor. If the bit is 0, it indicates that    *
+ * no information has been lost about UCEs on other cache lines. In     *
+ * particular, partial writes do a read modify write of the cache       *
+ * line. A UCE read error will be set when the cache line is read,      *
+ * and a UCE write error will occur when the cache line is written      *
+ * back, but the UCE_DIFF_ADDR will not be set.                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mem_error_clr_u {
+	bdrkreg_t	md_mem_error_clr_regval;
+	struct  {
+		bdrkreg_t	mec_reserved_5            :	 3;
+                bdrkreg_t       mec_address               :     30;
+                bdrkreg_t       mec_reserved_4            :      7;
+                bdrkreg_t       mec_bad_syn               :      8;
+                bdrkreg_t       mec_reserved_3            :      4;
+                bdrkreg_t       mec_read_ce               :      2;
+                bdrkreg_t       mec_reserved_2            :      2;
+                bdrkreg_t       mec_write_uce             :      2;
+                bdrkreg_t       mec_reserved_1            :      2;
+                bdrkreg_t       mec_read_uce              :      2;
+                bdrkreg_t       mec_reserved              :      1;
+                bdrkreg_t       mec_uce_diff_addr         :      1;
+	} md_mem_error_clr_fld_s;
+} md_mem_error_clr_u_t;
+
+#else
+
+typedef union md_mem_error_clr_u {
+	bdrkreg_t	md_mem_error_clr_regval;
+	struct	{
+		bdrkreg_t	mec_uce_diff_addr	  :	 1;
+		bdrkreg_t	mec_reserved		  :	 1;
+		bdrkreg_t	mec_read_uce		  :	 2;
+		bdrkreg_t	mec_reserved_1		  :	 2;
+		bdrkreg_t	mec_write_uce		  :	 2;
+		bdrkreg_t	mec_reserved_2		  :	 2;
+		bdrkreg_t	mec_read_ce		  :	 2;
+		bdrkreg_t	mec_reserved_3		  :	 4;
+		bdrkreg_t	mec_bad_syn		  :	 8;
+		bdrkreg_t	mec_reserved_4		  :	 7;
+		bdrkreg_t	mec_address		  :	30;
+		bdrkreg_t	mec_reserved_5		  :	 3;
+	} md_mem_error_clr_fld_s;
+} md_mem_error_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains one-quarter of the error memory line failing BIST.         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_mb_err_data_0_u {
+	bdrkreg_t	md_bist_mb_err_data_0_regval;
+	struct  {
+		bdrkreg_t	bmed0_mb_er_d             :	36;
+		bdrkreg_t       bmed0_reserved            :     28;
+	} md_bist_mb_err_data_0_fld_s;
+} md_bist_mb_err_data_0_u_t;
+
+#else
+
+typedef union md_bist_mb_err_data_0_u {
+	bdrkreg_t	md_bist_mb_err_data_0_regval;
+	struct	{
+		bdrkreg_t	bmed0_reserved		  :	28;
+		bdrkreg_t	bmed0_mb_er_d		  :	36;
+	} md_bist_mb_err_data_0_fld_s;
+} md_bist_mb_err_data_0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains one-quarter of the error memory line failing BIST.         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_mb_err_data_1_u {
+	bdrkreg_t	md_bist_mb_err_data_1_regval;
+	struct  {
+		bdrkreg_t	bmed1_mb_er_d             :	36;
+		bdrkreg_t       bmed1_reserved            :     28;
+	} md_bist_mb_err_data_1_fld_s;
+} md_bist_mb_err_data_1_u_t;
+
+#else
+
+typedef union md_bist_mb_err_data_1_u {
+	bdrkreg_t	md_bist_mb_err_data_1_regval;
+	struct	{
+		bdrkreg_t	bmed1_reserved		  :	28;
+		bdrkreg_t	bmed1_mb_er_d		  :	36;
+	} md_bist_mb_err_data_1_fld_s;
+} md_bist_mb_err_data_1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains one-quarter of the error memory line failing BIST.         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_mb_err_data_2_u {
+	bdrkreg_t	md_bist_mb_err_data_2_regval;
+	struct  {
+		bdrkreg_t	bmed2_mb_er_d             :	36;
+		bdrkreg_t       bmed2_reserved            :     28;
+	} md_bist_mb_err_data_2_fld_s;
+} md_bist_mb_err_data_2_u_t;
+
+#else
+
+typedef union md_bist_mb_err_data_2_u {
+	bdrkreg_t	md_bist_mb_err_data_2_regval;
+	struct	{
+		bdrkreg_t	bmed2_reserved		  :	28;
+		bdrkreg_t	bmed2_mb_er_d		  :	36;
+	} md_bist_mb_err_data_2_fld_s;
+} md_bist_mb_err_data_2_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains one-quarter of the error memory line failing BIST.         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_bist_mb_err_data_3_u {
+	bdrkreg_t	md_bist_mb_err_data_3_regval;
+	struct  {
+		bdrkreg_t	bmed3_mb_er_d             :	36;
+		bdrkreg_t       bmed3_reserved            :     28;
+	} md_bist_mb_err_data_3_fld_s;
+} md_bist_mb_err_data_3_u_t;
+
+#else
+
+typedef union md_bist_mb_err_data_3_u {
+	bdrkreg_t	md_bist_mb_err_data_3_regval;
+	struct	{
+		bdrkreg_t	bmed3_reserved		  :	28;
+		bdrkreg_t	bmed3_mb_er_d		  :	36;
+	} md_bist_mb_err_data_3_fld_s;
+} md_bist_mb_err_data_3_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Contains 1 bit that allows the selection of MB debug information    *
+ * at the debug port (see the design specification for the available    *
+ * debug information).                                                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union md_mb_debug_u {
+	bdrkreg_t	md_mb_debug_regval;
+	struct  {
+		bdrkreg_t	md_mb_debug_sel           :	 1;
+		bdrkreg_t       md_reserved               :     63;
+	} md_mb_debug_fld_s;
+} md_mb_debug_u_t;
+
+#else
+
+typedef union md_mb_debug_u {
+	bdrkreg_t	md_mb_debug_regval;
+	struct	{
+		bdrkreg_t	md_reserved		  :	63;
+		bdrkreg_t	md_mb_debug_sel		  :	 1;
+	} md_mb_debug_fld_s;
+} md_mb_debug_u_t;
+
+#endif
+
+
+
+
+
+
+#endif /* _LANGUAGE_C */
+
+/************************************************************************
+ *                                                                      *
+ *               MAKE ALL ADDITIONS AFTER THIS LINE                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#endif /* _ASM_SN_SN1_HUBMD_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubmd_next.h linux/include/asm-ia64/sn/sn1/hubmd_next.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubmd_next.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubmd_next.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,815 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBMD_NEXT_H
+#define _ASM_SN_SN1_HUBMD_NEXT_H
+
+#ifdef BRINGUP
+/* XXX moved over from SN/SN0/hubmd.h -- each should be checked for SN1 */
+/* In fact, most of this stuff is wrong. Some is correct, such as
+ * MD_PAGE_SIZE and MD_PAGE_NUM_SHFT.
+ */
+
+#define MD_PERF_COUNTERS        6
+#define MD_PERF_SETS            6
+
+#define MD_SIZE_EMPTY           0       
+#define MD_SIZE_64MB            1       
+#define MD_SIZE_128MB           2       
+#define MD_SIZE_256MB           3
+#define MD_SIZE_512MB           4      
+#define MD_SIZE_1GB             5      
+
+#define MD_SIZE_BYTES(size)     ((size) == 0 ? 0 : 0x2000000L << (size))
+#define MD_SIZE_MBYTES(size)    ((size) == 0 ? 0 :   0x20       << (size))
+#define MD_NUM_ENABLED(_x)	((_x & 0x1) + ((_x >> 1) & 0x1) + \
+				((_x >> 2) & 0x1) + ((_x >> 3) & 0x1))
+
+
+/* Hardware page size and shift */
+
+#define MD_PAGE_SIZE            16384    /* Page size in bytes              */
+#define MD_PAGE_NUM_SHFT        14       /* Address to page number shift    */
+
+#define MMC_IO_PROT 		(UINT64_CAST 1 << 45)
+
+/* Register offsets from LOCAL_HUB or REMOTE_HUB */
+#define MD_PERF_SEL             0x210000 /* Select perf monitor events      */
+
+/* MD_MIG_VALUE_THRESH bit definitions */
+
+#define MD_MIG_VALUE_THRES_VALID_MASK (UINT64_CAST 0x1 << 63)
+#define MD_MIG_VALUE_THRES_VALUE_MASK (UINT64_CAST 0xfffff)
+
+/* MD_MIG_CANDIDATE bit definitions */
+
+#define MD_MIG_CANDIDATE_VALID_MASK (UINT64_CAST 0x1 << 63)
+#define MD_MIG_CANDIDATE_VALID_SHFT 63
+#define MD_MIG_CANDIDATE_TYPE_MASK (UINT64_CAST 0x1 << 30)
+#define MD_MIG_CANDIDATE_TYPE_SHFT 30
+#define MD_MIG_CANDIDATE_OVERRUN_MASK (UINT64_CAST 0x1 << 29)
+#define MD_MIG_CANDIDATE_OVERRUN_SHFT 29
+#define MD_MIG_CANDIDATE_NODEID_MASK (UINT64_CAST 0x1ff << 20)
+#define MD_MIG_CANDIDATE_NODEID_SHFT 20
+#define MD_MIG_CANDIDATE_ADDR_MASK (UINT64_CAST 0x3ffff)
+
+
+/* XXX protection and migration are completely revised on SN1.  On
+   SN0, the reference count and protection fields were accessed in the
+   same word, but on SN1 they reside at different addresses.  The
+   users of these macros will need to be rewritten.  Also, the MD page
+   size is 16K on SN1 but 4K on SN0.  */
+
+/* Premium SIMM protection entry shifts and masks. */
+
+#define MD_PPROT_SHFT           0                       /* Prot. field      */
+#define MD_PPROT_MASK           0xf
+#define MD_PPROT_REFCNT_SHFT    5                       /* Reference count  */
+#define MD_PPROT_REFCNT_WIDTH   0x7ffff
+#define MD_PPROT_REFCNT_MASK    (MD_PPROT_REFCNT_WIDTH << 5)
+
+#define MD_PPROT_IO_SHFT        8                       /* I/O Prot field   */
+
+/* Standard SIMM protection entry shifts and masks. */
+
+#define MD_SPROT_SHFT           0                       /* Prot. field      */
+#define MD_SPROT_MASK           0xf
+#define MD_SPROT_IO_SHFT	8
+#define MD_SPROT_REFCNT_SHFT    5                       /* Reference count  */
+#define MD_SPROT_REFCNT_WIDTH   0x7ff
+#define MD_SPROT_REFCNT_MASK    (MD_SPROT_REFCNT_WIDTH << 5)
+
+/* Migration modes used in protection entries */
+
+#define MD_PROT_MIGMD_IREL      (UINT64_CAST 0x3 << 3)
+#define MD_PROT_MIGMD_IABS      (UINT64_CAST 0x2 << 3)
+#define MD_PROT_MIGMD_PREL      (UINT64_CAST 0x1 << 3)
+#define MD_PROT_MIGMD_OFF       (UINT64_CAST 0x0 << 3)
+
+/*
+ * Operations on Memory/Directory DIMM control register
+ */
+
+#define DIRTYPE_PREMIUM 1
+#define DIRTYPE_STANDARD 0
+
+/*
+ * Operations on page migration count difference and absolute threshold
+ * registers
+ */
+
+#define MD_MIG_VALUE_THRESH_GET(region) (                               \
+        REMOTE_HUB_L((region), MD_MIG_VALUE_THRESH) &  \
+        MD_MIG_VALUE_THRES_VALUE_MASK)
+
+#define MD_MIG_VALUE_THRESH_SET(region, value) (                        \
+        REMOTE_HUB_S((region), MD_MIG_VALUE_THRESH,                     \
+                MD_MIG_VALUE_THRES_VALID_MASK | (value)))
+
+#define MD_MIG_VALUE_THRESH_ENABLE(region) (                    \
+        REMOTE_HUB_S((region), MD_MIG_VALUE_THRESH,                     \
+                REMOTE_HUB_L((region), MD_MIG_VALUE_THRESH)             \
+                             | MD_MIG_VALUE_THRES_VALID_MASK))
+
+/*
+ * Operations on page migration candidate register
+ */
+
+#define MD_MIG_CANDIDATE_GET(my_region_id) ( \
+        REMOTE_HUB_L((my_region_id), MD_MIG_CANDIDATE_CLR))
+
+#define MD_MIG_CANDIDATE_HWPFN(value) ((value) & MD_MIG_CANDIDATE_ADDR_MASK)
+
+#define MD_MIG_CANDIDATE_NODEID(value) ( \
+        ((value) & MD_MIG_CANDIDATE_NODEID_MASK) >> MD_MIG_CANDIDATE_NODEID_SHFT)
+
+#define MD_MIG_CANDIDATE_TYPE(value) ( \
+        ((value) & MD_MIG_CANDIDATE_TYPE_MASK) >> MD_MIG_CANDIDATE_TYPE_SHFT)
+
+#define MD_MIG_CANDIDATE_VALID(value) ( \
+        ((value) & MD_MIG_CANDIDATE_VALID_MASK) >> MD_MIG_CANDIDATE_VALID_SHFT)
+
+/*
+ * Macros to retrieve fields in the protection entry
+ */
+
+/* for Premium SIMM */
+#define MD_PPROT_REFCNT_GET(value) ( \
+        ((value) & MD_PPROT_REFCNT_MASK) >> MD_PPROT_REFCNT_SHFT)
+
+/* for Standard SIMM */
+#define MD_SPROT_REFCNT_GET(value) ( \
+        ((value) & MD_SPROT_REFCNT_MASK) >> MD_SPROT_REFCNT_SHFT)
+
+#if _LANGUAGE_C
+#ifdef LITTLE_ENDIAN
+
+typedef union md_perf_sel {
+        uint64_t      perf_sel_reg;
+        struct  {
+                uint64_t      perf_sel  :  3,
+				perf_en   :  1,
+				perf_rsvd : 60;
+        } perf_sel_bits;
+} md_perf_sel_t;
+
+#else
+
+typedef union md_perf_sel {
+	uint64_t	perf_sel_reg;
+	struct	{
+		uint64_t	perf_rsvd : 60,
+				perf_en	  :  1,
+				perf_sel  :  3;
+	} perf_sel_bits;
+} md_perf_sel_t;
+
+#endif
+#endif /* _LANGUAGE_C */
+
+#endif /* BRINGUP */
+
+/* Like SN0, SN1 supports a mostly-flat address space with 8
+   CPU-visible, evenly spaced, contiguous regions, or "software
+   banks".  On SN1, software bank n begins at addresses n * 1GB, 
+   0 <= n < 8.
+
+   Physically (and very unlike SN0), each SN1 node board contains 8
+   dimm sockets, arranged as 4 "DIMM banks" of 2 dimms each.  DIMM
+   size and width (x4/x8) is assigned per dimm bank.  Each DIMM bank
+   consists of 2 "physical banks", one on the front sides of the 2
+   DIMMs and the other on the back sides.  Therefore a node has a
+   total of 8 ( = 4 * 2) physical banks.  They are collectively
+   referred to as "locational banks", since the locational bank number
+   depends on the physical location of the DIMMs on the board.
+
+	      Dimm bank 0, Phys bank 0a (locational bank 0a)
+     Slot D0  ----------------------------------------------
+	      Dimm bank 0, Phys bank 1a (locational bank 1a)
+
+	      Dimm bank 1, Phys bank 0a (locational bank 2a)
+     Slot D1  ----------------------------------------------
+	      Dimm bank 1, Phys bank 1a (locational bank 3a)
+
+	      Dimm bank 2, Phys bank 0a (locational bank 4a)
+     Slot D2  ----------------------------------------------
+	      Dimm bank 2, Phys bank 1a (locational bank 5a)
+
+	      Dimm bank 3, Phys bank 0a (locational bank 6a)
+     Slot D3  ----------------------------------------------
+	      Dimm bank 3, Phys bank 1a (locational bank 7a)
+
+	      Dimm bank 0, Phys bank 0b (locational bank 0b)
+     Slot D4  ----------------------------------------------
+	      Dimm bank 0, Phys bank 1b (locational bank 1b)
+
+	      Dimm bank 1, Phys bank 0b (locational bank 2b)
+     Slot D5  ----------------------------------------------
+	      Dimm bank 1, Phys bank 1b (locational bank 3b)
+
+	      Dimm bank 2, Phys bank 0b (locational bank 4b)
+     Slot D6  ----------------------------------------------
+	      Dimm bank 2, Phys bank 1b (locational bank 5b)
+
+	      Dimm bank 3, Phys bank 0b (locational bank 6b)
+     Slot D7  ----------------------------------------------
+	      Dimm bank 3, Phys bank 1b (locational bank 7b)
+
+   Since bank size is assigned per DIMM bank, each pair of locational
+   banks must have the same size.  However, they may be
+   enabled/disabled individually.
+
+   The locational banks map to the software banks via the dimm0_sel
+   field in MD_MEMORY_CONFIG.  When the field is 0 (the usual case),
+   the mapping is direct:  eg. locational bank 1 (dimm bank 0,
+   physical bank 1, which is the back side of the first DIMM pair)
+   corresponds to software bank 1, at node offset 1GB.  More
+   generally, locational bank = software bank XOR dimm0_sel.
+
+   All the PROM's data structures (promlog variables, klconfig, etc.)
+   track memory by the locational bank number.  The kernel usually
+   tracks memory by the software bank number.
+   memsupport.c:slot_psize_compute() performs the mapping.
+
+   (Note:  the terms "locational bank" and "software bank" are not
+   official in any way, but I've tried to make the PROM use them
+   consistently -- bjj.)
+ */
+
+#define MD_MEM_BANKS 		8
+#define MD_MEM_DIMM_BANKS 	4
+#define MD_BANK_SHFT            30                     /* log2(1 GB)     */
+#define MD_BANK_MASK            (UINT64_CAST 0x7 << 30)
+#define MD_BANK_SIZE            (UINT64_CAST 1 << MD_BANK_SHFT)   /*  1 GB */
+#define MD_BANK_OFFSET(_b)      (UINT64_CAST (_b) << MD_BANK_SHFT)
+#define MD_BANK_GET(addr)	(((addr) & MD_BANK_MASK) >> MD_BANK_SHFT)
+#define MD_BANK_TO_DIMM_BANK(_b) (( (_b) >> 1) & 0x3)
+#define MD_BANK_TO_PHYS_BANK(_b) (( (_b) >> 0) & 0x1)
+#define MD_DIMM_BANK_GET(addr)   MD_BANK_TO_DIMM_BANK(MD_BANK_GET(addr))
+#define MD_PHYS_BANK_GET(addr)   MD_BANK_TO_PHYS_BANK(MD_BANK_GET(addr))
+
+
+/* Split an MD pointer (or message source & suppl. fields) into node, device */
+
+#define MD_PTR_NODE_SHFT	3
+#define MD_PTR_DEVICE_MASK	0x7
+#define MD_PTR_SUBNODE0_MASK	0x1
+#define MD_PTR_SUBNODE1_MASK	0x4
+
+
+/**********************************************************************
+
+ Backdoor protection and page counter structures
+
+**********************************************************************/
+
+/* Protection entries and page counters are interleaved at 4 separate
+   addresses, 0x10 apart.  Software must read/write all four. */
+
+#define BD_ITLV_COUNT		4
+#define BD_ITLV_STRIDE		0x10
+
+/* Protection entries */
+
+/* (these macros work for standard (_rgn < 32) or premium DIMMs) */
+#define MD_PROT_SHFT(_rgn, _io)	((((_rgn) & 0x20) >> 2 | \
+				  ((_rgn) & 0x01) << 2 | \
+				  ((_io)  &  0x1) << 1) * 8)
+#define MD_PROT_MASK(_rgn, _io)	(0xff << MD_PROT_SHFT(_rgn, _io))
+#define MD_PROT_GET(_val, _rgn, _io) \
+	(((_val) & MD_PROT_MASK(_rgn, _io)) >> MD_PROT_SHFT(_rgn, _io))
+
+/* Protection field values */
+
+#define MD_PROT_RW              (UINT64_CAST 0xff)
+#define MD_PROT_RO              (UINT64_CAST 0x0f)
+#define MD_PROT_NO              (UINT64_CAST 0x00)
+
+
+
+
+/**********************************************************************
+
+ Directory format structures
+
+***********************************************************************/
+
+#ifdef _LANGUAGE_C
+
+/* Standard Directory Entries */
+
+#ifdef LITTLE_ENDIAN
+
+struct	md_sdir_pointer_fmt { /* exclusive, busy shared/excl, wait, poisoned */
+	bdrkreg_t	sdp_format                :	 2;
+        bdrkreg_t       sdp_state                 :      3;
+        bdrkreg_t       sdp_priority              :      3;
+        bdrkreg_t       sdp_pointer1              :      8;
+        bdrkreg_t       sdp_ecc                   :      6;
+        bdrkreg_t       sdp_locprot               :      1;
+        bdrkreg_t       sdp_reserved              :      1;
+        bdrkreg_t       sdp_crit_word_off         :      3;
+        bdrkreg_t       sdp_pointer2              :      5;
+        bdrkreg_t       sdp_fill                  :     32;
+};
+
+#else
+
+struct	md_sdir_pointer_fmt { /* exclusive, busy shared/excl, wait, poisoned */
+	bdrkreg_t	sdp_fill		  :	32;
+	bdrkreg_t	sdp_pointer2		  :	 5;
+	bdrkreg_t	sdp_crit_word_off	  :	 3;
+	bdrkreg_t	sdp_reserved		  :	 1;
+	bdrkreg_t	sdp_locprot		  :	 1;
+	bdrkreg_t	sdp_ecc			  :	 6;
+	bdrkreg_t	sdp_pointer1		  :	 8;
+	bdrkreg_t	sdp_priority		  :	 3;
+	bdrkreg_t	sdp_state		  :	 3;
+	bdrkreg_t	sdp_format		  :	 2;
+};
+
+#endif
+
+#ifdef LITTLE_ENDIAN
+
+struct	md_sdir_fine_fmt { /* shared (fine) */
+	bdrkreg_t	sdf_format                :	 2;
+        bdrkreg_t       sdf_tag1                  :      3;
+        bdrkreg_t       sdf_tag2                  :      3;
+        bdrkreg_t       sdf_vector1               :      8;
+        bdrkreg_t       sdf_ecc                   :      6;
+        bdrkreg_t       sdf_locprot               :      1;
+        bdrkreg_t       sdf_tag2valid             :      1;
+        bdrkreg_t       sdf_vector2               :      8;
+        bdrkreg_t       sdf_fill                  :     32;
+};
+
+#else
+
+struct	md_sdir_fine_fmt { /* shared (fine) */
+	bdrkreg_t	sdf_fill		  :	32;
+	bdrkreg_t	sdf_vector2		  :	 8;
+	bdrkreg_t	sdf_tag2valid		  :	 1;
+	bdrkreg_t	sdf_locprot		  :	 1;
+	bdrkreg_t	sdf_ecc			  :	 6;
+	bdrkreg_t	sdf_vector1		  :	 8;
+	bdrkreg_t	sdf_tag2		  :	 3;
+	bdrkreg_t	sdf_tag1		  :	 3;
+	bdrkreg_t	sdf_format		  :	 2;
+};
+
+#endif
+
+#ifdef LITTLE_ENDIAN
+
+struct	md_sdir_coarse_fmt { /* shared (coarse) */
+	bdrkreg_t	sdc_format                :	 2;
+        bdrkreg_t       sdc_reserved_1            :      6;
+        bdrkreg_t       sdc_vector_a              :      8;
+        bdrkreg_t       sdc_ecc                   :      6;
+        bdrkreg_t       sdc_locprot               :      1;
+        bdrkreg_t       sdc_reserved              :      1;
+        bdrkreg_t       sdc_vector_b              :      8;
+        bdrkreg_t       sdc_fill                  :     32;
+};
+
+#else
+
+struct	md_sdir_coarse_fmt { /* shared (coarse) */
+	bdrkreg_t	sdc_fill		  :	32;
+	bdrkreg_t	sdc_vector_b		  :	 8;
+	bdrkreg_t	sdc_reserved		  :	 1;
+	bdrkreg_t	sdc_locprot		  :	 1;
+	bdrkreg_t	sdc_ecc			  :	 6;
+	bdrkreg_t	sdc_vector_a		  :	 8;
+	bdrkreg_t	sdc_reserved_1		  :	 6;
+	bdrkreg_t	sdc_format		  :	 2;
+};
+
+#endif
+
+typedef union md_sdir {
+	/* The 32 bits of standard directory, in bits 31:0 */
+	uint64_t	sd_val;
+	struct	md_sdir_pointer_fmt	sdp_fmt;
+	struct	md_sdir_fine_fmt	sdf_fmt;
+	struct	md_sdir_coarse_fmt	sdc_fmt;
+} md_sdir_t;
+
+
+/* Premium Directory Entries */
+
+#ifdef LITTLE_ENDIAN
+
+struct	md_pdir_pointer_fmt { /* exclusive, busy shared/excl, wait, poisoned */
+	bdrkreg_t	pdp_format                :	 2;
+        bdrkreg_t       pdp_state                 :      3;
+        bdrkreg_t       pdp_priority              :      3;
+        bdrkreg_t       pdp_pointer1_a            :      8;
+        bdrkreg_t       pdp_reserved_4            :      6;
+        bdrkreg_t       pdp_pointer1_b            :      3;
+        bdrkreg_t       pdp_reserved_3            :      7;
+        bdrkreg_t       pdp_ecc_a                 :      6;
+        bdrkreg_t       pdp_locprot               :      1;
+        bdrkreg_t       pdp_reserved_2            :      1;
+        bdrkreg_t       pdp_crit_word_off         :      3;
+        bdrkreg_t       pdp_pointer2_a            :      5;
+        bdrkreg_t       pdp_ecc_b                 :      1;
+        bdrkreg_t       pdp_reserved_1            :      5;
+        bdrkreg_t       pdp_pointer2_b            :      3;
+        bdrkreg_t       pdp_reserved              :      7;
+};
+
+#else
+
+struct	md_pdir_pointer_fmt { /* exclusive, busy shared/excl, wait, poisoned */
+	bdrkreg_t	pdp_reserved		  :	 7;
+	bdrkreg_t	pdp_pointer2_b		  :	 3;
+	bdrkreg_t	pdp_reserved_1		  :	 5;
+	bdrkreg_t	pdp_ecc_b		  :	 1;
+	bdrkreg_t	pdp_pointer2_a		  :	 5;
+	bdrkreg_t	pdp_crit_word_off	  :	 3;
+	bdrkreg_t	pdp_reserved_2		  :	 1;
+	bdrkreg_t	pdp_locprot		  :	 1;
+	bdrkreg_t	pdp_ecc_a		  :	 6;
+	bdrkreg_t	pdp_reserved_3		  :	 7;
+	bdrkreg_t	pdp_pointer1_b		  :	 3;
+	bdrkreg_t	pdp_reserved_4		  :	 6;
+	bdrkreg_t	pdp_pointer1_a		  :	 8;
+	bdrkreg_t	pdp_priority		  :	 3;
+	bdrkreg_t	pdp_state		  :	 3;
+	bdrkreg_t	pdp_format		  :	 2;
+};
+
+#endif
+
+#ifdef LITTLE_ENDIAN
+
+struct	md_pdir_fine_fmt { /* shared (fine) */
+	bdrkreg_t	pdf_format                :	 2;
+        bdrkreg_t       pdf_tag1_a                :      3;
+        bdrkreg_t       pdf_tag2_a                :      3;
+        bdrkreg_t       pdf_vector1_a             :      8;
+        bdrkreg_t       pdf_reserved_1            :      6;
+        bdrkreg_t       pdf_tag1_b                :      2;
+        bdrkreg_t       pdf_vector1_b             :      8;
+        bdrkreg_t       pdf_ecc_a                 :      6;
+        bdrkreg_t       pdf_locprot               :      1;
+        bdrkreg_t       pdf_tag2valid             :      1;
+        bdrkreg_t       pdf_vector2_a             :      8;
+        bdrkreg_t       pdf_ecc_b                 :      1;
+        bdrkreg_t       pdf_reserved              :      5;
+        bdrkreg_t       pdf_tag2_b                :      2;
+        bdrkreg_t       pdf_vector2_b             :      8;
+};
+
+#else
+
+struct	md_pdir_fine_fmt { /* shared (fine) */
+	bdrkreg_t	pdf_vector2_b		  :	 8;
+	bdrkreg_t	pdf_tag2_b		  :	 2;
+	bdrkreg_t	pdf_reserved		  :	 5;
+	bdrkreg_t	pdf_ecc_b		  :	 1;
+	bdrkreg_t	pdf_vector2_a		  :	 8;
+	bdrkreg_t	pdf_tag2valid		  :	 1;
+	bdrkreg_t	pdf_locprot		  :	 1;
+	bdrkreg_t	pdf_ecc_a		  :	 6;
+	bdrkreg_t	pdf_vector1_b		  :	 8;
+	bdrkreg_t	pdf_tag1_b		  :	 2;
+	bdrkreg_t	pdf_reserved_1		  :	 6;
+	bdrkreg_t	pdf_vector1_a		  :	 8;
+	bdrkreg_t	pdf_tag2_a		  :	 3;
+	bdrkreg_t	pdf_tag1_a		  :	 3;
+	bdrkreg_t	pdf_format		  :	 2;
+};
+
+#endif
+
+#ifdef LITTLE_ENDIAN
+
+struct	md_pdir_sparse_fmt { /* shared (sparse) */
+	bdrkreg_t	pds_format                :	 2;
+        bdrkreg_t       pds_column_a              :      6;
+        bdrkreg_t       pds_row_a                 :      8;
+        bdrkreg_t       pds_column_b              :     16;
+        bdrkreg_t       pds_ecc_a                 :      6;
+        bdrkreg_t       pds_locprot               :      1;
+        bdrkreg_t       pds_reserved_1            :      1;
+        bdrkreg_t       pds_row_b                 :      8;
+        bdrkreg_t       pds_ecc_b                 :      1;
+        bdrkreg_t       pds_column_c              :     10;
+        bdrkreg_t       pds_reserved              :      5;
+};
+
+#else
+
+struct	md_pdir_sparse_fmt { /* shared (sparse) */
+	bdrkreg_t	pds_reserved		  :	 5;
+	bdrkreg_t	pds_column_c		  :	10;
+	bdrkreg_t	pds_ecc_b		  :	 1;
+	bdrkreg_t	pds_row_b		  :	 8;
+	bdrkreg_t	pds_reserved_1		  :	 1;
+	bdrkreg_t	pds_locprot		  :	 1;
+	bdrkreg_t	pds_ecc_a		  :	 6;
+	bdrkreg_t	pds_column_b		  :	16;
+	bdrkreg_t	pds_row_a		  :	 8;
+	bdrkreg_t	pds_column_a		  :	 6;
+	bdrkreg_t	pds_format		  :	 2;
+};
+
+#endif
+
+typedef union md_pdir {
+	/* The 64 bits of premium directory */
+	uint64_t	pd_val;
+	struct	md_pdir_pointer_fmt	pdp_fmt;
+	struct	md_pdir_fine_fmt	pdf_fmt;
+	struct	md_pdir_sparse_fmt	pds_fmt;
+} md_pdir_t;
+
+#endif /* _LANGUAGE_C */
+
+
+/**********************************************************************
+
+ The defines for backdoor directory and backdoor ECC.
+
+***********************************************************************/
+
+/* Directory formats, for each format's "format" field */
+
+#define MD_FORMAT_UNOWNED	(UINT64_CAST 0x0)	/* 00 */
+#define MD_FORMAT_POINTER	(UINT64_CAST 0x1)	/* 01 */
+#define MD_FORMAT_SHFINE	(UINT64_CAST 0x2)	/* 10 */
+#define MD_FORMAT_SHCOARSE	(UINT64_CAST 0x3)	/* 11 */
+  /* Shared coarse (standard) and shared sparse (premium) both use fmt 0x3 */
+
+
+/*
+ * Cacheline state values.
+ *
+ * These are really *software* notions of the "state" of a cacheline; but the
+ * actual values have been carefully chosen to align with some hardware values!
+ * The MD_FMT_ST_TO_STATE macro is used to convert from hardware format/state
+ * pairs in the directory entries into one of these cacheline state values.
+ */
+
+#define MD_DIR_EXCLUSIVE	(UINT64_CAST 0x0)	/* ptr format, hw-defined */
+#define MD_DIR_UNOWNED		(UINT64_CAST 0x1)	/* format=0 */
+#define MD_DIR_SHARED		(UINT64_CAST 0x2)	/* format=2,3 */
+#define MD_DIR_BUSY_SHARED	(UINT64_CAST 0x4)	/* ptr format, hw-defined */
+#define MD_DIR_BUSY_EXCL	(UINT64_CAST 0x5)	/* ptr format, hw-defined */
+#define MD_DIR_WAIT		(UINT64_CAST 0x6)	/* ptr format, hw-defined */
+#define MD_DIR_POISONED		(UINT64_CAST 0x7)	/* ptr format, hw-defined */
+
+#ifdef _LANGUAGE_C
+
+/* Convert format and state fields into a single "cacheline state" value, defined above */
+
+#define MD_FMT_ST_TO_STATE(fmt, state) \
+  ((fmt) == MD_FORMAT_POINTER ? (state) : \
+   (fmt) == MD_FORMAT_UNOWNED ? MD_DIR_UNOWNED : \
+   MD_DIR_SHARED)
+#define MD_DIR_STATE(x) MD_FMT_ST_TO_STATE(MD_DIR_FORMAT(x), MD_DIR_STVAL(x))
+
+#endif /* _LANGUAGE_C */
+
+
+
+/* Directory field shifts and masks */
+
+/* Standard */
+
+#define MD_SDIR_FORMAT_SHFT	0			/* All formats */
+#define MD_SDIR_FORMAT_MASK	(0x3 << 0)
+#define MD_SDIR_STATE_SHFT	2			/* Pointer fmt. only */
+#define MD_SDIR_STATE_MASK	(0x7 << 2)
+
+/* Premium */
+
+#define MD_PDIR_FORMAT_SHFT	0			/* All formats */
+#define MD_PDIR_FORMAT_MASK	(0x3 << 0)
+#define MD_PDIR_STATE_SHFT	2			/* Pointer fmt. only */
+#define MD_PDIR_STATE_MASK	(0x7 << 2)
+
+/* Generic */
+
+#define MD_FORMAT_SHFT	0				/* All formats */
+#define MD_FORMAT_MASK	(0x3 << 0)
+#define MD_STATE_SHFT	2				/* Pointer fmt. only */
+#define MD_STATE_MASK	(0x7 << 2)
+
+
+/* Special shifts to reconstruct fields from the _a and _b parts */
+
+/* Standard:  only shared coarse has split fields */
+
+#define MD_SDC_VECTORB_SHFT	8	/* eg: sdc_vector_a is 8 bits */
+
+/* Premium:  pointer, shared fine, shared sparse */
+
+#define MD_PDP_POINTER1A_MASK	0xFF
+#define MD_PDP_POINTER1B_SHFT	8
+#define MD_PDP_POINTER2B_SHFT	5
+#define MD_PDP_ECCB_SHFT	6
+
+#define MD_PDF_VECTOR1B_SHFT	8
+#define MD_PDF_VECTOR2B_SHFT	8
+#define MD_PDF_TAG1B_SHFT	3
+#define MD_PDF_TAG2B_SHFT	3
+#define MD_PDF_ECC_SHFT		6
+
+#define MD_PDS_ROWB_SHFT	8
+#define MD_PDS_COLUMNB_SHFT	6
+#define MD_PDS_COLUMNC_SHFT	(MD_PDS_COLUMNB_SHFT + 16)
+#define MD_PDS_ECC_SHFT		6
+
+
+
+/*
+ * Directory/protection/counter initialization values, premium and standard
+ */
+
+#define MD_PDIR_INIT		0
+#define MD_PDIR_INIT_CNT	0
+#define MD_PDIR_INIT_PROT	0
+
+#define MD_SDIR_INIT		0
+#define MD_SDIR_INIT_CNT	0
+#define MD_SDIR_INIT_PROT	0
+
+#define MD_PDIR_MASK            0xffffffffffffffff
+#define MD_SDIR_MASK            0xffffffff
+
+/* When premium mode is on for probing but standard directory memory
+   is installed, the valid directory bits depend on the phys. bank */
+#define MD_PDIR_PROBE_MASK(pb)  0xffffffffffffffff
+#define MD_SDIR_PROBE_MASK(pb)  (0xffff0000ffff << ((pb) ? 16 : 0))
+
+
+/*
+ * Misc. field extractions and conversions
+ */
+
+/* Convert an MD pointer (or message source, supplemental fields) */
+
+#define MD_PTR_NODE(x)		((x) >> MD_PTR_NODE_SHFT)
+#define MD_PTR_DEVICE(x)	((x) & MD_PTR_DEVICE_MASK)
+#define MD_PTR_SLICE(x)		(((x) & MD_PTR_SUBNODE0_MASK) | \
+				 ((x) & MD_PTR_SUBNODE1_MASK) >> 1)
+#define MD_PTR_OWNER_CPU(x)	(! ((x) & 2))
+#define MD_PTR_OWNER_IO(x)	((x) & 2)
+
+/* Extract format and raw state from a directory entry */
+
+#define MD_DIR_FORMAT(x)	((x) >> MD_SDIR_FORMAT_SHFT & \
+				 MD_SDIR_FORMAT_MASK >> MD_SDIR_FORMAT_SHFT)
+#define MD_DIR_STVAL(x)		((x) >> MD_SDIR_STATE_SHFT & \
+				 MD_SDIR_STATE_MASK >> MD_SDIR_STATE_SHFT)
+
+/* Mask & Shift to get HSPEC_ADDR from MD DIR_ERROR register */
+#define ERROR_ADDR_SHFT         3
+#define ERROR_HSPEC_SHFT        3
+#define DIR_ERR_HSPEC_MASK      0x1fffffff8
+
+/*
+ *  DIR_ERR* and MEM_ERR* defines are used to avoid ugly
+ *  #ifdefs for SN0 and SN1 in memerror.c code.  See SN0/hubmd.h
+ *  for corresponding SN0 definitions.
+ */
+#define md_dir_error_t  md_dir_error_u_t
+#define md_mem_error_t  md_mem_error_u_t
+#define derr_reg        md_dir_error_regval
+#define merr_reg        md_mem_error_regval
+
+#define DIR_ERR_UCE_VALID       dir_err.md_dir_error_fld_s.de_uce_valid
+#define DIR_ERR_AE_VALID        dir_err.md_dir_error_fld_s.de_ae_valid
+#define DIR_ERR_BAD_SYN         dir_err.md_dir_error_fld_s.de_bad_syn
+#define DIR_ERR_CE_OVERRUN      dir_err.md_dir_error_fld_s.de_ce_overrun
+#define MEM_ERR_ADDRESS         mem_err.md_mem_error_fld_s.me_address
+        /* BRINGUP Can the overrun bit be set without the valid bit? */
+#define MEM_ERR_CE_OVERRUN      (mem_err.md_mem_error_fld_s.me_read_ce >> 1)
+#define MEM_ERR_BAD_SYN         mem_err.md_mem_error_fld_s.me_bad_syn
+#define MEM_ERR_UCE_VALID       (mem_err.md_mem_error_fld_s.me_read_uce & 1)
+
+
+
+/*********************************************************************
+
+ We have the shift and masks of various fields defined below.
+
+ *********************************************************************/
+
+/* MD_REFRESH_CONTROL fields */
+
+#define MRC_ENABLE_SHFT         63
+#define MRC_ENABLE_MASK         (UINT64_CAST 1 << 63)
+#define MRC_ENABLE              (UINT64_CAST 1 << 63)
+#define MRC_COUNTER_SHFT        12
+#define MRC_COUNTER_MASK        (UINT64_CAST 0xfff << 12)
+#define MRC_CNT_THRESH_MASK     0xfff
+#define MRC_RESET_DEFAULTS      (UINT64_CAST 0x800)
+
+/* MD_DIR_CONFIG fields */
+
+#define MDC_DIR_PREMIUM		(UINT64_CAST 1 << 0)
+#define MDC_IGNORE_ECC_SHFT      1
+#define MDC_IGNORE_ECC_MASK     (UINT64_CAST 1 << 1)
+
+/* MD_MEMORY_CONFIG fields */
+
+#define MMC_RP_CONFIG_SHFT	61
+#define MMC_RP_CONFIG_MASK	(UINT64_CAST 1 << 61)
+#define MMC_RCD_CONFIG_SHFT	60
+#define MMC_RCD_CONFIG_MASK	(UINT64_CAST 1 << 60)
+#define MMC_MB_NEG_EDGE_SHFT	56
+#define MMC_MB_NEG_EDGE_MASK	(UINT64_CAST 0x7 << 56)
+#define MMC_SAMPLE_TIME_SHFT	52
+#define MMC_SAMPLE_TIME_MASK	(UINT64_CAST 0x3 << 52)
+#define MMC_DELAY_MUX_SEL_SHFT	50
+#define MMC_DELAY_MUX_SEL_MASK	(UINT64_CAST 0x3 << 50)
+#define MMC_PHASE_DELAY_SHFT	49
+#define MMC_PHASE_DELAY_MASK	(UINT64_CAST 1 << 49)
+#define MMC_DB_NEG_EDGE_SHFT	48
+#define MMC_DB_NEG_EDGE_MASK	(UINT64_CAST 1 << 48)
+#define MMC_CPU_PROT_IGNORE_SHFT	 47
+#define MMC_CPU_PROT_IGNORE_MASK	(UINT64_CAST 1 << 47)
+#define MMC_IO_PROT_IGNORE_SHFT 46
+#define MMC_IO_PROT_IGNORE_MASK	(UINT64_CAST 1 << 46)
+#define MMC_IO_PROT_EN_SHFT	45
+#define MMC_IO_PROT_EN_MASK	(UINT64_CAST 1 << 45)
+#define MMC_CC_ENABLE_SHFT	44
+#define MMC_CC_ENABLE_MASK	(UINT64_CAST 1 << 44)
+#define MMC_DIMM0_SEL_SHFT	32
+#define MMC_DIMM0_SEL_MASK     (UINT64_CAST 0x3 << 32)
+#define MMC_DIMM_SIZE_SHFT(_dimm)    ((_dimm << 3) + 4)
+#define MMC_DIMM_SIZE_MASK(_dimm)    (UINT64_CAST 0xf << MMC_DIMM_SIZE_SHFT(_dimm))
+#define MMC_DIMM_WIDTH_SHFT(_dimm)    ((_dimm << 3) + 3)
+#define MMC_DIMM_WIDTH_MASK(_dimm)    (UINT64_CAST 0x1 << MMC_DIMM_WIDTH_SHFT(_dimm))
+#define MMC_DIMM_BANKS_SHFT(_dimm)    (_dimm << 3)
+#define MMC_DIMM_BANKS_MASK(_dimm)    (UINT64_CAST 0x3 << MMC_DIMM_BANKS_SHFT(_dimm))
+#define MMC_BANK_ALL_MASK	0xffffffffLL
+/* Default values for write-only bits in MD_MEMORY_CONFIG */
+#define MMC_DEFAULT_BITS	(UINT64_CAST 0x7 << MMC_MB_NEG_EDGE_SHFT)
+
+/* MD_MB_ECC_CONFIG fields */
+
+#define MEC_IGNORE_ECC		(UINT64_CAST 0x1 << 0)
+
+/* MD_BIST_DATA fields */
+
+#define MBD_BIST_WRITE		(UINT64_CAST 1 << 7)
+#define MBD_BIST_CYCLE		(UINT64_CAST 1 << 6)
+#define MBD_BIST_BYTE		(UINT64_CAST 1 << 5)
+#define MBD_BIST_NIBBLE		(UINT64_CAST 1 << 4)
+#define MBD_BIST_DATA_MASK	0xf
+
+/* MD_BIST_CTL fields */
+
+#define MBC_DIMM_SHFT		5
+#define MBC_DIMM_MASK		(UINT64_CAST 0x3 << 5)
+#define MBC_BANK_SHFT		4
+#define MBC_BANK_MASK		(UINT64_CAST 0x1 << 4)
+#define MBC_BIST_RESET		(UINT64_CAST 0x1 << 2)
+#define MBC_BIST_STOP		(UINT64_CAST 0x1 << 1)
+#define MBC_BIST_START		(UINT64_CAST 0x1 << 0)
+
+#define MBC_GO(dimm, bank) \
+    (((dimm) << MBC_DIMM_SHFT) & MBC_DIMM_MASK | \
+     ((bank) << MBC_BANK_SHFT) & MBC_BANK_MASK | \
+     MBC_BIST_START)
+
+/* MD_BIST_STATUS fields */
+
+#define MBS_BIST_DONE		(UINT64_CAST 0X1 << 1)
+#define MBS_BIST_PASSED		(UINT64_CAST 0X1 << 0)
+
+/* MD_JUNK_BUS_TIMING fields */
+
+#define MJT_SYNERGY_ENABLE_SHFT	40
+#define MJT_SYNERGY_ENABLE_MASK	(UINT64_CAST 0Xff << MJT_SYNERGY_ENABLE_SHFT)
+#define MJT_SYNERGY_SETUP_SHFT	32
+#define MJT_SYNERGY_SETUP_MASK	(UINT64_CAST 0Xff << MJT_SYNERGY_SETUP_SHFT)
+#define MJT_UART_ENABLE_SHFT	24
+#define MJT_UART_ENABLE_MASK	(UINT64_CAST 0Xff << MJT_UART_ENABLE_SHFT)
+#define MJT_UART_SETUP_SHFT	16
+#define MJT_UART_SETUP_MASK	(UINT64_CAST 0Xff << MJT_UART_SETUP_SHFT)
+#define MJT_FPROM_ENABLE_SHFT	8
+#define MJT_FPROM_ENABLE_MASK	(UINT64_CAST 0Xff << MJT_FPROM_ENABLE_SHFT)
+#define MJT_FPROM_SETUP_SHFT	0
+#define MJT_FPROM_SETUP_MASK	(UINT64_CAST 0Xff << MJT_FPROM_SETUP_SHFT)
+
+#define MEM_ERROR_VALID_CE      1
+
+
+/* MD_FANDOP_CAC_STAT0, MD_FANDOP_CAC_STAT1 addr field shift */
+
+#define MFC_ADDR_SHFT		6
+
+#endif  /* _ASM_SN_SN1_HUBMD_NEXT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubni.h linux/include/asm-ia64/sn/sn1/hubni.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubni.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubni.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1782 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBNI_H
+#define _ASM_SN_SN1_HUBNI_H
+
+
+/************************************************************************
+ *                                                                      *
+ *      WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!      *
+ *                                                                      *
+ * This file is created by an automated script. Any (minimal) changes   *
+ * made manually to this  file should be made with care.                *
+ *                                                                      *
+ *               MAKE ALL ADDITIONS TO THE END OF THIS FILE             *
+ *                                                                      *
+ ************************************************************************/
+
+#define    NI_PORT_STATUS            0x00680000    /* LLP Status             */
+
+
+
+#define    NI_PORT_RESET             0x00680008    /*
+                                                    * Reset the Network
+                                                    * Interface
+                                                    */
+
+
+
+#define    NI_RESET_ENABLE           0x00680010    /* Warm Reset Enable      */
+
+
+
+#define    NI_DIAG_PARMS             0x00680018    /*
+                                                    * Diagnostic
+                                                    * Parameters
+                                                    */
+
+
+
+#define    NI_CHANNEL_CONTROL        0x00680020    /*
+                                                    * Virtual channel
+                                                    * control
+                                                    */
+
+
+
+#define    NI_CHANNEL_TEST           0x00680028    /* LLP Test Control.      */
+
+
+
+#define    NI_PORT_PARMS             0x00680030    /* LLP Parameters         */
+
+
+
+#define    NI_CHANNEL_AGE            0x00680038    /*
+                                                    * Network age
+                                                    * injection control
+                                                    */
+
+
+
+#define    NI_PORT_ERRORS            0x00680100    /* Errors                 */
+
+
+
+#define    NI_PORT_HEADER_A          0x00680108    /*
+                                                    * Error Header first
+                                                    * half
+                                                    */
+
+
+
+#define    NI_PORT_HEADER_B          0x00680110    /*
+                                                    * Error Header second
+                                                    * half
+                                                    */
+
+
+
+#define    NI_PORT_SIDEBAND          0x00680118    /* Error Sideband         */
+
+
+
+#define    NI_PORT_ERROR_CLEAR       0x00680120    /*
+                                                    * Clear the Error
+                                                    * bits
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_0          0x00681000    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_1          0x00681008    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_2          0x00681010    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_3          0x00681018    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_4          0x00681020    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_5          0x00681028    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_6          0x00681030    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_7          0x00681038    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_8          0x00681040    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_9          0x00681048    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_10         0x00681050    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_11         0x00681058    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_12         0x00681060    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_13         0x00681068    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_14         0x00681070    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_15         0x00681078    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_16         0x00681080    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_17         0x00681088    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_18         0x00681090    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_19         0x00681098    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_20         0x006810A0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_21         0x006810A8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_22         0x006810B0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_23         0x006810B8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_24         0x006810C0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_25         0x006810C8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_26         0x006810D0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_27         0x006810D8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_28         0x006810E0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_29         0x006810E8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_30         0x006810F0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_31         0x006810F8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_32         0x00681100    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_33         0x00681108    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_34         0x00681110    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_35         0x00681118    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_36         0x00681120    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_37         0x00681128    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_38         0x00681130    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_39         0x00681138    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_40         0x00681140    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_41         0x00681148    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_42         0x00681150    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_43         0x00681158    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_44         0x00681160    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_45         0x00681168    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_46         0x00681170    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_47         0x00681178    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_48         0x00681180    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_49         0x00681188    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_50         0x00681190    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_51         0x00681198    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_52         0x006811A0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_53         0x006811A8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_54         0x006811B0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_55         0x006811B8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_56         0x006811C0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_57         0x006811C8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_58         0x006811D0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_59         0x006811D8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_60         0x006811E0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_61         0x006811E8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_62         0x006811F0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_63         0x006811F8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_64         0x00681200    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_65         0x00681208    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_66         0x00681210    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_67         0x00681218    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_68         0x00681220    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_69         0x00681228    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_70         0x00681230    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_71         0x00681238    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_72         0x00681240    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_73         0x00681248    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_74         0x00681250    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_75         0x00681258    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_76         0x00681260    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_77         0x00681268    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_78         0x00681270    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_79         0x00681278    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_80         0x00681280    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_81         0x00681288    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_82         0x00681290    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_83         0x00681298    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_84         0x006812A0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_85         0x006812A8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_86         0x006812B0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_87         0x006812B8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_88         0x006812C0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_89         0x006812C8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_90         0x006812D0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_91         0x006812D8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_92         0x006812E0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_93         0x006812E8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_94         0x006812F0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_95         0x006812F8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_96         0x00681300    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_97         0x00681308    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_98         0x00681310    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_99         0x00681318    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_100        0x00681320    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_101        0x00681328    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_102        0x00681330    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_103        0x00681338    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_104        0x00681340    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_105        0x00681348    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_106        0x00681350    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_107        0x00681358    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_108        0x00681360    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_109        0x00681368    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_110        0x00681370    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_111        0x00681378    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_112        0x00681380    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_113        0x00681388    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_114        0x00681390    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_115        0x00681398    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_116        0x006813A0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_117        0x006813A8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_118        0x006813B0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_119        0x006813B8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_120        0x006813C0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_121        0x006813C8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_122        0x006813D0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_123        0x006813D8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_124        0x006813E0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_125        0x006813E8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_126        0x006813F0    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_LOCAL_TABLE_127        0x006813F8    /*
+                                                    * Base of Local
+                                                    * Mapping Table 0-127
+                                                    */
+
+
+
+#define    NI_GLOBAL_TABLE           0x00682000    /*
+                                                    * Base of Global
+                                                    * Mapping Table
+                                                    */
+
+
+
+
+
+#ifdef _LANGUAGE_C
+
+/************************************************************************
+ *                                                                      *
+ *  This register describes the LLP status.                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_port_status_u {
+	bdrkreg_t	ni_port_status_regval;
+	struct  {
+		bdrkreg_t	ps_port_status            :	 2;
+                bdrkreg_t       ps_remote_power           :      1;
+                bdrkreg_t       ps_rsvd                   :     61;
+	} ni_port_status_fld_s;
+} ni_port_status_u_t;
+
+#else
+
+typedef union ni_port_status_u {
+	bdrkreg_t	ni_port_status_regval;
+	struct	{
+		bdrkreg_t	ps_rsvd			  :	61;
+		bdrkreg_t	ps_remote_power		  :	 1;
+		bdrkreg_t	ps_port_status		  :	 2;
+	} ni_port_status_fld_s;
+} ni_port_status_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Writing this register issues a reset to the network interface.      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_port_reset_u {
+	bdrkreg_t	ni_port_reset_regval;
+	struct  {
+		bdrkreg_t	pr_link_reset_out         :	 1;
+                bdrkreg_t       pr_port_reset             :      1;
+                bdrkreg_t       pr_local_reset            :      1;
+                bdrkreg_t       pr_rsvd                   :     61;
+	} ni_port_reset_fld_s;
+} ni_port_reset_u_t;
+
+#else
+
+typedef union ni_port_reset_u {
+	bdrkreg_t	ni_port_reset_regval;
+	struct	{
+		bdrkreg_t	pr_rsvd			  :	61;
+		bdrkreg_t	pr_local_reset		  :	 1;
+		bdrkreg_t	pr_port_reset		  :	 1;
+		bdrkreg_t	pr_link_reset_out	  :	 1;
+	} ni_port_reset_fld_s;
+} ni_port_reset_u_t;
+
+#endif
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the warm reset enable bit.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_reset_enable_u {
+	bdrkreg_t	ni_reset_enable_regval;
+	struct  {
+		bdrkreg_t	re_reset_ok               :	 1;
+                bdrkreg_t       re_rsvd                   :     63;
+	} ni_reset_enable_fld_s;
+} ni_reset_enable_u_t;
+
+#else
+
+typedef union ni_reset_enable_u {
+	bdrkreg_t	ni_reset_enable_regval;
+	struct	{
+		bdrkreg_t	re_rsvd			  :	63;
+		bdrkreg_t	re_reset_ok		  :	 1;
+	} ni_reset_enable_fld_s;
+} ni_reset_enable_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains parameters for diagnostics.                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_diag_parms_u {
+	bdrkreg_t	ni_diag_parms_regval;
+	struct  {
+		bdrkreg_t	dp_send_data_error        :	 1;
+                bdrkreg_t       dp_port_disable           :      1;
+                bdrkreg_t       dp_send_err_off           :      1;
+                bdrkreg_t       dp_rsvd                   :     61;
+	} ni_diag_parms_fld_s;
+} ni_diag_parms_u_t;
+
+#else
+
+typedef union ni_diag_parms_u {
+	bdrkreg_t	ni_diag_parms_regval;
+	struct	{
+		bdrkreg_t	dp_rsvd			  :	61;
+		bdrkreg_t	dp_send_err_off		  :	 1;
+		bdrkreg_t	dp_port_disable		  :	 1;
+		bdrkreg_t	dp_send_data_error	  :	 1;
+	} ni_diag_parms_fld_s;
+} ni_diag_parms_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the virtual channel selection control for    *
+ * outgoing messages from the Bedrock.                                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_channel_control_u {
+	bdrkreg_t	ni_channel_control_regval;
+	struct  {
+		bdrkreg_t	cc_vch_one_request        :	 1;
+                bdrkreg_t       cc_vch_two_request        :      1;
+                bdrkreg_t       cc_vch_nine_request       :      1;
+                bdrkreg_t       cc_vch_vector_request     :      1;
+                bdrkreg_t       cc_vch_one_reply          :      1;
+                bdrkreg_t       cc_vch_two_reply          :      1;
+                bdrkreg_t       cc_vch_nine_reply         :      1;
+                bdrkreg_t       cc_vch_vector_reply       :      1;
+                bdrkreg_t       cc_send_vch_sel           :      1;
+                bdrkreg_t       cc_rsvd                   :     55;
+	} ni_channel_control_fld_s;
+} ni_channel_control_u_t;
+
+#else
+
+typedef union ni_channel_control_u {
+	bdrkreg_t	ni_channel_control_regval;
+	struct	{
+		bdrkreg_t	cc_rsvd			  :	55;
+		bdrkreg_t	cc_send_vch_sel		  :	 1;
+		bdrkreg_t	cc_vch_vector_reply	  :	 1;
+		bdrkreg_t	cc_vch_nine_reply	  :	 1;
+		bdrkreg_t	cc_vch_two_reply	  :	 1;
+		bdrkreg_t	cc_vch_one_reply	  :	 1;
+		bdrkreg_t	cc_vch_vector_request	  :	 1;
+		bdrkreg_t	cc_vch_nine_request	  :	 1;
+		bdrkreg_t	cc_vch_two_request	  :	 1;
+		bdrkreg_t	cc_vch_one_request	  :	 1;
+	} ni_channel_control_fld_s;
+} ni_channel_control_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register allows access to the LLP test logic.                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_channel_test_u {
+	bdrkreg_t	ni_channel_test_regval;
+	struct  {
+		bdrkreg_t	ct_testseed               :	20;
+                bdrkreg_t       ct_testmask               :      8;
+                bdrkreg_t       ct_testdata               :     20;
+                bdrkreg_t       ct_testvalid              :      1;
+                bdrkreg_t       ct_testcberr              :      1;
+                bdrkreg_t       ct_testflit               :      3;
+                bdrkreg_t       ct_testclear              :      1;
+                bdrkreg_t       ct_testerrcapture         :      1;
+                bdrkreg_t       ct_rsvd                   :      9;
+	} ni_channel_test_fld_s;
+} ni_channel_test_u_t;
+
+#else
+
+typedef union ni_channel_test_u {
+	bdrkreg_t	ni_channel_test_regval;
+	struct	{
+		bdrkreg_t	ct_rsvd			  :	 9;
+		bdrkreg_t	ct_testerrcapture	  :	 1;
+		bdrkreg_t	ct_testclear		  :	 1;
+		bdrkreg_t	ct_testflit		  :	 3;
+		bdrkreg_t	ct_testcberr		  :	 1;
+		bdrkreg_t	ct_testvalid		  :	 1;
+		bdrkreg_t	ct_testdata		  :	20;
+		bdrkreg_t	ct_testmask		  :	 8;
+		bdrkreg_t	ct_testseed		  :	20;
+	} ni_channel_test_fld_s;
+} ni_channel_test_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains LLP port parameters and enables for the      *
+ * capture of header data.                                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_port_parms_u {
+	bdrkreg_t	ni_port_parms_regval;
+	struct  {
+		bdrkreg_t	pp_max_burst              :	10;
+                bdrkreg_t       pp_null_timeout           :      6;
+                bdrkreg_t       pp_max_retry              :     10;
+                bdrkreg_t       pp_d_avail_sel            :      2;
+                bdrkreg_t       pp_rsvd_1                 :      1;
+                bdrkreg_t       pp_first_err_enable       :      1;
+                bdrkreg_t       pp_squash_err_enable      :      1;
+                bdrkreg_t       pp_vch_err_enable         :      4;
+                bdrkreg_t       pp_rsvd                   :     29;
+	} ni_port_parms_fld_s;
+} ni_port_parms_u_t;
+
+#else
+
+typedef union ni_port_parms_u {
+	bdrkreg_t	ni_port_parms_regval;
+	struct	{
+		bdrkreg_t	pp_rsvd			  :	29;
+		bdrkreg_t	pp_vch_err_enable	  :	 4;
+		bdrkreg_t	pp_squash_err_enable	  :	 1;
+		bdrkreg_t	pp_first_err_enable	  :	 1;
+		bdrkreg_t	pp_rsvd_1		  :	 1;
+		bdrkreg_t	pp_d_avail_sel		  :	 2;
+		bdrkreg_t	pp_max_retry		  :	10;
+		bdrkreg_t	pp_null_timeout		  :	 6;
+		bdrkreg_t	pp_max_burst		  :	10;
+	} ni_port_parms_fld_s;
+} ni_port_parms_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains the age at which request and reply packets   *
+ * are injected into the network. This feature allows replies to be     *
+ * given a higher fixed priority than requests, which can be            *
+ * important in some network saturation situations.                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_channel_age_u {
+	bdrkreg_t	ni_channel_age_regval;
+	struct  {
+		bdrkreg_t	ca_request_inject_age     :	 8;
+                bdrkreg_t       ca_reply_inject_age       :      8;
+                bdrkreg_t       ca_rsvd                   :     48;
+	} ni_channel_age_fld_s;
+} ni_channel_age_u_t;
+
+#else
+
+typedef union ni_channel_age_u {
+	bdrkreg_t	ni_channel_age_regval;
+	struct	{
+		bdrkreg_t	ca_rsvd			  :	48;
+		bdrkreg_t	ca_reply_inject_age	  :	 8;
+		bdrkreg_t	ca_request_inject_age	  :	 8;
+	} ni_channel_age_fld_s;
+} ni_channel_age_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains latched LLP port and problematic message     *
+ * errors. The contents are the same information as the                 *
+ * NI_PORT_ERROR_CLEAR register, but, in this register read accesses    *
+ * are non-destructive. Bits [52:24] assert the NI interrupt.           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_port_errors_u {
+	bdrkreg_t	ni_port_errors_regval;
+	struct  {
+		bdrkreg_t	pe_sn_error_count         :	 8;
+                bdrkreg_t       pe_cb_error_count         :      8;
+                bdrkreg_t       pe_retry_count            :      8;
+                bdrkreg_t       pe_tail_timeout           :      4;
+                bdrkreg_t       pe_fifo_overflow          :      4;
+                bdrkreg_t       pe_external_short         :      4;
+                bdrkreg_t       pe_external_long          :      4;
+                bdrkreg_t       pe_external_bad_header    :      4;
+                bdrkreg_t       pe_internal_short         :      4;
+                bdrkreg_t       pe_internal_long          :      4;
+                bdrkreg_t       pe_link_reset_in          :      1;
+                bdrkreg_t       pe_rsvd                   :     11;
+	} ni_port_errors_fld_s;
+} ni_port_errors_u_t;
+
+#else
+
+typedef union ni_port_errors_u {
+	bdrkreg_t	ni_port_errors_regval;
+	struct	{
+		bdrkreg_t	pe_rsvd			  :	11;
+		bdrkreg_t	pe_link_reset_in	  :	 1;
+		bdrkreg_t	pe_internal_long	  :	 4;
+		bdrkreg_t	pe_internal_short	  :	 4;
+		bdrkreg_t	pe_external_bad_header	  :	 4;
+		bdrkreg_t	pe_external_long	  :	 4;
+		bdrkreg_t	pe_external_short	  :	 4;
+		bdrkreg_t	pe_fifo_overflow	  :	 4;
+		bdrkreg_t	pe_tail_timeout		  :	 4;
+		bdrkreg_t	pe_retry_count		  :	 8;
+		bdrkreg_t	pe_cb_error_count	  :	 8;
+		bdrkreg_t	pe_sn_error_count	  :	 8;
+	} ni_port_errors_fld_s;
+} ni_port_errors_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register provides the sideband data associated with the        *
+ * NI_PORT_HEADER registers and also additional data for error          *
+ * processing. This register is not cleared on reset.                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_port_sideband_u {
+	bdrkreg_t	ni_port_sideband_regval;
+	struct  {
+		bdrkreg_t	ps_sideband               :	 8;
+                bdrkreg_t       ps_bad_dest               :      1;
+                bdrkreg_t       ps_bad_prexsel            :      1;
+                bdrkreg_t       ps_rcv_error              :      1;
+                bdrkreg_t       ps_bad_message            :      1;
+                bdrkreg_t       ps_squash                 :      1;
+                bdrkreg_t       ps_sn_status              :      1;
+                bdrkreg_t       ps_cb_status              :      1;
+                bdrkreg_t       ps_send_error             :      1;
+                bdrkreg_t       ps_vch_active             :      4;
+                bdrkreg_t       ps_rsvd                   :     44;
+	} ni_port_sideband_fld_s;
+} ni_port_sideband_u_t;
+
+#else
+
+typedef union ni_port_sideband_u {
+	bdrkreg_t	ni_port_sideband_regval;
+	struct	{
+		bdrkreg_t	ps_rsvd			  :	44;
+		bdrkreg_t	ps_vch_active		  :	 4;
+		bdrkreg_t	ps_send_error		  :	 1;
+		bdrkreg_t	ps_cb_status		  :	 1;
+		bdrkreg_t	ps_sn_status		  :	 1;
+		bdrkreg_t	ps_squash		  :	 1;
+		bdrkreg_t	ps_bad_message		  :	 1;
+		bdrkreg_t	ps_rcv_error		  :	 1;
+		bdrkreg_t	ps_bad_prexsel		  :	 1;
+		bdrkreg_t	ps_bad_dest		  :	 1;
+		bdrkreg_t	ps_sideband		  :	 8;
+	} ni_port_sideband_fld_s;
+} ni_port_sideband_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register contains latched LLP port and problematic message     *
+ * errors. The contents are the same information as the                 *
+ * NI_PORT_ERROR_CLEAR register, but, in this register read accesses    *
+ * are non-destructive. Bits [52:24] assert the NI interrupt.           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_port_error_clear_u {
+	bdrkreg_t	ni_port_error_clear_regval;
+	struct  {
+		bdrkreg_t	pec_sn_error_count        :	 8;
+                bdrkreg_t       pec_cb_error_count        :      8;
+                bdrkreg_t       pec_retry_count           :      8;
+                bdrkreg_t       pec_tail_timeout          :      4;
+                bdrkreg_t       pec_fifo_overflow         :      4;
+                bdrkreg_t       pec_external_short        :      4;
+                bdrkreg_t       pec_external_long         :      4;
+                bdrkreg_t       pec_external_bad_header   :      4;
+                bdrkreg_t       pec_internal_short        :      4;
+                bdrkreg_t       pec_internal_long         :      4;
+                bdrkreg_t       pec_link_reset_in         :      1;
+                bdrkreg_t       pec_rsvd                  :     11;
+	} ni_port_error_clear_fld_s;
+} ni_port_error_clear_u_t;
+
+#else
+
+typedef union ni_port_error_clear_u {
+	bdrkreg_t	ni_port_error_clear_regval;
+	struct	{
+		bdrkreg_t	pec_rsvd		  :	11;
+		bdrkreg_t	pec_link_reset_in	  :	 1;
+		bdrkreg_t	pec_internal_long	  :	 4;
+		bdrkreg_t	pec_internal_short	  :	 4;
+		bdrkreg_t	pec_external_bad_header	  :	 4;
+		bdrkreg_t	pec_external_long	  :	 4;
+		bdrkreg_t	pec_external_short	  :	 4;
+		bdrkreg_t	pec_fifo_overflow	  :	 4;
+		bdrkreg_t	pec_tail_timeout	  :	 4;
+		bdrkreg_t	pec_retry_count		  :	 8;
+		bdrkreg_t	pec_cb_error_count	  :	 8;
+		bdrkreg_t	pec_sn_error_count	  :	 8;
+	} ni_port_error_clear_fld_s;
+} ni_port_error_clear_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Lookup table for the next hop's exit port. The table entry          *
+ * selection is based on the 7-bit LocalCube routing destination.       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_local_table_0_u {
+	bdrkreg_t	ni_local_table_0_regval;
+	struct  {
+		bdrkreg_t	lt0_next_exit_port        :	 4;
+                bdrkreg_t       lt0_next_vch_lsb          :      1;
+                bdrkreg_t       lt0_rsvd                  :     59;
+	} ni_local_table_0_fld_s;
+} ni_local_table_0_u_t;
+
+#else
+
+typedef union ni_local_table_0_u {
+	bdrkreg_t	ni_local_table_0_regval;
+	struct	{
+		bdrkreg_t	lt0_rsvd		  :	59;
+		bdrkreg_t	lt0_next_vch_lsb	  :	 1;
+		bdrkreg_t	lt0_next_exit_port	  :	 4;
+	} ni_local_table_0_fld_s;
+} ni_local_table_0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Lookup table for the next hop's exit port. The table entry          *
+ * selection is based on the 7-bit LocalCube routing destination.       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_local_table_127_u {
+	bdrkreg_t	ni_local_table_127_regval;
+	struct  {
+		bdrkreg_t	lt1_next_exit_port        :	 4;
+                bdrkreg_t       lt1_next_vch_lsb          :      1;
+                bdrkreg_t       lt1_rsvd                  :     59;
+	} ni_local_table_127_fld_s;
+} ni_local_table_127_u_t;
+
+#else
+
+typedef union ni_local_table_127_u {
+	bdrkreg_t	ni_local_table_127_regval;
+	struct	{
+		bdrkreg_t	lt1_rsvd		  :	59;
+		bdrkreg_t	lt1_next_vch_lsb	  :	 1;
+		bdrkreg_t	lt1_next_exit_port	  :	 4;
+	} ni_local_table_127_fld_s;
+} ni_local_table_127_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Lookup table for the next hop's exit port. The table entry          *
+ * selection is based on the 1-bit MetaCube routing destination.        *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_global_table_u {
+	bdrkreg_t	ni_global_table_regval;
+	struct  {
+		bdrkreg_t	gt_next_exit_port         :	 4;
+                bdrkreg_t       gt_next_vch_lsb           :      1;
+                bdrkreg_t       gt_rsvd                   :     59;
+	} ni_global_table_fld_s;
+} ni_global_table_u_t;
+
+#else
+
+typedef union ni_global_table_u {
+	bdrkreg_t	ni_global_table_regval;
+	struct	{
+		bdrkreg_t	gt_rsvd			  :	59;
+		bdrkreg_t	gt_next_vch_lsb		  :	 1;
+		bdrkreg_t	gt_next_exit_port	  :	 4;
+	} ni_global_table_fld_s;
+} ni_global_table_u_t;
+
+#endif
+
+
+
+
+
+
+#endif /* _LANGUAGE_C */
+
+/************************************************************************
+ *                                                                      *
+ * The following defines which were not formed into structures are      *
+ * probably identical to another register, and the name of the          *
+ * register is provided against each of these registers. This           *
+ * information needs to be checked carefully.                           *
+ *                                                                      *
+ *           NI_LOCAL_TABLE_1          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_2          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_3          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_4          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_5          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_6          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_7          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_8          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_9          NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_10         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_11         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_12         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_13         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_14         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_15         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_16         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_17         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_18         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_19         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_20         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_21         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_22         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_23         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_24         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_25         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_26         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_27         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_28         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_29         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_30         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_31         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_32         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_33         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_34         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_35         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_36         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_37         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_38         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_39         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_40         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_41         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_42         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_43         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_44         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_45         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_46         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_47         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_48         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_49         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_50         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_51         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_52         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_53         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_54         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_55         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_56         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_57         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_58         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_59         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_60         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_61         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_62         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_63         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_64         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_65         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_66         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_67         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_68         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_69         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_70         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_71         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_72         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_73         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_74         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_75         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_76         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_77         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_78         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_79         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_80         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_81         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_82         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_83         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_84         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_85         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_86         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_87         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_88         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_89         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_90         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_91         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_92         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_93         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_94         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_95         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_96         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_97         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_98         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_99         NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_100        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_101        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_102        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_103        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_104        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_105        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_106        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_107        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_108        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_109        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_110        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_111        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_112        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_113        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_114        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_115        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_116        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_117        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_118        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_119        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_120        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_121        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_122        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_123        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_124        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_125        NI_LOCAL_TABLE_0                 *
+ *           NI_LOCAL_TABLE_126        NI_LOCAL_TABLE_0                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+/************************************************************************
+ *                                                                      *
+ * The following defines were not formed into structures                *
+ *                                                                      *
+ * This could be because the document did not contain details of the    *
+ * register, or because the automated script did not recognize the      *
+ * register details in the documentation. If these registers need       *
+ * structure definition, please create them manually                    *
+ *                                                                      *
+ *           NI_PORT_HEADER_A         0x680108                          *
+ *           NI_PORT_HEADER_B         0x680110                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+/************************************************************************
+ *                                                                      *
+ *               MAKE ALL ADDITIONS AFTER THIS LINE                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+
+#endif /* _ASM_SN_SN1_HUBNI_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubni_next.h linux/include/asm-ia64/sn/sn1/hubni_next.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubni_next.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubni_next.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,175 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBNI_NEXT_H
+#define _ASM_SN_SN1_HUBNI_NEXT_H
+
+#define NI_LOCAL_ENTRIES        128
+#define NI_META_ENTRIES        1
+
+#define NI_LOCAL_TABLE(_x)      (NI_LOCAL_TABLE_0 + (8 * (_x)))
+#define NI_META_TABLE(_x)       (NI_GLOBAL_TABLE + (8 * (_x)))
+
+/**************************************************************
+
+  Masks and shifts for NI registers are defined below. 
+
+**************************************************************/
+
+#define NPS_LINKUP_SHFT        1
+#define NPS_LINKUP_MASK        (UINT64_CAST 0x1 << 1)
+
+
+#define NPR_LOCALRESET          (UINT64_CAST 1 << 2)    /* Reset loc. bdrck */
+#define NPR_PORTRESET           (UINT64_CAST 1 << 1)    /* Send warm reset  */
+#define NPR_LINKRESET           (UINT64_CAST 1 << 0)    /* Send link reset  */
+
+/* NI_DIAG_PARMS bit definitions */
+#define NDP_SENDERROR           (UINT64_CAST 1 <<  0)   /* Send data error  */
+#define NDP_PORTDISABLE         (UINT64_CAST 1 <<  1)   /* Port disable     */
+#define NDP_SENDERROFF          (UINT64_CAST 1 <<  2)   /* Disable send error recovery */
+
+
+/* NI_PORT_ERROR mask and shift definitions (some are not present in SN0) */
+
+#define NPE_LINKRESET		(UINT64_CAST 1 << 52)
+#define NPE_INTLONG_SHFT	48
+#define NPE_INTLONG_MASK	(UINT64_CAST 0xf << NPE_INTLONG_SHFT)
+#define NPE_INTSHORT_SHFT	44
+#define NPE_INTSHORT_MASK	(UINT64_CAST 0xf << NPE_INTSHORT_SHFT)
+#define NPE_EXTBADHEADER_SHFT	40
+#define NPE_EXTBADHEADER_MASK	(UINT64_CAST 0xf << NPE_EXTBADHEADER_SHFT)
+#define NPE_EXTLONG_SHFT	36
+#define NPE_EXTLONG_MASK	(UINT64_CAST 0xf << NPE_EXTLONG_SHFT)
+#define NPE_EXTSHORT_SHFT	32
+#define NPE_EXTSHORT_MASK	(UINT64_CAST 0xf << NPE_EXTSHORT_SHFT)
+#define NPE_FIFOOVFLOW_SHFT	28
+#define NPE_FIFOOVFLOW_MASK	(UINT64_CAST 0xf << NPE_FIFOOVFLOW_SHFT)
+#define NPE_TAILTO_SHFT		24
+#define NPE_TAILTO_MASK		(UINT64_CAST 0xf << NPE_TAILTO_SHFT)
+#define NPE_RETRYCOUNT_SHFT	16
+#define NPE_RETRYCOUNT_MASK	(UINT64_CAST 0xff << NPE_RETRYCOUNT_SHFT)
+#define NPE_CBERRCOUNT_SHFT	8
+#define NPE_CBERRCOUNT_MASK	(UINT64_CAST 0xff << NPE_CBERRCOUNT_SHFT)
+#define NPE_SNERRCOUNT_SHFT	0
+#define NPE_SNERRCOUNT_MASK	(UINT64_CAST 0xff << NPE_SNERRCOUNT_SHFT)
+
+#define NPE_COUNT_MAX		0xff
+
+#define NPE_FATAL_ERRORS	(NPE_LINKRESET | NPE_INTLONG_MASK |\
+				 NPE_INTSHORT_MASK | NPE_EXTBADHEADER_MASK |\
+				 NPE_EXTLONG_MASK | NPE_EXTSHORT_MASK |\
+				 NPE_FIFOOVFLOW_MASK | NPE_TAILTO_MASK)
+
+#ifdef _LANGUAGE_C
+/* NI_PORT_HEADER[AB] registers (not automatically generated) */
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_port_header_a_u {
+	bdrkreg_t	ni_port_header_a_regval;
+	struct  {
+		bdrkreg_t	pha_v                     :	 1;
+                bdrkreg_t       pha_age                   :      8;
+                bdrkreg_t       pha_direction             :      4;
+                bdrkreg_t       pha_destination           :      8;
+                bdrkreg_t       pha_reserved_1            :      3;
+                bdrkreg_t       pha_command               :      8;
+                bdrkreg_t       pha_prexsel               :      3;
+                bdrkreg_t       pha_address_b             :     27;
+                bdrkreg_t       pha_reserved              :      2;
+	} ni_port_header_a_fld_s;
+} ni_port_header_a_u_t;
+
+#else
+
+typedef union ni_port_header_a_u {
+	bdrkreg_t	ni_port_header_a_regval;
+	struct	{
+		bdrkreg_t	pha_reserved		  :	 2;
+		bdrkreg_t	pha_address_b		  :	27;
+		bdrkreg_t	pha_prexsel		  :	 3;
+		bdrkreg_t	pha_command		  :	 8;
+		bdrkreg_t	pha_reserved_1		  :	 3;
+		bdrkreg_t	pha_destination		  :	 8;
+		bdrkreg_t	pha_direction		  :	 4;
+		bdrkreg_t	pha_age			  :	 8;
+		bdrkreg_t	pha_v			  :	 1;
+	} ni_port_header_a_fld_s;
+} ni_port_header_a_u_t;
+
+#endif
+
+#ifdef LITTLE_ENDIAN
+
+typedef union ni_port_header_b_u {
+	bdrkreg_t	ni_port_header_b_regval;
+	struct  {
+		bdrkreg_t	phb_supplemental           :	11;
+                bdrkreg_t       phb_reserved_2            :      5;
+                bdrkreg_t       phb_source                :     11;
+                bdrkreg_t       phb_reserved_1            :      8;
+                bdrkreg_t       phb_address_a             :      3;
+                bdrkreg_t       phb_address_c             :      8;
+                bdrkreg_t       phb_reserved              :     18;
+	} ni_port_header_b_fld_s;
+} ni_port_header_b_u_t;
+
+#else
+
+typedef union ni_port_header_b_u {
+	bdrkreg_t	ni_port_header_b_regval;
+	struct	{
+		bdrkreg_t	phb_reserved		  :	18;
+		bdrkreg_t	phb_address_c		  :	 8;
+		bdrkreg_t	phb_address_a		  :	 3;
+		bdrkreg_t	phb_reserved_1		  :	 8;
+		bdrkreg_t	phb_source		  :	11;
+		bdrkreg_t	phb_reserved_2		  :	 5;
+		bdrkreg_t	phb_supplemental	   :	11;
+	} ni_port_header_b_fld_s;
+} ni_port_header_b_u_t;
+
+#endif
+#endif
+
+/* NI_RESET_ENABLE mask definitions */
+
+#define NRE_RESETOK		(UINT64_CAST 1)	/* Let LLP reset bedrock */
+
+/* NI PORT_ERRORS, Max number of RETRY_COUNT, Check Bit, and Sequence   */
+/* Number errors (8 bit counters that do not wrap).                     */
+#define NI_LLP_RETRY_MAX        0xff
+#define NI_LLP_CB_MAX           0xff
+#define NI_LLP_SN_MAX           0xff
+
+/* NI_PORT_PARMS shift and mask definitions */
+
+#define NPP_VCH_ERR_EN_SHFT	31
+#define NPP_VCH_ERR_EN_MASK	(UINT64_CAST 0xf << NPP_VCH_ERR_EN_SHFT)
+#define NPP_SQUASH_ERR_EN_SHFT	30
+#define NPP_SQUASH_ERR_EN_MASK	(0x1 << NPP_SQUASH_ERR_EN_SHFT)
+#define NPP_FIRST_ERR_EN_SHFT	29
+#define NPP_FIRST_ERR_EN_MASK	(0x1 << NPP_FIRST_ERR_EN_SHFT)
+#define NPP_D_AVAIL_SEL_SHFT	26
+#define NPP_D_AVAIL_SEL_MASK	(0x3 << NPP_D_AVAIL_SEL_SHFT)
+#define NPP_MAX_RETRY_SHFT	16
+#define NPP_MAX_RETRY_MASK	(0x3ff << NPP_MAX_RETRY_SHFT)
+#define NPP_NULL_TIMEOUT_SHFT	10
+#define NPP_NULL_TIMEOUT_MASK	(0x3f << NPP_NULL_TIMEOUT_SHFT)
+#define NPP_MAX_BURST_SHFT	0
+#define NPP_MAX_BURST_MASK	(0x3ff << NPP_MAX_BURST_SHFT)
+
+#define NPP_RESET_DEFAULTS	(UINT64_CAST 0xf << NPP_VCH_ERR_EN_SHFT | \
+				 0x1 << NPP_FIRST_ERR_EN_SHFT | \
+				 0x3ff << NPP_MAX_RETRY_SHFT |  \
+				 0x6 << NPP_NULL_TIMEOUT_SHFT | \
+				 0x3f0 << NPP_MAX_BURST_SHFT)
+
+#endif  /* _ASM_SN_SN1_HUBNI_NEXT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubpi.h linux/include/asm-ia64/sn/sn1/hubpi.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubpi.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubpi.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,4264 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBPI_H
+#define _ASM_SN_SN1_HUBPI_H
+
+/************************************************************************
+ *                                                                      *
+ *      WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!      *
+ *                                                                      *
+ * This file is created by an automated script. Any (minimal) changes   *
+ * made manually to this  file should be made with care.                *
+ *                                                                      *
+ *               MAKE ALL ADDITIONS TO THE END OF THIS FILE             *
+ *                                                                      *
+ ************************************************************************/
+
+
+#define    PI_CPU_PROTECT            0x00000000    /* CPU Protection         */
+
+
+
+#define    PI_PROT_OVRRD             0x00000008    /*
+                                                    * Clear CPU
+                                                    * Protection bit in 
+                                                    * CPU_PROTECT
+                                                    */
+
+
+
+#define    PI_IO_PROTECT             0x00000010    /*
+                                                    * Interrupt Pending
+                                                    * Protection for IO
+                                                    * access
+                                                    */
+
+
+
+#define    PI_REGION_PRESENT         0x00000018    /* Region present         */
+
+
+
+#define    PI_CPU_NUM                0x00000020    /* CPU Number ID          */
+
+
+
+#define    PI_CALIAS_SIZE            0x00000028    /* Cached Alias Size      */
+
+
+
+#define    PI_MAX_CRB_TIMEOUT        0x00000030    /*
+                                                    * Maximum Timeout for
+                                                    * CRB
+                                                    */
+
+
+
+#define    PI_CRB_SFACTOR            0x00000038    /*
+                                                    * Scale Factor for
+                                                    * CRB Timeout
+                                                    */
+
+
+
+#define    PI_CPU_PRESENT_A          0x00000040    /*
+                                                    * CPU Present for
+                                                    * CPU_A
+                                                    */
+
+
+
+#define    PI_CPU_PRESENT_B          0x00000048    /*
+                                                    * CPU Present for
+                                                    * CPU_B
+                                                    */
+
+
+
+#define    PI_CPU_ENABLE_A           0x00000050    /*
+                                                    * CPU Enable for
+                                                    * CPU_A
+                                                    */
+
+
+
+#define    PI_CPU_ENABLE_B           0x00000058    /*
+                                                    * CPU Enable for
+                                                    * CPU_B
+                                                    */
+
+
+
+#define    PI_REPLY_LEVEL            0x00010060    /*
+                                                    * Reply FIFO Priority
+                                                    * Control
+                                                    */
+
+
+
+#define    PI_GFX_CREDIT_MODE        0x00020068    /*
+                                                    * Graphics Credit
+                                                    * Mode
+                                                    */
+
+
+
+#define    PI_NMI_A                  0x00000070    /*
+                                                    * Non-maskable
+                                                    * Interrupt to CPU A
+                                                    */
+
+
+
+#define    PI_NMI_B                  0x00000078    /*
+                                                    * Non-maskable
+                                                    * Interrupt to CPU B
+                                                    */
+
+
+
+#define    PI_INT_PEND_MOD           0x00000090    /*
+                                                    * Interrupt Pending
+                                                    * Modify
+                                                    */
+
+
+
+#define    PI_INT_PEND0              0x00000098    /* Interrupt Pending 0    */
+
+
+
+#define    PI_INT_PEND1              0x000000A0    /* Interrupt Pending 1    */
+
+
+
+#define    PI_INT_MASK0_A            0x000000A8    /*
+                                                    * Interrupt Mask 0
+                                                    * for CPU A
+                                                    */
+
+
+
+#define    PI_INT_MASK1_A            0x000000B0    /*
+                                                    * Interrupt Mask 1
+                                                    * for CPU A
+                                                    */
+
+
+
+#define    PI_INT_MASK0_B            0x000000B8    /*
+                                                    * Interrupt Mask 0
+                                                    * for CPU B
+                                                    */
+
+
+
+#define    PI_INT_MASK1_B            0x000000C0    /*
+                                                    * Interrupt Mask 1
+                                                    * for CPU B
+                                                    */
+
+
+
+#define    PI_CC_PEND_SET_A          0x000000C8    /*
+                                                    * CC Interrupt
+                                                    * Pending for CPU A
+                                                    */
+
+
+
+#define    PI_CC_PEND_SET_B          0x000000D0    /*
+                                                    * CC Interrupt
+                                                    * Pending for CPU B
+                                                    */
+
+
+
+#define    PI_CC_PEND_CLR_A          0x000000D8    /*
+                                                    * CPU to CPU
+                                                    * Interrupt Pending
+                                                    * Clear for CPU A
+                                                    */
+
+
+
+#define    PI_CC_PEND_CLR_B          0x000000E0    /*
+                                                    * CPU to CPU
+                                                    * Interrupt Pending
+                                                    * Clear for CPU B
+                                                    */
+
+
+
+#define    PI_CC_MASK                0x000000E8    /*
+                                                    * Mask of both
+                                                    * CC_PENDs
+                                                    */
+
+
+
+#define    PI_INT_PEND1_REMAP        0x000000F0    /*
+                                                    * Remap Interrupt
+                                                    * Pending
+                                                    */
+
+
+
+#define    PI_RT_COUNTER             0x00030100    /* Real Time Counter      */
+
+
+
+#define    PI_RT_COMPARE_A           0x00000108    /* Real Time Compare A    */
+
+
+
+#define    PI_RT_COMPARE_B           0x00000110    /* Real Time Compare B    */
+
+
+
+#define    PI_PROFILE_COMPARE        0x00000118    /* Profiling Compare      */
+
+
+
+#define    PI_RT_INT_PEND_A          0x00000120    /*
+                                                    * RT interrupt
+                                                    * pending
+                                                    */
+
+
+
+#define    PI_RT_INT_PEND_B          0x00000128    /*
+                                                    * RT interrupt
+                                                    * pending
+                                                    */
+
+
+
+#define    PI_PROF_INT_PEND_A        0x00000130    /*
+                                                    * Profiling interrupt
+                                                    * pending
+                                                    */
+
+
+
+#define    PI_PROF_INT_PEND_B        0x00000138    /*
+                                                    * Profiling interrupt
+                                                    * pending
+                                                    */
+
+
+
+#define    PI_RT_INT_EN_A            0x00000140    /* RT Interrupt Enable    */
+
+
+
+#define    PI_RT_INT_EN_B            0x00000148    /* RT Interrupt Enable    */
+
+
+
+#define    PI_PROF_INT_EN_A          0x00000150    /*
+                                                    * Profiling Interrupt
+                                                    * Enable
+                                                    */
+
+
+
+#define    PI_PROF_INT_EN_B          0x00000158    /*
+                                                    * Profiling Interrupt
+                                                    * Enable
+                                                    */
+
+
+
+#define    PI_DEBUG_SEL              0x00000160    /* PI Debug Select        */
+
+
+
+#define    PI_INT_PEND_MOD_ALIAS     0x00000180    /*
+                                                    * Interrupt Pending
+                                                    * Modify
+                                                    */
+
+
+
+#define    PI_PERF_CNTL_A            0x00040200    /*
+                                                    * Performance Counter
+                                                    * Control A
+                                                    */
+
+
+
+#define    PI_PERF_CNTR0_A           0x00040208    /*
+                                                    * Performance Counter
+                                                    * 0 A
+                                                    */
+
+
+
+#define    PI_PERF_CNTR1_A           0x00040210    /*
+                                                    * Performance Counter
+                                                    * 1 A
+                                                    */
+
+
+
+#define    PI_PERF_CNTL_B            0x00050200    /*
+                                                    * Performance Counter
+                                                    * Control B
+                                                    */
+
+
+
+#define    PI_PERF_CNTR0_B           0x00050208    /*
+                                                    * Performance Counter
+                                                    * 0 B
+                                                    */
+
+
+
+#define    PI_PERF_CNTR1_B           0x00050210    /*
+                                                    * Performance Counter
+                                                    * 1 B
+                                                    */
+
+
+
+#define    PI_GFX_PAGE_A             0x00000300    /* Graphics Page          */
+
+
+
+#define    PI_GFX_CREDIT_CNTR_A      0x00000308    /*
+                                                    * Graphics Credit
+                                                    * Counter
+                                                    */
+
+
+
+#define    PI_GFX_BIAS_A             0x00000310    /* TRex+ BIAS             */
+
+
+
+#define    PI_GFX_INT_CNTR_A         0x00000318    /*
+                                                    * Graphics Interrupt
+                                                    * Counter
+                                                    */
+
+
+
+#define    PI_GFX_INT_CMP_A          0x00000320    /*
+                                                    * Graphics Interrupt
+                                                    * Compare
+                                                    */
+
+
+
+#define    PI_GFX_PAGE_B             0x00000328    /* Graphics Page          */
+
+
+
+#define    PI_GFX_CREDIT_CNTR_B      0x00000330    /*
+                                                    * Graphics Credit
+                                                    * Counter
+                                                    */
+
+
+
+#define    PI_GFX_BIAS_B             0x00000338    /* TRex+ BIAS             */
+
+
+
+#define    PI_GFX_INT_CNTR_B         0x00000340    /*
+                                                    * Graphics Interrupt
+                                                    * Counter
+                                                    */
+
+
+
+#define    PI_GFX_INT_CMP_B          0x00000348    /*
+                                                    * Graphics Interrupt
+                                                    * Compare
+                                                    */
+
+
+
+#define    PI_ERR_INT_PEND_WR        0x000003F8    /*
+                                                    * Error Interrupt
+                                                    * Pending (Writable)
+                                                    */
+
+
+
+#define    PI_ERR_INT_PEND           0x00000400    /*
+                                                    * Error Interrupt
+                                                    * Pending
+                                                    */
+
+
+
+#define    PI_ERR_INT_MASK_A         0x00000408    /*
+                                                    * Error Interrupt
+                                                    * Mask CPU_A
+                                                    */
+
+
+
+#define    PI_ERR_INT_MASK_B         0x00000410    /*
+                                                    * Error Interrupt
+                                                    * Mask CPU_B
+                                                    */
+
+
+
+#define    PI_ERR_STACK_ADDR_A       0x00000418    /*
+                                                    * Error Stack Address
+                                                    * Pointer
+                                                    */
+
+
+
+#define    PI_ERR_STACK_ADDR_B       0x00000420    /*
+                                                    * Error Stack Address
+                                                    * Pointer
+                                                    */
+
+
+
+#define    PI_ERR_STACK_SIZE         0x00000428    /* Error Stack Size       */
+
+
+
+#define    PI_ERR_STATUS0_A          0x00000430    /* Error Status 0         */
+
+
+
+#define    PI_ERR_STATUS0_A_CLR      0x00000438    /* Error Status 0         */
+
+
+
+#define    PI_ERR_STATUS1_A          0x00000440    /* Error Status 1         */
+
+
+
+#define    PI_ERR_STATUS1_A_CLR      0x00000448    /* Error Status 1         */
+
+
+
+#define    PI_ERR_STATUS0_B          0x00000450    /* Error Status 0         */
+
+
+
+#define    PI_ERR_STATUS0_B_CLR      0x00000458    /* Error Status 0         */
+
+
+
+#define    PI_ERR_STATUS1_B          0x00000460    /* Error Status 1         */
+
+
+
+#define    PI_ERR_STATUS1_B_CLR      0x00000468    /* Error Status 1         */
+
+
+
+#define    PI_SPOOL_CMP_A            0x00000470    /* Spool Compare          */
+
+
+
+#define    PI_SPOOL_CMP_B            0x00000478    /* Spool Compare          */
+
+
+
+#define    PI_CRB_TIMEOUT_A          0x00000480    /*
+                                                    * CRB entries which
+                                                    * have timed out but
+                                                    * are still valid
+                                                    */
+
+
+
+#define    PI_CRB_TIMEOUT_B          0x00000488    /*
+                                                    * CRB entries which
+                                                    * have timed out but
+                                                    * are still valid
+                                                    */
+
+
+
+#define    PI_SYSAD_ERRCHK_EN        0x00000490    /*
+                                                    * enables
+                                                    * sysad/cmd/state
+                                                    * error checking
+                                                    */
+
+
+
+#define    PI_FORCE_BAD_CHECK_BIT_A  0x00000498    /*
+                                                    * force SysAD Check
+                                                    * Bit error
+                                                    */
+
+
+
+#define    PI_FORCE_BAD_CHECK_BIT_B  0x000004A0    /*
+                                                    * force SysAD Check
+                                                    * Bit error
+                                                    */
+
+
+
+#define    PI_NACK_CNT_A             0x000004A8    /*
+                                                    * consecutive NACK
+                                                    * counter
+                                                    */
+
+
+
+#define    PI_NACK_CNT_B             0x000004B0    /*
+                                                    * consecutive NACK
+                                                    * counter
+                                                    */
+
+
+
+#define    PI_NACK_CMP               0x000004B8    /* NACK count compare     */
+
+
+
+#define    PI_SPOOL_MASK             0x000004C0    /* Spool error mask       */
+
+
+
+#define    PI_SPURIOUS_HDR_0         0x000004C8    /* Spurious Error 0       */
+
+
+
+#define    PI_SPURIOUS_HDR_1         0x000004D0    /* Spurious Error 1       */
+
+
+
+#define    PI_ERR_INJECT             0x000004D8    /*
+                                                    * SysAD bus error
+                                                    * injection
+                                                    */
+
+
+
+
+
+#ifdef _LANGUAGE_C
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This read/write register determines on a               *
+ * bit-per-region basis whether incoming CPU-initiated PIO Read and     *
+ * Write to local PI registers are allowed. If access is allowed, the   *
+ * PI's response to a partial read is a PRPLY message, and the          *
+ * response to a partial write is a PACK message. If access is not      *
+ * allowed, the PI's response to a partial read is a PRERR message,     *
+ * and the response to a partial write is a PWERR message.              *
+ * This register is not reset by a soft reset.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_cpu_protect_u {
+	bdrkreg_t	pi_cpu_protect_regval;
+	struct  {
+		bdrkreg_t	cp_cpu_protect            :	64;
+	} pi_cpu_protect_fld_s;
+} pi_cpu_protect_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  A write with a special data pattern allows any CPU to set its       *
+ * region's bit in CPU_PROTECT. This register has data pattern          *
+ * protection.                                                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_prot_ovrrd_u {
+	bdrkreg_t	pi_prot_ovrrd_regval;
+	struct  {
+		bdrkreg_t	po_prot_ovrrd             :	64; /* data-pattern write sets own region bit in CPU_PROTECT */
+	} pi_prot_ovrrd_fld_s;
+} pi_prot_ovrrd_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This read/write register determines on a               *
+ * bit-per-region basis whether incoming IO-initiated interrupts are    *
+ * allowed to set bits in INT_PEND0 and INT_PEND1. If access is         *
+ * allowed, the PI's response to a partial read is a PRPLY message,     *
+ * and the response to a partial write is a PACK message. If access     *
+ * is not allowed, the PI's response to a partial read is a PRERR       *
+ * message, and the response to a partial write is a PWERR message.     *
+ * This register is not reset by a soft reset.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_io_protect_u {
+	bdrkreg_t	pi_io_protect_regval;
+	struct  {
+		bdrkreg_t	ip_io_protect             :	64; /* bit-per-region: allow IO-initiated interrupts */
+	} pi_io_protect_fld_s;
+} pi_io_protect_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This read/write register determines on a               *
+ * bit-per-region basis whether read access from a local processor to   *
+ * the region is permissible. For example, setting a bit to 0           *
+ * prevents speculative reads to that non-existent node. If a read      *
+ * request to a non-present region occurs, an ERR response is issued    *
+ * to the TRex+ (no PI error registers are modified). It is up to       *
+ * software to load this register with the proper contents.             *
+ * Region-present checking is only done for coherent read requests -    *
+ * partial reads/writes will be issued to a non-present region. The     *
+ * setting of these bits does not affect a node's access to its         *
+ * CALIAS space.                                                        *
+ * This register is not reset by a soft reset.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_region_present_u {
+	bdrkreg_t	pi_region_present_regval;
+	struct  {
+		bdrkreg_t	rp_region_present         :	64; /* bit-per-region: region present for coherent reads */
+	} pi_region_present_fld_s;
+} pi_region_present_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  A read to the location will allow a CPU to identify itself as       *
+ * either CPU_A or CPU_B, and will indicate whether the CPU is          *
+ * connected to PI 0 or PI 1.                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_cpu_num_u {
+	bdrkreg_t	pi_cpu_num_regval;
+	struct  {
+		bdrkreg_t	cn_cpu_num                :	 1; /* which CPU: A or B */
+                bdrkreg_t       cn_pi_id                  :      1; /* which PI: 0 or 1 */
+                bdrkreg_t       cn_rsvd                   :     62;
+	} pi_cpu_num_fld_s;
+} pi_cpu_num_u_t;
+
+#else
+
+typedef union pi_cpu_num_u {
+	bdrkreg_t	pi_cpu_num_regval;
+	struct	{
+		bdrkreg_t	cn_rsvd			  :	62;
+		bdrkreg_t	cn_pi_id		  :	 1; /* which PI: 0 or 1 */
+		bdrkreg_t	cn_cpu_num		  :	 1; /* which CPU: A or B */
+	} pi_cpu_num_fld_s;
+} pi_cpu_num_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This read/write location determines the size of the    *
+ * Calias Space.                                                        *
+ * This register is not reset by a soft reset.                          *
+ * NOTE: For predictable behavior, all Calias spaces in a system must   *
+ * be set to the same size.                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_calias_size_u {
+	bdrkreg_t	pi_calias_size_regval;
+	struct  {
+		bdrkreg_t	cs_calias_size            :	 4; /* size of the CALIAS space; must match system-wide */
+		bdrkreg_t       cs_rsvd                   :     60;
+	} pi_calias_size_fld_s;
+} pi_calias_size_u_t;
+
+#else
+
+typedef union pi_calias_size_u {
+	bdrkreg_t	pi_calias_size_regval;
+	struct	{
+		bdrkreg_t	cs_rsvd			  :	60;
+		bdrkreg_t	cs_calias_size		  :	 4; /* size of the CALIAS space; must match system-wide */
+	} pi_calias_size_fld_s;
+} pi_calias_size_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This Read/Write location determines at which value (increment)      *
+ * the CRB Timeout Counters cause a timeout error to occur. See         *
+ * Section 3.4.2.2, "Time-outs in RRB and WRB" in the                   *
+ * Processor Interface chapter, volume 1 of this document for more      *
+ * details.                                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_max_crb_timeout_u {
+	bdrkreg_t	pi_max_crb_timeout_regval;
+	struct  {
+		bdrkreg_t	mct_max_timeout           :	 8; /* CRB timeout-counter value that triggers a timeout error */
+		bdrkreg_t       mct_rsvd                  :     56;
+	} pi_max_crb_timeout_fld_s;
+} pi_max_crb_timeout_u_t;
+
+#else
+
+typedef union pi_max_crb_timeout_u {
+	bdrkreg_t	pi_max_crb_timeout_regval;
+	struct	{
+		bdrkreg_t	mct_rsvd		  :	56;
+		bdrkreg_t	mct_max_timeout		  :	 8; /* CRB timeout-counter value that triggers a timeout error */
+	} pi_max_crb_timeout_fld_s;
+} pi_max_crb_timeout_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This Read/Write location determines how often a valid CRB's         *
+ * Timeout Counter is incremented. See Section 3.4.2.2,                 *
+ * "Time-outs in RRB and WRB" in the Processor Interface                *
+ * chapter, volume 1 of this document for more details.                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_crb_sfactor_u {
+	bdrkreg_t	pi_crb_sfactor_regval;
+	struct  {
+		bdrkreg_t	cs_sfactor                :	24; /* how often a valid CRB's timeout counter increments */
+		bdrkreg_t       cs_rsvd                   :     40;
+	} pi_crb_sfactor_fld_s;
+} pi_crb_sfactor_u_t;
+
+#else
+
+typedef union pi_crb_sfactor_u {
+	bdrkreg_t	pi_crb_sfactor_regval;
+	struct	{
+		bdrkreg_t	cs_rsvd			  :	40;
+		bdrkreg_t	cs_sfactor		  :	24; /* how often a valid CRB's timeout counter increments */
+	} pi_crb_sfactor_fld_s;
+} pi_crb_sfactor_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. The PI sets this      *
+ * bit when it sees the first transaction initiated by the associated   *
+ * CPU.                                                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_cpu_present_a_u {
+	bdrkreg_t	pi_cpu_present_a_regval;
+	struct  {
+		bdrkreg_t	cpa_cpu_present           :	 1; /* set by PI on first transaction from CPU_A */
+		bdrkreg_t       cpa_rsvd                  :     63;
+	} pi_cpu_present_a_fld_s;
+} pi_cpu_present_a_u_t;
+
+#else
+
+typedef union pi_cpu_present_a_u {
+	bdrkreg_t	pi_cpu_present_a_regval;
+	struct	{
+		bdrkreg_t	cpa_rsvd		  :	63;
+		bdrkreg_t	cpa_cpu_present		  :	 1; /* set by PI on first transaction from CPU_A */
+	} pi_cpu_present_a_fld_s;
+} pi_cpu_present_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. The PI sets this      *
+ * bit when it sees the first transaction initiated by the associated   *
+ * CPU.                                                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_cpu_present_b_u {
+	bdrkreg_t	pi_cpu_present_b_regval;
+	struct  {
+		bdrkreg_t	cpb_cpu_present           :	 1; /* set by PI on first transaction from CPU_B */
+		bdrkreg_t       cpb_rsvd                  :     63;
+	} pi_cpu_present_b_fld_s;
+} pi_cpu_present_b_u_t;
+
+#else
+
+typedef union pi_cpu_present_b_u {
+	bdrkreg_t	pi_cpu_present_b_regval;
+	struct	{
+		bdrkreg_t	cpb_rsvd		  :	63;
+		bdrkreg_t	cpb_cpu_present		  :	 1; /* set by PI on first transaction from CPU_B */
+	} pi_cpu_present_b_fld_s;
+} pi_cpu_present_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There is one of these registers for each CPU. This     *
+ * Read/Write location determines whether the associated CPU is         *
+ * enabled to issue external requests. When this bit is zero for a      *
+ * processor, the PI ignores SysReq_L from that processor, and so       *
+ * never grants it the bus.                                             *
+ * This register is not reset by a soft reset.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_cpu_enable_a_u {
+	bdrkreg_t	pi_cpu_enable_a_regval;
+	struct  {
+		bdrkreg_t	cea_cpu_enable            :	 1; /* 0 => PI ignores SysReq_L from CPU_A */
+		bdrkreg_t       cea_rsvd                  :     63;
+	} pi_cpu_enable_a_fld_s;
+} pi_cpu_enable_a_u_t;
+
+#else
+
+typedef union pi_cpu_enable_a_u {
+	bdrkreg_t	pi_cpu_enable_a_regval;
+	struct	{
+		bdrkreg_t	cea_rsvd		  :	63;
+		bdrkreg_t	cea_cpu_enable		  :	 1; /* 0 => PI ignores SysReq_L from CPU_A */
+	} pi_cpu_enable_a_fld_s;
+} pi_cpu_enable_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There is one of these registers for each CPU. This     *
+ * Read/Write location determines whether the associated CPU is         *
+ * enabled to issue external requests. When this bit is zero for a      *
+ * processor, the PI ignores SysReq_L from that processor, and so       *
+ * never grants it the bus.                                             *
+ * This register is not reset by a soft reset.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_cpu_enable_b_u {
+	bdrkreg_t	pi_cpu_enable_b_regval;
+	struct  {
+		bdrkreg_t	ceb_cpu_enable            :	 1; /* 0 => PI ignores SysReq_L from CPU_B */
+		bdrkreg_t       ceb_rsvd                  :     63;
+	} pi_cpu_enable_b_fld_s;
+} pi_cpu_enable_b_u_t;
+
+#else
+
+typedef union pi_cpu_enable_b_u {
+	bdrkreg_t	pi_cpu_enable_b_regval;
+	struct	{
+		bdrkreg_t	ceb_rsvd		  :	63;
+		bdrkreg_t	ceb_cpu_enable		  :	 1; /* 0 => PI ignores SysReq_L from CPU_B */
+	} pi_cpu_enable_b_fld_s;
+} pi_cpu_enable_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. A write to this       *
+ * location will cause an NMI to be issued to the CPU.                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_nmi_a_u {
+	bdrkreg_t	pi_nmi_a_regval;
+	struct  {
+		bdrkreg_t	na_nmi_cpu                :	64; /* any write sends an NMI to CPU_A */
+	} pi_nmi_a_fld_s;
+} pi_nmi_a_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. A write to this       *
+ * location will cause an NMI to be issued to the CPU.                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_nmi_b_u {
+	bdrkreg_t	pi_nmi_b_regval;
+	struct  {
+		bdrkreg_t	nb_nmi_cpu                :	64; /* any write sends an NMI to CPU_B */
+	} pi_nmi_b_fld_s;
+} pi_nmi_b_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  A write to this register allows a single bit in the INT_PEND0 or    *
+ * INT_PEND1 registers to be set or cleared. If bit 6 is clear, a bit   *
+ * is modified in INT_PEND0, while if bit 6 is set, a bit is modified   *
+ * in INT_PEND1. The value in bits 5:0 (ranging from 63 to 0) will      *
+ * determine which bit in the register is affected. The value of bit 8  *
+ * will determine whether the desired bit is set (8=1) or cleared (8=0).*
+ * This is the only register which is accessible by IO issued PWRI      *
+ * command and is protected through the IO_PROTECT register. If the     *
+ * region bit in the IO_PROTECT is not set then a WERR reply is         *
+ * issued. CPU access is controlled through CPU_PROTECT. The contents   *
+ * of this register are masked with the contents of INT_MASK_A          *
+ * (INT_MASK_B) to determine whether an L2 interrupt is issued to       *
+ * CPU_A (CPU_B).                                                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_int_pend_mod_u {
+	bdrkreg_t	pi_int_pend_mod_regval;
+	struct  {
+		bdrkreg_t	ipm_bit_select            :	 6; /* which bit (63..0) to modify */
+                bdrkreg_t       ipm_reg_select            :      1; /* 0=INT_PEND0, 1=INT_PEND1 */
+                bdrkreg_t       ipm_rsvd_1                :      1;
+                bdrkreg_t       ipm_value                 :      1; /* 1=set the bit, 0=clear it */
+                bdrkreg_t       ipm_rsvd                  :     55;
+	} pi_int_pend_mod_fld_s;
+} pi_int_pend_mod_u_t;
+
+#else
+
+typedef union pi_int_pend_mod_u {
+	bdrkreg_t	pi_int_pend_mod_regval;
+	struct	{
+		bdrkreg_t	ipm_rsvd		  :	55;
+		bdrkreg_t	ipm_value		  :	 1; /* 1=set the bit, 0=clear it */
+		bdrkreg_t	ipm_rsvd_1		  :	 1;
+		bdrkreg_t	ipm_reg_select		  :	 1; /* 0=INT_PEND0, 1=INT_PEND1 */
+		bdrkreg_t	ipm_bit_select		  :	 6; /* which bit (63..0) to modify */
+	} pi_int_pend_mod_fld_s;
+} pi_int_pend_mod_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This read-only register provides information about interrupts       *
+ * that are currently pending. The interrupts in this register map to   *
+ * interrupt level 2 (L2). The GFX_INT_A/B bits are set by hardware     *
+ * but must be cleared by software.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_int_pend0_u {
+	bdrkreg_t	pi_int_pend0_regval;
+	struct  {
+		bdrkreg_t	ip_int_pend0_lo           :	 1;
+                bdrkreg_t       ip_gfx_int_a              :      1; /* set by HW, cleared by SW */
+                bdrkreg_t       ip_gfx_int_b              :      1; /* set by HW, cleared by SW */
+                bdrkreg_t       ip_page_migration         :      1;
+                bdrkreg_t       ip_uart_ucntrl            :      1;
+                bdrkreg_t       ip_or_cc_pend_a           :      1;
+                bdrkreg_t       ip_or_cc_pend_b           :      1;
+                bdrkreg_t       ip_int_pend0_hi           :     57;
+	} pi_int_pend0_fld_s;
+} pi_int_pend0_u_t;
+
+#else
+
+typedef union pi_int_pend0_u {
+	bdrkreg_t	pi_int_pend0_regval;
+	struct	{
+		bdrkreg_t	ip_int_pend0_hi		  :	57;
+		bdrkreg_t	ip_or_cc_pend_b		  :	 1;
+		bdrkreg_t	ip_or_cc_pend_a		  :	 1;
+		bdrkreg_t	ip_uart_ucntrl		  :	 1;
+		bdrkreg_t	ip_page_migration	  :	 1;
+		bdrkreg_t	ip_gfx_int_b		  :	 1; /* set by HW, cleared by SW */
+		bdrkreg_t	ip_gfx_int_a		  :	 1; /* set by HW, cleared by SW */
+		bdrkreg_t	ip_int_pend0_lo		  :	 1;
+	} pi_int_pend0_fld_s;
+} pi_int_pend0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This read-only register provides information about interrupts       *
+ * that are currently pending. The interrupts in this register map to   *
+ * interrupt level 3 (L3), unless remapped by the INT_PEND1_REMAP       *
+ * register. The SYS_COR_ERR_A/B, RTC_DROP_OUT, and NACK_INT_A/B bits   *
+ * are set by hardware but must be cleared by software. The             *
+ * SYSTEM_SHUTDOWN, NI_ERROR, LB_ERROR and XB_ERROR bits just reflect   *
+ * the value of other logic, and cannot be changed by PI register       *
+ * writes.                                                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_int_pend1_u {
+	bdrkreg_t	pi_int_pend1_regval;
+	struct  {
+		bdrkreg_t	ip_int_pend1              :	54;
+                bdrkreg_t       ip_xb_error               :      1; /* reflects other logic; read-only */
+                bdrkreg_t       ip_lb_error               :      1; /* reflects other logic; read-only */
+                bdrkreg_t       ip_nack_int_a             :      1; /* set by HW, cleared by SW */
+                bdrkreg_t       ip_nack_int_b             :      1; /* set by HW, cleared by SW */
+                bdrkreg_t       ip_perf_cntr_oflow        :      1;
+                bdrkreg_t       ip_sys_cor_err_b          :      1; /* set by HW, cleared by SW */
+                bdrkreg_t       ip_sys_cor_err_a          :      1; /* set by HW, cleared by SW */
+                bdrkreg_t       ip_md_corr_error          :      1;
+                bdrkreg_t       ip_ni_error               :      1; /* reflects other logic; read-only */
+                bdrkreg_t       ip_system_shutdown        :      1; /* reflects other logic; read-only */
+	} pi_int_pend1_fld_s;
+} pi_int_pend1_u_t;
+
+#else
+
+typedef union pi_int_pend1_u {
+	bdrkreg_t	pi_int_pend1_regval;
+	struct	{
+		bdrkreg_t	ip_system_shutdown	  :	 1; /* reflects other logic; read-only */
+		bdrkreg_t	ip_ni_error		  :	 1; /* reflects other logic; read-only */
+		bdrkreg_t	ip_md_corr_error	  :	 1;
+		bdrkreg_t	ip_sys_cor_err_a	  :	 1; /* set by HW, cleared by SW */
+		bdrkreg_t	ip_sys_cor_err_b	  :	 1; /* set by HW, cleared by SW */
+		bdrkreg_t	ip_perf_cntr_oflow	  :	 1;
+		bdrkreg_t	ip_nack_int_b		  :	 1; /* set by HW, cleared by SW */
+		bdrkreg_t	ip_nack_int_a		  :	 1; /* set by HW, cleared by SW */
+		bdrkreg_t	ip_lb_error		  :	 1; /* reflects other logic; read-only */
+		bdrkreg_t	ip_xb_error		  :	 1; /* reflects other logic; read-only */
+		bdrkreg_t	ip_int_pend1		  :	54;
+	} pi_int_pend1_fld_s;
+} pi_int_pend1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This read/write register masks the contents of INT_PEND0 to         *
+ * determine whether an L2 interrupt (bit 10 of the processor's Cause   *
+ * register) is sent to CPU_A if the same bit in the INT_PEND0          *
+ * register is also set. Only one processor in a Bedrock should         *
+ * enable the PAGE_MIGRATION bit/interrupt.                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_int_mask0_a_u {
+	bdrkreg_t	pi_int_mask0_a_regval;
+	struct  {
+		bdrkreg_t	ima_int_mask0_lo          :	 1;
+                bdrkreg_t       ima_gfx_int_a             :      1;
+                bdrkreg_t       ima_gfx_int_b             :      1;
+                bdrkreg_t       ima_page_migration        :      1; /* enable in only one CPU per Bedrock */
+                bdrkreg_t       ima_uart_ucntrl           :      1;
+                bdrkreg_t       ima_or_ccp_mask_a         :      1;
+                bdrkreg_t       ima_or_ccp_mask_b         :      1;
+                bdrkreg_t       ima_int_mask0_hi          :     57;
+	} pi_int_mask0_a_fld_s;
+} pi_int_mask0_a_u_t;
+
+#else
+
+typedef union pi_int_mask0_a_u {
+	bdrkreg_t	pi_int_mask0_a_regval;
+	struct	{
+		bdrkreg_t	ima_int_mask0_hi	  :	57;
+		bdrkreg_t	ima_or_ccp_mask_b	  :	 1;
+		bdrkreg_t	ima_or_ccp_mask_a	  :	 1;
+		bdrkreg_t	ima_uart_ucntrl		  :	 1;
+		bdrkreg_t	ima_page_migration	  :	 1; /* enable in only one CPU per Bedrock */
+		bdrkreg_t	ima_gfx_int_b		  :	 1;
+		bdrkreg_t	ima_gfx_int_a		  :	 1;
+		bdrkreg_t	ima_int_mask0_lo	  :	 1;
+	} pi_int_mask0_a_fld_s;
+} pi_int_mask0_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This read/write register masks the contents of INT_PEND1 to         *
+ * determine whether an interrupt should be sent. Bits 63:32 always     *
+ * generate an L3 interrupt (bit 11 of the processor's Cause            *
+ * register), which is sent to CPU_A if the same bit in the INT_PEND1   *
+ * register is set. Bits 31:0 can generate either an L3 or L2           *
+ * interrupt, depending on the value of INT_PEND1_REMAP[3:0]. Only      *
+ * one processor in a Bedrock should enable the NI_ERROR, LB_ERROR,     *
+ * XB_ERROR and MD_CORR_ERROR bits.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_int_mask1_a_u {
+	bdrkreg_t	pi_int_mask1_a_regval;
+	struct  {
+		bdrkreg_t	ima_int_mask1             :	64; /* per-bit mask of INT_PEND1 for CPU_A */
+	} pi_int_mask1_a_fld_s;
+} pi_int_mask1_a_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This read/write register masks the contents of INT_PEND0 to         *
+ * determine whether an L2 interrupt (bit 10 of the processor's Cause   *
+ * register) is sent to CPU_B if the same bit in the INT_PEND0          *
+ * register is also set. Only one processor in a Bedrock should         *
+ * enable the PAGE_MIGRATION bit/interrupt.                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_int_mask0_b_u {
+	bdrkreg_t	pi_int_mask0_b_regval;
+	struct  {
+		bdrkreg_t	imb_int_mask0_lo          :	 1;
+                bdrkreg_t       imb_gfx_int_a             :      1;
+                bdrkreg_t       imb_gfx_int_b             :      1;
+                bdrkreg_t       imb_page_migration        :      1; /* enable in only one CPU per Bedrock */
+                bdrkreg_t       imb_uart_ucntrl           :      1;
+                bdrkreg_t       imb_or_ccp_mask_a         :      1;
+                bdrkreg_t       imb_or_ccp_mask_b         :      1;
+                bdrkreg_t       imb_int_mask0_hi          :     57;
+	} pi_int_mask0_b_fld_s;
+} pi_int_mask0_b_u_t;
+
+#else
+
+typedef union pi_int_mask0_b_u {
+	bdrkreg_t	pi_int_mask0_b_regval;
+	struct	{
+		bdrkreg_t	imb_int_mask0_hi	  :	57;
+		bdrkreg_t	imb_or_ccp_mask_b	  :	 1;
+		bdrkreg_t	imb_or_ccp_mask_a	  :	 1;
+		bdrkreg_t	imb_uart_ucntrl		  :	 1;
+		bdrkreg_t	imb_page_migration	  :	 1; /* enable in only one CPU per Bedrock */
+		bdrkreg_t	imb_gfx_int_b		  :	 1;
+		bdrkreg_t	imb_gfx_int_a		  :	 1;
+		bdrkreg_t	imb_int_mask0_lo	  :	 1;
+	} pi_int_mask0_b_fld_s;
+} pi_int_mask0_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This read/write register masks the contents of INT_PEND1 to         *
+ * determine whether an interrupt should be sent. Bits 63:32 always     *
+ * generate an L3 interrupt (bit 11 of the processor's Cause            *
+ * register), which is sent to CPU_B if the same bit in the INT_PEND1   *
+ * register is set. Bits 31:0 can generate either an L3 or L2           *
+ * interrupt, depending on the value of INT_PEND1_REMAP[3:0]. Only      *
+ * one processor in a Bedrock should enable the NI_ERROR, LB_ERROR,     *
+ * XB_ERROR and MD_CORR_ERROR bits.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_int_mask1_b_u {
+	bdrkreg_t	pi_int_mask1_b_regval;
+	struct  {
+		bdrkreg_t	imb_int_mask1             :	64; /* per-bit mask of INT_PEND1 for CPU_B */
+	} pi_int_mask1_b_fld_s;
+} pi_int_mask1_b_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. These registers do    *
+ * not have access protection. A store to this location by a CPU will   *
+ * cause the bit corresponding to the source's region to be set in      *
+ * CC_PEND_A (or CC_PEND_B). The contents of CC_PEND_A (or CC_PEND_B)   *
+ * determines on a bit-per-region basis whether a CPU-to-CPU            *
+ * interrupt is pending for CPU_A (or CPU_B).                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_cc_pend_set_a_u {
+	bdrkreg_t	pi_cc_pend_set_a_regval;
+	struct  {
+		bdrkreg_t	cpsa_cc_pend              :	64; /* store sets source region's bit in CC_PEND_A */
+	} pi_cc_pend_set_a_fld_s;
+} pi_cc_pend_set_a_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. These registers do    *
+ * not have access protection. A store to this location by a CPU will   *
+ * cause the bit corresponding to the source's region to be set in      *
+ * CC_PEND_A (or CC_PEND_B). The contents of CC_PEND_A (or CC_PEND_B)   *
+ * determines on a bit-per-region basis whether a CPU-to-CPU            *
+ * interrupt is pending for CPU_A (or CPU_B).                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_cc_pend_set_b_u {
+	bdrkreg_t	pi_cc_pend_set_b_regval;
+	struct  {
+		bdrkreg_t	cpsb_cc_pend              :	64; /* store sets source region's bit in CC_PEND_B */
+	} pi_cc_pend_set_b_fld_s;
+} pi_cc_pend_set_b_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. Reading this          *
+ * location will return the contents of CC_PEND_A (or CC_PEND_B).       *
+ * Writing this location will clear the bits corresponding to which     *
+ * data bits are driven high during the store; therefore, storing all   *
+ * ones would clear all bits.                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_cc_pend_clr_a_u {
+	bdrkreg_t	pi_cc_pend_clr_a_regval;
+	struct  {
+		bdrkreg_t	cpca_cc_pend              :	64; /* read: CC_PEND_A; write: 1 bits clear */
+	} pi_cc_pend_clr_a_fld_s;
+} pi_cc_pend_clr_a_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. Reading this          *
+ * location will return the contents of CC_PEND_A (or CC_PEND_B).       *
+ * Writing this location will clear the bits corresponding to which     *
+ * data bits are driven high during the store; therefore, storing all   *
+ * ones would clear all bits.                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_cc_pend_clr_b_u {
+	bdrkreg_t	pi_cc_pend_clr_b_regval;
+	struct  {
+		bdrkreg_t	cpcb_cc_pend              :	64; /* read: CC_PEND_B; write: 1 bits clear */
+	} pi_cc_pend_clr_b_fld_s;
+} pi_cc_pend_clr_b_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This read/write register masks the contents of both CC_PEND_A and   *
+ * CC_PEND_B.                                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+typedef union pi_cc_mask_u {
+	bdrkreg_t	pi_cc_mask_regval;
+	struct  {
+		bdrkreg_t	cm_cc_mask                :	64; /* masks both CC_PEND_A and CC_PEND_B */
+	} pi_cc_mask_fld_s;
+} pi_cc_mask_u_t;
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This read/write register redirects INT_PEND1[31:0] from L3 to L2    *
+ * interrupt level. Bit 4 in this register is used to enable error      *
+ * interrupt forwarding to the II. When this bit is set, if any of      *
+ * the three memory interrupts (correctable error, uncorrectable        *
+ * error, or page migration), or the NI, LB or XB error interrupts      *
+ * are set, the PI_II_ERROR_INT wire will be asserted. When this wire   *
+ * is asserted, the II will send an interrupt to the node specified     *
+ * in its IIDSR (Interrupt Destination Register). This allows these     *
+ * interrupts to be forwarded to another node.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_int_pend1_remap_u {
+	bdrkreg_t	pi_int_pend1_remap_regval;
+	struct  {
+		bdrkreg_t	ipr_remap_0               :	 1; /* remap INT_PEND1[31:0] from L3 to L2 */
+                bdrkreg_t       ipr_remap_1               :      1;
+                bdrkreg_t       ipr_remap_2               :      1;
+                bdrkreg_t       ipr_remap_3               :      1;
+                bdrkreg_t       ipr_error_forward         :      1; /* enable error forwarding to the II (PI_II_ERROR_INT) */
+                bdrkreg_t       ipr_reserved              :     59;
+	} pi_int_pend1_remap_fld_s;
+} pi_int_pend1_remap_u_t;
+
+#else
+
+typedef union pi_int_pend1_remap_u {
+	bdrkreg_t	pi_int_pend1_remap_regval;
+	struct	{
+		bdrkreg_t	ipr_reserved		  :	59;
+		bdrkreg_t	ipr_error_forward	  :	 1; /* enable error forwarding to the II (PI_II_ERROR_INT) */
+		bdrkreg_t	ipr_remap_3		  :	 1;
+		bdrkreg_t	ipr_remap_2		  :	 1;
+		bdrkreg_t	ipr_remap_1		  :	 1;
+		bdrkreg_t	ipr_remap_0		  :	 1; /* remap INT_PEND1[31:0] from L3 to L2 */
+	} pi_int_pend1_remap_fld_s;
+} pi_int_pend1_remap_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. When the real time    *
+ * counter (RT_Counter) is equal to the value in this register, the     *
+ * RT_INT_PEND register is set, which causes a Level-4 interrupt to     *
+ * be sent to the processor.                                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_rt_compare_a_u {
+	bdrkreg_t	pi_rt_compare_a_regval;
+	struct  {
+		bdrkreg_t	rca_rt_compare            :	55; /* RT_Counter match value; match raises Level-4 interrupt */
+		bdrkreg_t       rca_rsvd                  :      9;
+	} pi_rt_compare_a_fld_s;
+} pi_rt_compare_a_u_t;
+
+#else
+
+typedef union pi_rt_compare_a_u {
+        bdrkreg_t       pi_rt_compare_a_regval;
+        struct  {
+                bdrkreg_t       rca_rsvd                  :      9;
+                bdrkreg_t       rca_rt_compare            :     55; /* RT_Counter match value; match raises Level-4 interrupt */
+        } pi_rt_compare_a_fld_s;
+} pi_rt_compare_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. When the real time    *
+ * counter (RT_Counter) is equal to the value in this register, the     *
+ * RT_INT_PEND register is set, which causes a Level-4 interrupt to     *
+ * be sent to the processor.                                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_rt_compare_b_u {
+	bdrkreg_t	pi_rt_compare_b_regval;
+	struct  {
+		bdrkreg_t	rcb_rt_compare            :	55;
+		bdrkreg_t       rcb_rsvd                  :      9;
+	} pi_rt_compare_b_fld_s;
+} pi_rt_compare_b_u_t;
+
+#else
+
+typedef union pi_rt_compare_b_u {
+	bdrkreg_t	pi_rt_compare_b_regval;
+	struct	{
+		bdrkreg_t	rcb_rsvd		  :	 9;
+		bdrkreg_t	rcb_rt_compare		  :	55;
+	} pi_rt_compare_b_fld_s;
+} pi_rt_compare_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  When the least significant 32 bits of the real time counter         *
+ * (RT_Counter) are equal to the value in this register, the            *
+ * PROF_INT_PEND_A and PROF_INT_PEND_B registers are set to 0x1.        *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_profile_compare_u {
+	bdrkreg_t	pi_profile_compare_regval;
+	struct  {
+		bdrkreg_t	pc_profile_compare        :	32;
+		bdrkreg_t       pc_rsvd                   :     32;
+	} pi_profile_compare_fld_s;
+} pi_profile_compare_u_t;
+
+#else
+
+typedef union pi_profile_compare_u {
+	bdrkreg_t	pi_profile_compare_regval;
+	struct	{
+		bdrkreg_t	pc_rsvd			  :	32;
+		bdrkreg_t	pc_profile_compare	  :	32;
+	} pi_profile_compare_fld_s;
+} pi_profile_compare_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. If the bit in the     *
+ * corresponding RT_INT_EN_A/B register is set, the processor's level   *
+ * 5 interrupt is set to the value of the RTC_INT_PEND bit in this      *
+ * register. Storing any value to this location will clear the          *
+ * RTC_INT_PEND bit in the register.                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_rt_int_pend_a_u {
+	bdrkreg_t	pi_rt_int_pend_a_regval;
+	struct  {
+		bdrkreg_t	ripa_rtc_int_pend         :	 1;
+		bdrkreg_t       ripa_rsvd                 :     63;
+	} pi_rt_int_pend_a_fld_s;
+} pi_rt_int_pend_a_u_t;
+
+#else
+
+typedef union pi_rt_int_pend_a_u {
+	bdrkreg_t	pi_rt_int_pend_a_regval;
+	struct	{
+		bdrkreg_t	ripa_rsvd		  :	63;
+		bdrkreg_t	ripa_rtc_int_pend	  :	 1;
+	} pi_rt_int_pend_a_fld_s;
+} pi_rt_int_pend_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. If the bit in the     *
+ * corresponding RT_INT_EN_A/B register is set, the processor's level   *
+ * 5 interrupt is set to the value of the RTC_INT_PEND bit in this      *
+ * register. Storing any value to this location will clear the          *
+ * RTC_INT_PEND bit in the register.                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_rt_int_pend_b_u {
+	bdrkreg_t	pi_rt_int_pend_b_regval;
+	struct  {
+		bdrkreg_t	ripb_rtc_int_pend         :	 1;
+		bdrkreg_t       ripb_rsvd                 :     63;
+	} pi_rt_int_pend_b_fld_s;
+} pi_rt_int_pend_b_u_t;
+
+#else
+
+typedef union pi_rt_int_pend_b_u {
+	bdrkreg_t	pi_rt_int_pend_b_regval;
+	struct	{
+		bdrkreg_t	ripb_rsvd		  :	63;
+		bdrkreg_t	ripb_rtc_int_pend	  :	 1;
+	} pi_rt_int_pend_b_fld_s;
+} pi_rt_int_pend_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. Both registers are    *
+ * set when the PROFILE_COMPARE register is equal to bits [31:0] of     *
+ * the RT_Counter. If the bit in the corresponding PROF_INT_EN_A/B      *
+ * register is set, the processor's level 5 interrupt is set to the     *
+ * value of the PROF_INT_PEND bit in this register. Storing any value   *
+ * to this location will clear the PROF_INT_PEND bit in the register.   *
+ * The reason for having A and B versions of this register is that      *
+ * they need to be cleared independently.                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_prof_int_pend_a_u {
+	bdrkreg_t	pi_prof_int_pend_a_regval;
+	struct  {
+		bdrkreg_t	pipa_prof_int_pend        :	 1;
+		bdrkreg_t       pipa_rsvd                 :     63;
+	} pi_prof_int_pend_a_fld_s;
+} pi_prof_int_pend_a_u_t;
+
+#else
+
+typedef union pi_prof_int_pend_a_u {
+	bdrkreg_t	pi_prof_int_pend_a_regval;
+	struct	{
+		bdrkreg_t	pipa_rsvd		  :	63;
+		bdrkreg_t	pipa_prof_int_pend	  :	 1;
+	} pi_prof_int_pend_a_fld_s;
+} pi_prof_int_pend_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. Both registers are    *
+ * set when the PROFILE_COMPARE register is equal to bits [31:0] of     *
+ * the RT_Counter. If the bit in the corresponding PROF_INT_EN_A/B      *
+ * register is set, the processor's level 5 interrupt is set to the     *
+ * value of the PROF_INT_PEND bit in this register. Storing any value   *
+ * to this location will clear the PROF_INT_PEND bit in the register.   *
+ * The reason for having A and B versions of this register is that      *
+ * they need to be cleared independently.                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_prof_int_pend_b_u {
+	bdrkreg_t	pi_prof_int_pend_b_regval;
+	struct  {
+		bdrkreg_t	pipb_prof_int_pend        :	 1;
+		bdrkreg_t       pipb_rsvd                 :     63;
+	} pi_prof_int_pend_b_fld_s;
+} pi_prof_int_pend_b_u_t;
+
+#else
+
+typedef union pi_prof_int_pend_b_u {
+	bdrkreg_t	pi_prof_int_pend_b_regval;
+	struct	{
+		bdrkreg_t	pipb_rsvd		  :	63;
+		bdrkreg_t	pipb_prof_int_pend	  :	 1;
+	} pi_prof_int_pend_b_fld_s;
+} pi_prof_int_pend_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. Enables RTC           *
+ * interrupt to the associated CPU.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_rt_int_en_a_u {
+	bdrkreg_t	pi_rt_int_en_a_regval;
+	struct  {
+		bdrkreg_t	riea_rtc_int_en           :	 1;
+		bdrkreg_t       riea_rsvd                 :     63;
+	} pi_rt_int_en_a_fld_s;
+} pi_rt_int_en_a_u_t;
+
+#else
+
+typedef union pi_rt_int_en_a_u {
+        bdrkreg_t       pi_rt_int_en_a_regval;
+        struct  {
+                bdrkreg_t       riea_rsvd                 :     63;
+                bdrkreg_t       riea_rtc_int_en           :      1;
+        } pi_rt_int_en_a_fld_s;
+} pi_rt_int_en_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. Enables RTC           *
+ * interrupt to the associated CPU.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_rt_int_en_b_u {
+	bdrkreg_t	pi_rt_int_en_b_regval;
+	struct  {
+		bdrkreg_t	rieb_rtc_int_en           :	 1;
+		bdrkreg_t       rieb_rsvd                 :     63;
+	} pi_rt_int_en_b_fld_s;
+} pi_rt_int_en_b_u_t;
+
+#else
+
+typedef union pi_rt_int_en_b_u {
+        bdrkreg_t       pi_rt_int_en_b_regval;
+        struct  {
+                bdrkreg_t       rieb_rsvd                 :     63;
+                bdrkreg_t       rieb_rtc_int_en           :      1;
+        } pi_rt_int_en_b_fld_s;
+} pi_rt_int_en_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. Enables profiling     *
+ * interrupt to the associated CPU.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_prof_int_en_a_u {
+	bdrkreg_t	pi_prof_int_en_a_regval;
+	struct  {
+		bdrkreg_t	piea_prof_int_en          :	 1;
+		bdrkreg_t       piea_rsvd                 :     63;
+	} pi_prof_int_en_a_fld_s;
+} pi_prof_int_en_a_u_t;
+
+#else
+
+typedef union pi_prof_int_en_a_u {
+	bdrkreg_t	pi_prof_int_en_a_regval;
+	struct	{
+		bdrkreg_t	piea_rsvd		  :	63;
+		bdrkreg_t	piea_prof_int_en	  :	 1;
+	} pi_prof_int_en_a_fld_s;
+} pi_prof_int_en_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. Enables profiling     *
+ * interrupt to the associated CPU.                                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_prof_int_en_b_u {
+	bdrkreg_t	pi_prof_int_en_b_regval;
+	struct  {
+		bdrkreg_t	pieb_prof_int_en          :	 1;
+		bdrkreg_t       pieb_rsvd                 :     63;
+	} pi_prof_int_en_b_fld_s;
+} pi_prof_int_en_b_u_t;
+
+#else
+
+typedef union pi_prof_int_en_b_u {
+	bdrkreg_t	pi_prof_int_en_b_regval;
+	struct	{
+		bdrkreg_t	pieb_rsvd		  :	63;
+		bdrkreg_t	pieb_prof_int_en	  :	 1;
+	} pi_prof_int_en_b_fld_s;
+} pi_prof_int_en_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register controls operation of the debug data from the PI,     *
+ * along with Debug_Sel[2:0] from the Debug module. For some values     *
+ * of Debug_Sel[2:0], the B_SEL bit selects whether the debug bits      *
+ * are looking at the processor A or processor B logic. The remaining   *
+ * bits select which signal(s) are ORed to create DebugData bits 31     *
+ * and 30 for all of the PI debug selections.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_debug_sel_u {
+	bdrkreg_t	pi_debug_sel_regval;
+	struct  {
+		bdrkreg_t	ds_low_t5cc_a             :	 1;
+                bdrkreg_t       ds_low_t5cc_b             :      1;
+                bdrkreg_t       ds_low_totcc_a            :      1;
+                bdrkreg_t       ds_low_totcc_b            :      1;
+                bdrkreg_t       ds_low_reqcc_a            :      1;
+                bdrkreg_t       ds_low_reqcc_b            :      1;
+                bdrkreg_t       ds_low_rplcc_a            :      1;
+                bdrkreg_t       ds_low_rplcc_b            :      1;
+                bdrkreg_t       ds_low_intcc              :      1;
+                bdrkreg_t       ds_low_perf_inc_a_0       :      1;
+                bdrkreg_t       ds_low_perf_inc_a_1       :      1;
+                bdrkreg_t       ds_low_perf_inc_b_0       :      1;
+                bdrkreg_t       ds_low_perf_inc_b_1       :      1;
+                bdrkreg_t       ds_high_t5cc_a            :      1;
+                bdrkreg_t       ds_high_t5cc_b            :      1;
+                bdrkreg_t       ds_high_totcc_a           :      1;
+                bdrkreg_t       ds_high_totcc_b           :      1;
+                bdrkreg_t       ds_high_reqcc_a           :      1;
+                bdrkreg_t       ds_high_reqcc_b           :      1;
+                bdrkreg_t       ds_high_rplcc_a           :      1;
+                bdrkreg_t       ds_high_rplcc_b           :      1;
+                bdrkreg_t       ds_high_intcc             :      1;
+                bdrkreg_t       ds_high_perf_inc_a_0      :      1;
+                bdrkreg_t       ds_high_perf_inc_a_1      :      1;
+                bdrkreg_t       ds_high_perf_inc_b_0      :      1;
+                bdrkreg_t       ds_high_perf_inc_b_1      :      1;
+                bdrkreg_t       ds_b_sel                  :      1;
+                bdrkreg_t       ds_rsvd                   :     37;
+	} pi_debug_sel_fld_s;
+} pi_debug_sel_u_t;
+
+#else
+
+typedef union pi_debug_sel_u {
+	bdrkreg_t	pi_debug_sel_regval;
+	struct	{
+		bdrkreg_t	ds_rsvd			  :	37;
+		bdrkreg_t	ds_b_sel		  :	 1;
+		bdrkreg_t	ds_high_perf_inc_b_1	  :	 1;
+		bdrkreg_t	ds_high_perf_inc_b_0	  :	 1;
+		bdrkreg_t	ds_high_perf_inc_a_1	  :	 1;
+		bdrkreg_t	ds_high_perf_inc_a_0	  :	 1;
+		bdrkreg_t	ds_high_intcc		  :	 1;
+		bdrkreg_t	ds_high_rplcc_b		  :	 1;
+		bdrkreg_t	ds_high_rplcc_a		  :	 1;
+		bdrkreg_t	ds_high_reqcc_b		  :	 1;
+		bdrkreg_t	ds_high_reqcc_a		  :	 1;
+		bdrkreg_t	ds_high_totcc_b		  :	 1;
+		bdrkreg_t	ds_high_totcc_a		  :	 1;
+		bdrkreg_t	ds_high_t5cc_b		  :	 1;
+		bdrkreg_t	ds_high_t5cc_a		  :	 1;
+		bdrkreg_t	ds_low_perf_inc_b_1	  :	 1;
+		bdrkreg_t	ds_low_perf_inc_b_0	  :	 1;
+		bdrkreg_t	ds_low_perf_inc_a_1	  :	 1;
+		bdrkreg_t	ds_low_perf_inc_a_0	  :	 1;
+		bdrkreg_t	ds_low_intcc		  :	 1;
+		bdrkreg_t	ds_low_rplcc_b		  :	 1;
+		bdrkreg_t	ds_low_rplcc_a		  :	 1;
+		bdrkreg_t	ds_low_reqcc_b		  :	 1;
+		bdrkreg_t	ds_low_reqcc_a		  :	 1;
+		bdrkreg_t	ds_low_totcc_b		  :	 1;
+		bdrkreg_t	ds_low_totcc_a		  :	 1;
+		bdrkreg_t	ds_low_t5cc_b		  :	 1;
+		bdrkreg_t	ds_low_t5cc_a		  :	 1;
+	} pi_debug_sel_fld_s;
+} pi_debug_sel_u_t;
+
+#endif
+
+
+/************************************************************************
+ *                                                                      *
+ *  A write to this register allows a single bit in the INT_PEND0 or    *
+ * INT_PEND1 registers to be set or cleared. If bit 6 is clear, a      *
+ * bit is modified in INT_PEND0, while if bit 6 is set, a bit is       *
+ * modified in INT_PEND1. The value in bits 5:0 (ranging from 63 to    *
+ * 0) determines which bit in the register is affected. The value      *
+ * of bit 8 determines whether the bit is set (8=1) or cleared (8=0).  *
+ * This is the only register which is accessible by IO issued PWRI      *
+ * command and is protected through the IO_PROTECT register. If the     *
+ * region bit in the IO_PROTECT is not set then a WERR reply is         *
+ * issued. CPU access is controlled through CPU_PROTECT. The contents   *
+ * of this register are masked with the contents of INT_MASK_A          *
+ * (INT_MASK_B) to determine whether an L2 interrupt is issued to       *
+ * CPU_A (CPU_B).                                                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_int_pend_mod_alias_u {
+	bdrkreg_t	pi_int_pend_mod_alias_regval;
+	struct  {
+		bdrkreg_t	ipma_bit_select           :	 6;
+                bdrkreg_t       ipma_reg_select           :      1;
+                bdrkreg_t       ipma_rsvd_1               :      1;
+                bdrkreg_t       ipma_value                :      1;
+                bdrkreg_t       ipma_rsvd                 :     55;
+	} pi_int_pend_mod_alias_fld_s;
+} pi_int_pend_mod_alias_u_t;
+
+#else
+
+typedef union pi_int_pend_mod_alias_u {
+	bdrkreg_t	pi_int_pend_mod_alias_regval;
+	struct	{
+		bdrkreg_t	ipma_rsvd		  :	55;
+		bdrkreg_t	ipma_value		  :	 1;
+		bdrkreg_t	ipma_rsvd_1		  :	 1;
+		bdrkreg_t	ipma_reg_select		  :	 1;
+		bdrkreg_t	ipma_bit_select		  :	 6;
+	} pi_int_pend_mod_alias_fld_s;
+} pi_int_pend_mod_alias_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. This register         *
+ * specifies the value of the Graphics Page. Uncached writes into the   *
+ * Graphics Page (with uncached attribute of IO) are done with GFXWS    *
+ * commands rather than the normal PWRI commands. GFXWS commands are    *
+ * tracked with the graphics credit counters.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_page_a_u {
+	bdrkreg_t	pi_gfx_page_a_regval;
+	struct  {
+		bdrkreg_t	gpa_rsvd_1                :	17;
+                bdrkreg_t       gpa_gfx_page_addr         :     23;
+                bdrkreg_t       gpa_en_gfx_page           :      1;
+                bdrkreg_t       gpa_rsvd                  :     23;
+	} pi_gfx_page_a_fld_s;
+} pi_gfx_page_a_u_t;
+
+#else
+
+typedef union pi_gfx_page_a_u {
+	bdrkreg_t	pi_gfx_page_a_regval;
+	struct	{
+		bdrkreg_t	gpa_rsvd		  :	23;
+		bdrkreg_t	gpa_en_gfx_page		  :	 1;
+		bdrkreg_t	gpa_gfx_page_addr	  :	23;
+		bdrkreg_t	gpa_rsvd_1		  :	17;
+	} pi_gfx_page_a_fld_s;
+} pi_gfx_page_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. This register         *
+ * counts graphics credits. This counter is decremented for each        *
+ * doubleword sent to graphics with GFXWS or GFXWL commands. It is      *
+ * incremented for each doubleword acknowledge from graphics. When      *
+ * this counter has a smaller value than the GFX_BIAS register,         *
+ * SysWrRdy_L is deasserted, an interrupt is sent to the processor,     *
+ * and SysWrRdy_L is allowed to be asserted again. This is the basic    *
+ * mechanism for flow-controlling graphics writes.                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_credit_cntr_a_u {
+	bdrkreg_t	pi_gfx_credit_cntr_a_regval;
+	struct  {
+		bdrkreg_t	gcca_gfx_credit_cntr      :	12;
+		bdrkreg_t       gcca_rsvd                 :     52;
+	} pi_gfx_credit_cntr_a_fld_s;
+} pi_gfx_credit_cntr_a_u_t;
+
+#else
+
+typedef union pi_gfx_credit_cntr_a_u {
+	bdrkreg_t	pi_gfx_credit_cntr_a_regval;
+	struct	{
+		bdrkreg_t	gcca_rsvd		  :	52;
+		bdrkreg_t	gcca_gfx_credit_cntr	  :	12;
+	} pi_gfx_credit_cntr_a_fld_s;
+} pi_gfx_credit_cntr_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. When the graphics     *
+ * credit counter is less than or equal to this value, a flow control   *
+ * interrupt is sent.                                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_bias_a_u {
+	bdrkreg_t	pi_gfx_bias_a_regval;
+	struct  {
+		bdrkreg_t	gba_gfx_bias              :	12;
+		bdrkreg_t       gba_rsvd                  :     52;
+	} pi_gfx_bias_a_fld_s;
+} pi_gfx_bias_a_u_t;
+
+#else
+
+typedef union pi_gfx_bias_a_u {
+	bdrkreg_t	pi_gfx_bias_a_regval;
+	struct	{
+		bdrkreg_t	gba_rsvd		  :	52;
+		bdrkreg_t	gba_gfx_bias		  :	12;
+	} pi_gfx_bias_a_fld_s;
+} pi_gfx_bias_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There is one of these registers for each CPU. When     *
+ * this counter reaches the value of the GFX_INT_CMP register, an       *
+ * interrupt is sent to the associated processor. At each clock         *
+ * cycle, the value in this register can be changed by any one of the   *
+ * following actions:                                                   *
+ * - Written by software.                                               *
+ * - Loaded with the value of GFX_INT_CMP, when an interrupt, NMI, or   *
+ * soft reset occurs, thus preventing an additional interrupt.          *
+ * - Zeroed, when the GFX_CREDIT_CNTR rises above the bias value.       *
+ * - Incremented (by one at each clock) for each clock that the         *
+ * GFX_CREDIT_CNTR is less than or equal to zero.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_int_cntr_a_u {
+	bdrkreg_t	pi_gfx_int_cntr_a_regval;
+	struct  {
+		bdrkreg_t	gica_gfx_int_cntr         :	26;
+		bdrkreg_t       gica_rsvd                 :     38;
+	} pi_gfx_int_cntr_a_fld_s;
+} pi_gfx_int_cntr_a_u_t;
+
+#else
+
+typedef union pi_gfx_int_cntr_a_u {
+	bdrkreg_t	pi_gfx_int_cntr_a_regval;
+	struct	{
+		bdrkreg_t	gica_rsvd		  :	38;
+		bdrkreg_t	gica_gfx_int_cntr	  :	26;
+	} pi_gfx_int_cntr_a_fld_s;
+} pi_gfx_int_cntr_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. The value in this     *
+ * register is loaded into the GFX_INT_CNTR register when an            *
+ * interrupt, NMI, or soft reset is sent to the processor. The value    *
+ * in this register is compared to the value of GFX_INT_CNTR and an     *
+ * interrupt is sent when they become equal.                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_int_cmp_a_u {
+	bdrkreg_t	pi_gfx_int_cmp_a_regval;
+	struct  {
+		bdrkreg_t	gica_gfx_int_cmp          :	26;
+		bdrkreg_t       gica_rsvd                 :     38;
+	} pi_gfx_int_cmp_a_fld_s;
+} pi_gfx_int_cmp_a_u_t;
+
+#else
+
+typedef union pi_gfx_int_cmp_a_u {
+	bdrkreg_t	pi_gfx_int_cmp_a_regval;
+	struct	{
+		bdrkreg_t	gica_rsvd		  :	38;
+		bdrkreg_t	gica_gfx_int_cmp	  :	26;
+	} pi_gfx_int_cmp_a_fld_s;
+} pi_gfx_int_cmp_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. This register         *
+ * specifies the value of the Graphics Page. Uncached writes into the   *
+ * Graphics Page (with uncached attribute of IO) are done with GFXWS    *
+ * commands rather than the normal PWRI commands. GFXWS commands are    *
+ * tracked with the graphics credit counters.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_page_b_u {
+	bdrkreg_t	pi_gfx_page_b_regval;
+	struct  {
+		bdrkreg_t	gpb_rsvd_1                :	17;
+                bdrkreg_t       gpb_gfx_page_addr         :     23;
+                bdrkreg_t       gpb_en_gfx_page           :      1;
+                bdrkreg_t       gpb_rsvd                  :     23;
+	} pi_gfx_page_b_fld_s;
+} pi_gfx_page_b_u_t;
+
+#else
+
+typedef union pi_gfx_page_b_u {
+	bdrkreg_t	pi_gfx_page_b_regval;
+	struct	{
+		bdrkreg_t	gpb_rsvd		  :	23;
+		bdrkreg_t	gpb_en_gfx_page		  :	 1;
+		bdrkreg_t	gpb_gfx_page_addr	  :	23;
+		bdrkreg_t	gpb_rsvd_1		  :	17;
+	} pi_gfx_page_b_fld_s;
+} pi_gfx_page_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. This register         *
+ * counts graphics credits. This counter is decremented for each        *
+ * doubleword sent to graphics with GFXWS or GFXWL commands. It is      *
+ * incremented for each doubleword acknowledge from graphics. When      *
+ * this counter has a smaller value than the GFX_BIAS register,         *
+ * SysWrRdy_L is deasserted, an interrupt is sent to the processor,     *
+ * and SysWrRdy_L is allowed to be asserted again. This is the basic    *
+ * mechanism for flow-controlling graphics writes.                      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_credit_cntr_b_u {
+	bdrkreg_t	pi_gfx_credit_cntr_b_regval;
+	struct  {
+		bdrkreg_t	gccb_gfx_credit_cntr      :	12;
+		bdrkreg_t       gccb_rsvd                 :     52;
+	} pi_gfx_credit_cntr_b_fld_s;
+} pi_gfx_credit_cntr_b_u_t;
+
+#else
+
+typedef union pi_gfx_credit_cntr_b_u {
+	bdrkreg_t	pi_gfx_credit_cntr_b_regval;
+	struct	{
+		bdrkreg_t	gccb_rsvd		  :	52;
+		bdrkreg_t	gccb_gfx_credit_cntr	  :	12;
+	} pi_gfx_credit_cntr_b_fld_s;
+} pi_gfx_credit_cntr_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. When the graphics     *
+ * credit counter is less than or equal to this value, a flow control   *
+ * interrupt is sent.                                                   *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_bias_b_u {
+	bdrkreg_t	pi_gfx_bias_b_regval;
+	struct  {
+		bdrkreg_t	gbb_gfx_bias              :	12;
+		bdrkreg_t       gbb_rsvd                  :     52;
+	} pi_gfx_bias_b_fld_s;
+} pi_gfx_bias_b_u_t;
+
+#else
+
+typedef union pi_gfx_bias_b_u {
+	bdrkreg_t	pi_gfx_bias_b_regval;
+	struct	{
+		bdrkreg_t	gbb_rsvd		  :	52;
+		bdrkreg_t	gbb_gfx_bias		  :	12;
+	} pi_gfx_bias_b_fld_s;
+} pi_gfx_bias_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There is one of these registers for each CPU. When     *
+ * this counter reaches the value of the GFX_INT_CMP register, an       *
+ * interrupt is sent to the associated processor. At each clock         *
+ * cycle, the value in this register can be changed by any one of the   *
+ * following actions:                                                   *
+ * - Written by software.                                               *
+ * - Loaded with the value of GFX_INT_CMP, when an interrupt, NMI, or   *
+ * soft reset occurs, thus preventing an additional interrupt.          *
+ * - Zeroed, when the GFX_CREDIT_CNTR rises above the bias value.       *
+ * - Incremented (by one at each clock) for each clock that the         *
+ * GFX_CREDIT_CNTR is less than or equal to zero.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_int_cntr_b_u {
+	bdrkreg_t	pi_gfx_int_cntr_b_regval;
+	struct  {
+		bdrkreg_t	gicb_gfx_int_cntr         :	26;
+		bdrkreg_t       gicb_rsvd                 :     38;
+	} pi_gfx_int_cntr_b_fld_s;
+} pi_gfx_int_cntr_b_u_t;
+
+#else
+
+typedef union pi_gfx_int_cntr_b_u {
+	bdrkreg_t	pi_gfx_int_cntr_b_regval;
+	struct	{
+		bdrkreg_t	gicb_rsvd		  :	38;
+		bdrkreg_t	gicb_gfx_int_cntr	  :	26;
+	} pi_gfx_int_cntr_b_fld_s;
+} pi_gfx_int_cntr_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. The value in this     *
+ * register is loaded into the GFX_INT_CNTR register when an            *
+ * interrupt, NMI, or soft reset is sent to the processor. The value    *
+ * in this register is compared to the value of GFX_INT_CNTR and an     *
+ * interrupt is sent when they become equal.                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_int_cmp_b_u {
+	bdrkreg_t	pi_gfx_int_cmp_b_regval;
+	struct  {
+		bdrkreg_t	gicb_gfx_int_cmp          :	26;
+		bdrkreg_t       gicb_rsvd                 :     38;
+	} pi_gfx_int_cmp_b_fld_s;
+} pi_gfx_int_cmp_b_u_t;
+
+#else
+
+typedef union pi_gfx_int_cmp_b_u {
+	bdrkreg_t	pi_gfx_int_cmp_b_regval;
+	struct	{
+		bdrkreg_t	gicb_rsvd		  :	38;
+		bdrkreg_t	gicb_gfx_int_cmp	  :	26;
+	} pi_gfx_int_cmp_b_fld_s;
+} pi_gfx_int_cmp_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  A read of this register returns all sources of         *
+ * Bedrock Error Interrupts. Storing to the write-with-clear location   *
+ * clears any bit for which a one appears on the data bus. Storing to   *
+ * the writable location does a direct write to all unreserved bits     *
+ * (except for MEM_UNC).                                                *
+ * In Synergy mode, the processor that is the source of the command     *
+ * that got an error is independent of the A or B SysAD bus. So in      *
+ * Synergy mode, Synergy provides the source processor number in bit    *
+ * 52 of the SysAD bus in all commands. The PI saves this in the RRB    *
+ * or WRB entry, and uses that value to determine which error bit (A    *
+ * or B) to set, as well as which ERR_STATUS and spool registers to     *
+ * use, for all error types in this register that are specified as an   *
+ * error to CPU_A or CPU_B.                                             *
+ * This register is not cleared at reset.                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_int_pend_wr_u {
+	bdrkreg_t	pi_err_int_pend_wr_regval;
+	struct  {
+		bdrkreg_t	eipw_spool_comp_b         :	 1;
+                bdrkreg_t       eipw_spool_comp_a         :      1;
+                bdrkreg_t       eipw_spurious_b           :      1;
+                bdrkreg_t       eipw_spurious_a           :      1;
+                bdrkreg_t       eipw_wrb_terr_b           :      1;
+                bdrkreg_t       eipw_wrb_terr_a           :      1;
+                bdrkreg_t       eipw_wrb_werr_b           :      1;
+                bdrkreg_t       eipw_wrb_werr_a           :      1;
+                bdrkreg_t       eipw_sysstate_par_b       :      1;
+                bdrkreg_t       eipw_sysstate_par_a       :      1;
+                bdrkreg_t       eipw_sysad_data_ecc_b     :      1;
+                bdrkreg_t       eipw_sysad_data_ecc_a     :      1;
+                bdrkreg_t       eipw_sysad_addr_ecc_b     :      1;
+                bdrkreg_t       eipw_sysad_addr_ecc_a     :      1;
+                bdrkreg_t       eipw_syscmd_data_par_b    :      1;
+                bdrkreg_t       eipw_syscmd_data_par_a    :      1;
+                bdrkreg_t       eipw_syscmd_addr_par_b    :      1;
+                bdrkreg_t       eipw_syscmd_addr_par_a    :      1;
+                bdrkreg_t       eipw_spool_err_b          :      1;
+                bdrkreg_t       eipw_spool_err_a          :      1;
+                bdrkreg_t       eipw_ue_uncached_b        :      1;
+                bdrkreg_t       eipw_ue_uncached_a        :      1;
+                bdrkreg_t       eipw_sysstate_tag_b       :      1;
+                bdrkreg_t       eipw_sysstate_tag_a       :      1;
+                bdrkreg_t       eipw_mem_unc              :      1;
+                bdrkreg_t       eipw_sysad_bad_data_b     :      1;
+                bdrkreg_t       eipw_sysad_bad_data_a     :      1;
+                bdrkreg_t       eipw_ue_cached_b          :      1;
+                bdrkreg_t       eipw_ue_cached_a          :      1;
+                bdrkreg_t       eipw_pkt_len_err_b        :      1;
+                bdrkreg_t       eipw_pkt_len_err_a        :      1;
+                bdrkreg_t       eipw_irb_err_b            :      1;
+                bdrkreg_t       eipw_irb_err_a            :      1;
+                bdrkreg_t       eipw_irb_timeout_b        :      1;
+                bdrkreg_t       eipw_irb_timeout_a        :      1;
+                bdrkreg_t       eipw_rsvd                 :     29;
+	} pi_err_int_pend_wr_fld_s;
+} pi_err_int_pend_wr_u_t;
+
+#else
+
+typedef union pi_err_int_pend_wr_u {
+	bdrkreg_t	pi_err_int_pend_wr_regval;
+	struct	{
+		bdrkreg_t	eipw_rsvd		  :	29;
+		bdrkreg_t	eipw_irb_timeout_a	  :	 1;
+		bdrkreg_t	eipw_irb_timeout_b	  :	 1;
+		bdrkreg_t	eipw_irb_err_a		  :	 1;
+		bdrkreg_t	eipw_irb_err_b		  :	 1;
+		bdrkreg_t	eipw_pkt_len_err_a	  :	 1;
+		bdrkreg_t	eipw_pkt_len_err_b	  :	 1;
+		bdrkreg_t	eipw_ue_cached_a	  :	 1;
+		bdrkreg_t	eipw_ue_cached_b	  :	 1;
+		bdrkreg_t	eipw_sysad_bad_data_a	  :	 1;
+		bdrkreg_t	eipw_sysad_bad_data_b	  :	 1;
+		bdrkreg_t	eipw_mem_unc		  :	 1;
+		bdrkreg_t	eipw_sysstate_tag_a	  :	 1;
+		bdrkreg_t	eipw_sysstate_tag_b	  :	 1;
+		bdrkreg_t	eipw_ue_uncached_a	  :	 1;
+		bdrkreg_t	eipw_ue_uncached_b	  :	 1;
+		bdrkreg_t	eipw_spool_err_a	  :	 1;
+		bdrkreg_t	eipw_spool_err_b	  :	 1;
+		bdrkreg_t	eipw_syscmd_addr_par_a	  :	 1;
+		bdrkreg_t	eipw_syscmd_addr_par_b	  :	 1;
+		bdrkreg_t	eipw_syscmd_data_par_a	  :	 1;
+		bdrkreg_t	eipw_syscmd_data_par_b	  :	 1;
+		bdrkreg_t	eipw_sysad_addr_ecc_a	  :	 1;
+		bdrkreg_t	eipw_sysad_addr_ecc_b	  :	 1;
+		bdrkreg_t	eipw_sysad_data_ecc_a	  :	 1;
+		bdrkreg_t	eipw_sysad_data_ecc_b	  :	 1;
+		bdrkreg_t	eipw_sysstate_par_a	  :	 1;
+		bdrkreg_t	eipw_sysstate_par_b	  :	 1;
+		bdrkreg_t	eipw_wrb_werr_a		  :	 1;
+		bdrkreg_t	eipw_wrb_werr_b		  :	 1;
+		bdrkreg_t	eipw_wrb_terr_a		  :	 1;
+		bdrkreg_t	eipw_wrb_terr_b		  :	 1;
+		bdrkreg_t	eipw_spurious_a		  :	 1;
+		bdrkreg_t	eipw_spurious_b		  :	 1;
+		bdrkreg_t	eipw_spool_comp_a	  :	 1;
+		bdrkreg_t	eipw_spool_comp_b	  :	 1;
+	} pi_err_int_pend_wr_fld_s;
+} pi_err_int_pend_wr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  A read of this register returns all sources of         *
+ * Bedrock Error Interrupts. Storing to the write-with-clear location   *
+ * clears any bit for which a one appears on the data bus. Storing to   *
+ * the writable location does a direct write to all unreserved bits     *
+ * (except for MEM_UNC).                                                *
+ * In Synergy mode, the processor that is the source of the command     *
+ * that got an error is independent of the A or B SysAD bus. So in      *
+ * Synergy mode, Synergy provides the source processor number in bit    *
+ * 52 of the SysAD bus in all commands. The PI saves this in the RRB    *
+ * or WRB entry, and uses that value to determine which error bit (A    *
+ * or B) to set, as well as which ERR_STATUS and spool registers to     *
+ * use, for all error types in this register that are specified as an   *
+ * error to CPU_A or CPU_B.                                             *
+ * This register is not cleared at reset.                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_int_pend_u {
+	bdrkreg_t	pi_err_int_pend_regval;
+	struct  {
+		bdrkreg_t	eip_spool_comp_b          :	 1;
+                bdrkreg_t       eip_spool_comp_a          :      1;
+                bdrkreg_t       eip_spurious_b            :      1;
+                bdrkreg_t       eip_spurious_a            :      1;
+                bdrkreg_t       eip_wrb_terr_b            :      1;
+                bdrkreg_t       eip_wrb_terr_a            :      1;
+                bdrkreg_t       eip_wrb_werr_b            :      1;
+                bdrkreg_t       eip_wrb_werr_a            :      1;
+                bdrkreg_t       eip_sysstate_par_b        :      1;
+                bdrkreg_t       eip_sysstate_par_a        :      1;
+                bdrkreg_t       eip_sysad_data_ecc_b      :      1;
+                bdrkreg_t       eip_sysad_data_ecc_a      :      1;
+                bdrkreg_t       eip_sysad_addr_ecc_b      :      1;
+                bdrkreg_t       eip_sysad_addr_ecc_a      :      1;
+                bdrkreg_t       eip_syscmd_data_par_b     :      1;
+                bdrkreg_t       eip_syscmd_data_par_a     :      1;
+                bdrkreg_t       eip_syscmd_addr_par_b     :      1;
+                bdrkreg_t       eip_syscmd_addr_par_a     :      1;
+                bdrkreg_t       eip_spool_err_b           :      1;
+                bdrkreg_t       eip_spool_err_a           :      1;
+                bdrkreg_t       eip_ue_uncached_b         :      1;
+                bdrkreg_t       eip_ue_uncached_a         :      1;
+                bdrkreg_t       eip_sysstate_tag_b        :      1;
+                bdrkreg_t       eip_sysstate_tag_a        :      1;
+                bdrkreg_t       eip_mem_unc               :      1;
+                bdrkreg_t       eip_sysad_bad_data_b      :      1;
+                bdrkreg_t       eip_sysad_bad_data_a      :      1;
+                bdrkreg_t       eip_ue_cached_b           :      1;
+                bdrkreg_t       eip_ue_cached_a           :      1;
+                bdrkreg_t       eip_pkt_len_err_b         :      1;
+                bdrkreg_t       eip_pkt_len_err_a         :      1;
+                bdrkreg_t       eip_irb_err_b             :      1;
+                bdrkreg_t       eip_irb_err_a             :      1;
+                bdrkreg_t       eip_irb_timeout_b         :      1;
+                bdrkreg_t       eip_irb_timeout_a         :      1;
+                bdrkreg_t       eip_rsvd                  :     29;
+	} pi_err_int_pend_fld_s;
+} pi_err_int_pend_u_t;
+
+#else
+
+typedef union pi_err_int_pend_u {
+	bdrkreg_t	pi_err_int_pend_regval;
+	struct	{
+		bdrkreg_t	eip_rsvd		  :	29;
+		bdrkreg_t	eip_irb_timeout_a	  :	 1;
+		bdrkreg_t	eip_irb_timeout_b	  :	 1;
+		bdrkreg_t	eip_irb_err_a		  :	 1;
+		bdrkreg_t	eip_irb_err_b		  :	 1;
+		bdrkreg_t	eip_pkt_len_err_a	  :	 1;
+		bdrkreg_t	eip_pkt_len_err_b	  :	 1;
+		bdrkreg_t	eip_ue_cached_a		  :	 1;
+		bdrkreg_t	eip_ue_cached_b		  :	 1;
+		bdrkreg_t	eip_sysad_bad_data_a	  :	 1;
+		bdrkreg_t	eip_sysad_bad_data_b	  :	 1;
+		bdrkreg_t	eip_mem_unc		  :	 1;
+		bdrkreg_t	eip_sysstate_tag_a	  :	 1;
+		bdrkreg_t	eip_sysstate_tag_b	  :	 1;
+		bdrkreg_t	eip_ue_uncached_a	  :	 1;
+		bdrkreg_t	eip_ue_uncached_b	  :	 1;
+		bdrkreg_t	eip_spool_err_a		  :	 1;
+		bdrkreg_t	eip_spool_err_b		  :	 1;
+		bdrkreg_t	eip_syscmd_addr_par_a	  :	 1;
+		bdrkreg_t	eip_syscmd_addr_par_b	  :	 1;
+		bdrkreg_t	eip_syscmd_data_par_a	  :	 1;
+		bdrkreg_t	eip_syscmd_data_par_b	  :	 1;
+		bdrkreg_t	eip_sysad_addr_ecc_a	  :	 1;
+		bdrkreg_t	eip_sysad_addr_ecc_b	  :	 1;
+		bdrkreg_t	eip_sysad_data_ecc_a	  :	 1;
+		bdrkreg_t	eip_sysad_data_ecc_b	  :	 1;
+		bdrkreg_t	eip_sysstate_par_a	  :	 1;
+		bdrkreg_t	eip_sysstate_par_b	  :	 1;
+		bdrkreg_t	eip_wrb_werr_a		  :	 1;
+		bdrkreg_t	eip_wrb_werr_b		  :	 1;
+		bdrkreg_t	eip_wrb_terr_a		  :	 1;
+		bdrkreg_t	eip_wrb_terr_b		  :	 1;
+		bdrkreg_t	eip_spurious_a		  :	 1;
+		bdrkreg_t	eip_spurious_b		  :	 1;
+		bdrkreg_t	eip_spool_comp_a	  :	 1;
+		bdrkreg_t	eip_spool_comp_b	  :	 1;
+	} pi_err_int_pend_fld_s;
+} pi_err_int_pend_u_t;
+
+#endif
+
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. This read/write       *
+ * register masks the contents of ERR_INT_PEND to determine which       *
+ * conditions cause a Level-6 interrupt to CPU_A or CPU_B. A bit set    *
+ * allows the interrupt. Only one processor in a Bedrock should         *
+ * enable the Memory/Directory Uncorrectable Error bit.                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_int_mask_a_u {
+	bdrkreg_t	pi_err_int_mask_a_regval;
+	struct  {
+		bdrkreg_t	eima_mask                 :	35;
+		bdrkreg_t       eima_rsvd                 :     29;
+	} pi_err_int_mask_a_fld_s;
+} pi_err_int_mask_a_u_t;
+
+#else
+
+typedef union pi_err_int_mask_a_u {
+	bdrkreg_t	pi_err_int_mask_a_regval;
+	struct	{
+		bdrkreg_t	eima_rsvd		  :	29;
+		bdrkreg_t	eima_mask		  :	35;
+	} pi_err_int_mask_a_fld_s;
+} pi_err_int_mask_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. This read/write       *
+ * register masks the contents of ERR_INT_PEND to determine which       *
+ * conditions cause a Level-6 interrupt to CPU_A or CPU_B. A bit set    *
+ * allows the interrupt. Only one processor in a Bedrock should         *
+ * enable the Memory/Directory Uncorrectable Error bit.                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_int_mask_b_u {
+	bdrkreg_t	pi_err_int_mask_b_regval;
+	struct  {
+		bdrkreg_t	eimb_mask                 :	35;
+		bdrkreg_t       eimb_rsvd                 :     29;
+	} pi_err_int_mask_b_fld_s;
+} pi_err_int_mask_b_u_t;
+
+#else
+
+typedef union pi_err_int_mask_b_u {
+	bdrkreg_t	pi_err_int_mask_b_regval;
+	struct	{
+		bdrkreg_t	eimb_rsvd		  :	29;
+		bdrkreg_t	eimb_mask		  :	35;
+	} pi_err_int_mask_b_fld_s;
+} pi_err_int_mask_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There is one of these registers for each CPU. This     *
+ * register is the address of the next write to the error stack. This   *
+ * register is incremented after each such write. Only the low N bits   *
+ * are incremented, where N is defined by the size of the error stack   *
+ * specified in the ERR_STACK_SIZE register.                            *
+ * This register is not reset by a soft reset.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_stack_addr_a_u {
+	bdrkreg_t	pi_err_stack_addr_a_regval;
+	struct  {
+		bdrkreg_t	esaa_rsvd_1               :	 3;
+                bdrkreg_t       esaa_addr                 :     30;
+                bdrkreg_t       esaa_rsvd                 :     31;
+	} pi_err_stack_addr_a_fld_s;
+} pi_err_stack_addr_a_u_t;
+
+#else
+
+typedef union pi_err_stack_addr_a_u {
+	bdrkreg_t	pi_err_stack_addr_a_regval;
+	struct	{
+		bdrkreg_t	esaa_rsvd		  :	31;
+		bdrkreg_t	esaa_addr		  :	30;
+		bdrkreg_t	esaa_rsvd_1		  :	 3;
+	} pi_err_stack_addr_a_fld_s;
+} pi_err_stack_addr_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  There is one of these registers for each CPU. This     *
+ * register is the address of the next write to the error stack. This   *
+ * register is incremented after each such write. Only the low N bits   *
+ * are incremented, where N is defined by the size of the error stack   *
+ * specified in the ERR_STACK_SIZE register.                            *
+ * This register is not reset by a soft reset.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_stack_addr_b_u {
+	bdrkreg_t	pi_err_stack_addr_b_regval;
+	struct  {
+		bdrkreg_t	esab_rsvd_1               :	 3;
+                bdrkreg_t       esab_addr                 :     30;
+                bdrkreg_t       esab_rsvd                 :     31;
+	} pi_err_stack_addr_b_fld_s;
+} pi_err_stack_addr_b_u_t;
+
+#else
+
+typedef union pi_err_stack_addr_b_u {
+	bdrkreg_t	pi_err_stack_addr_b_regval;
+	struct	{
+		bdrkreg_t	esab_rsvd		  :	31;
+		bdrkreg_t	esab_addr		  :	30;
+		bdrkreg_t	esab_rsvd_1		  :	 3;
+	} pi_err_stack_addr_b_fld_s;
+} pi_err_stack_addr_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  Sets the size (number of 64-bit entries) in the        *
+ * error stack that is spooled to local memory when an error occurs.    *
+ * Table 16 defines the format of each entry in the spooled error       *
+ * stack.                                                               *
+ * This register is not reset by a soft reset.                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_stack_size_u {
+	bdrkreg_t	pi_err_stack_size_regval;
+	struct  {
+		bdrkreg_t	ess_size                  :	 4;
+                bdrkreg_t       ess_rsvd                  :     60;
+	} pi_err_stack_size_fld_s;
+} pi_err_stack_size_u_t;
+
+#else
+
+typedef union pi_err_stack_size_u {
+	bdrkreg_t	pi_err_stack_size_regval;
+	struct	{
+		bdrkreg_t	ess_rsvd		  :	60;
+		bdrkreg_t	ess_size		  :	 4;
+	} pi_err_stack_size_fld_s;
+} pi_err_stack_size_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. Writing this register with   *
+ * the Write-clear address (with any data) clears both the              *
+ * ERR_STATUS0_A and ERR_STATUS1_A registers.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_status0_a_u {
+	bdrkreg_t	pi_err_status0_a_regval;
+	struct  {
+		bdrkreg_t	esa_error_type            :	 3;
+                bdrkreg_t       esa_proc_req_num          :      3;
+                bdrkreg_t       esa_supplemental          :     11;
+                bdrkreg_t       esa_cmd                   :      8;
+                bdrkreg_t       esa_addr                  :     37;
+                bdrkreg_t       esa_over_run              :      1;
+                bdrkreg_t       esa_valid                 :      1;
+	} pi_err_status0_a_fld_s;
+} pi_err_status0_a_u_t;
+
+#else
+
+typedef union pi_err_status0_a_u {
+	bdrkreg_t	pi_err_status0_a_regval;
+	struct	{
+		bdrkreg_t	esa_valid		  :	 1;
+		bdrkreg_t	esa_over_run		  :	 1;
+		bdrkreg_t	esa_addr		  :	37;
+		bdrkreg_t	esa_cmd			  :	 8;
+		bdrkreg_t	esa_supplemental	  :	11;
+		bdrkreg_t	esa_proc_req_num	  :	 3;
+		bdrkreg_t	esa_error_type		  :	 3;
+	} pi_err_status0_a_fld_s;
+} pi_err_status0_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. Writing this register with   *
+ * the Write-clear address (with any data) clears both the              *
+ * ERR_STATUS0_A and ERR_STATUS1_A registers.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_status0_a_clr_u {
+	bdrkreg_t	pi_err_status0_a_clr_regval;
+	struct  {
+		bdrkreg_t	esac_error_type           :	 3;
+                bdrkreg_t       esac_proc_req_num         :      3;
+                bdrkreg_t       esac_supplemental         :     11;
+                bdrkreg_t       esac_cmd                  :      8;
+                bdrkreg_t       esac_addr                 :     37;
+                bdrkreg_t       esac_over_run             :      1;
+                bdrkreg_t       esac_valid                :      1;
+	} pi_err_status0_a_clr_fld_s;
+} pi_err_status0_a_clr_u_t;
+
+#else
+
+typedef union pi_err_status0_a_clr_u {
+	bdrkreg_t	pi_err_status0_a_clr_regval;
+	struct	{
+		bdrkreg_t	esac_valid		  :	 1;
+		bdrkreg_t	esac_over_run		  :	 1;
+		bdrkreg_t	esac_addr		  :	37;
+		bdrkreg_t	esac_cmd		  :	 8;
+		bdrkreg_t	esac_supplemental	  :	11;
+		bdrkreg_t	esac_proc_req_num	  :	 3;
+		bdrkreg_t	esac_error_type		  :	 3;
+	} pi_err_status0_a_clr_fld_s;
+} pi_err_status0_a_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. Writing this register with   *
+ * the Write-clear address (with any data) clears both the              *
+ * ERR_STATUS0_A and ERR_STATUS1_A registers.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_status1_a_u {
+	bdrkreg_t	pi_err_status1_a_regval;
+	struct  {
+		bdrkreg_t	esa_spool_count           :	21;
+                bdrkreg_t       esa_time_out_count        :      8;
+                bdrkreg_t       esa_inval_count           :     10;
+                bdrkreg_t       esa_crb_num               :      3;
+                bdrkreg_t       esa_wrb                   :      1;
+                bdrkreg_t       esa_e_bits                :      2;
+                bdrkreg_t       esa_t_bit                 :      1;
+                bdrkreg_t       esa_i_bit                 :      1;
+                bdrkreg_t       esa_h_bit                 :      1;
+                bdrkreg_t       esa_w_bit                 :      1;
+                bdrkreg_t       esa_a_bit                 :      1;
+                bdrkreg_t       esa_r_bit                 :      1;
+                bdrkreg_t       esa_v_bit                 :      1;
+                bdrkreg_t       esa_p_bit                 :      1;
+                bdrkreg_t       esa_source                :     11;
+	} pi_err_status1_a_fld_s;
+} pi_err_status1_a_u_t;
+
+#else
+
+typedef union pi_err_status1_a_u {
+	bdrkreg_t	pi_err_status1_a_regval;
+	struct	{
+		bdrkreg_t	esa_source		  :	11;
+		bdrkreg_t	esa_p_bit		  :	 1;
+		bdrkreg_t	esa_v_bit		  :	 1;
+		bdrkreg_t	esa_r_bit		  :	 1;
+		bdrkreg_t	esa_a_bit		  :	 1;
+		bdrkreg_t	esa_w_bit		  :	 1;
+		bdrkreg_t	esa_h_bit		  :	 1;
+		bdrkreg_t	esa_i_bit		  :	 1;
+		bdrkreg_t	esa_t_bit		  :	 1;
+		bdrkreg_t	esa_e_bits		  :	 2;
+		bdrkreg_t	esa_wrb			  :	 1;
+		bdrkreg_t	esa_crb_num		  :	 3;
+		bdrkreg_t	esa_inval_count		  :	10;
+		bdrkreg_t	esa_time_out_count	  :	 8;
+		bdrkreg_t	esa_spool_count		  :	21;
+	} pi_err_status1_a_fld_s;
+} pi_err_status1_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. Writing this register with   *
+ * the Write-clear address (with any data) clears both the              *
+ * ERR_STATUS0_A and ERR_STATUS1_A registers.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_status1_a_clr_u {
+	bdrkreg_t	pi_err_status1_a_clr_regval;
+	struct  {
+		bdrkreg_t	esac_spool_count          :	21;
+                bdrkreg_t       esac_time_out_count       :      8;
+                bdrkreg_t       esac_inval_count          :     10;
+                bdrkreg_t       esac_crb_num              :      3;
+                bdrkreg_t       esac_wrb                  :      1;
+                bdrkreg_t       esac_e_bits               :      2;
+                bdrkreg_t       esac_t_bit                :      1;
+                bdrkreg_t       esac_i_bit                :      1;
+                bdrkreg_t       esac_h_bit                :      1;
+                bdrkreg_t       esac_w_bit                :      1;
+                bdrkreg_t       esac_a_bit                :      1;
+                bdrkreg_t       esac_r_bit                :      1;
+                bdrkreg_t       esac_v_bit                :      1;
+                bdrkreg_t       esac_p_bit                :      1;
+                bdrkreg_t       esac_source               :     11;
+	} pi_err_status1_a_clr_fld_s;
+} pi_err_status1_a_clr_u_t;
+
+#else
+
+typedef union pi_err_status1_a_clr_u {
+	bdrkreg_t	pi_err_status1_a_clr_regval;
+	struct	{
+		bdrkreg_t	esac_source		  :	11;
+		bdrkreg_t	esac_p_bit		  :	 1;
+		bdrkreg_t	esac_v_bit		  :	 1;
+		bdrkreg_t	esac_r_bit		  :	 1;
+		bdrkreg_t	esac_a_bit		  :	 1;
+		bdrkreg_t	esac_w_bit		  :	 1;
+		bdrkreg_t	esac_h_bit		  :	 1;
+		bdrkreg_t	esac_i_bit		  :	 1;
+		bdrkreg_t	esac_t_bit		  :	 1;
+		bdrkreg_t	esac_e_bits		  :	 2;
+		bdrkreg_t	esac_wrb		  :	 1;
+		bdrkreg_t	esac_crb_num		  :	 3;
+		bdrkreg_t	esac_inval_count	  :	10;
+		bdrkreg_t	esac_time_out_count	  :	 8;
+		bdrkreg_t	esac_spool_count	  :	21;
+	} pi_err_status1_a_clr_fld_s;
+} pi_err_status1_a_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. Writing this register with   *
+ * the Write-clear address (with any data) clears both the              *
+ * ERR_STATUS0_B and ERR_STATUS1_B registers.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_status0_b_u {
+	bdrkreg_t	pi_err_status0_b_regval;
+	struct  {
+		bdrkreg_t	esb_error_type            :	 3;
+                bdrkreg_t       esb_proc_request_number   :      3;
+                bdrkreg_t       esb_supplemental          :     11;
+                bdrkreg_t       esb_cmd                   :      8;
+                bdrkreg_t       esb_addr                  :     37;
+                bdrkreg_t       esb_over_run              :      1;
+                bdrkreg_t       esb_valid                 :      1;
+	} pi_err_status0_b_fld_s;
+} pi_err_status0_b_u_t;
+
+#else
+
+typedef union pi_err_status0_b_u {
+	bdrkreg_t	pi_err_status0_b_regval;
+	struct	{
+		bdrkreg_t	esb_valid		  :	 1;
+		bdrkreg_t	esb_over_run		  :	 1;
+		bdrkreg_t	esb_addr		  :	37;
+		bdrkreg_t	esb_cmd			  :	 8;
+		bdrkreg_t	esb_supplemental	  :	11;
+		bdrkreg_t	esb_proc_request_number	  :	 3;
+		bdrkreg_t	esb_error_type		  :	 3;
+	} pi_err_status0_b_fld_s;
+} pi_err_status0_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. Writing this register with   *
+ * the Write-clear address (with any data) clears both the              *
+ * ERR_STATUS0_B and ERR_STATUS1_B registers.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_status0_b_clr_u {
+	bdrkreg_t	pi_err_status0_b_clr_regval;
+	struct  {
+		bdrkreg_t	esbc_error_type           :	 3;
+                bdrkreg_t       esbc_proc_request_number  :      3;
+                bdrkreg_t       esbc_supplemental         :     11;
+                bdrkreg_t       esbc_cmd                  :      8;
+                bdrkreg_t       esbc_addr                 :     37;
+                bdrkreg_t       esbc_over_run             :      1;
+                bdrkreg_t       esbc_valid                :      1;
+	} pi_err_status0_b_clr_fld_s;
+} pi_err_status0_b_clr_u_t;
+
+#else
+
+typedef union pi_err_status0_b_clr_u {
+	bdrkreg_t	pi_err_status0_b_clr_regval;
+	struct	{
+		bdrkreg_t	esbc_valid		  :	 1;
+		bdrkreg_t	esbc_over_run		  :	 1;
+		bdrkreg_t	esbc_addr		  :	37;
+		bdrkreg_t	esbc_cmd		  :	 8;
+		bdrkreg_t	esbc_supplemental	  :	11;
+		bdrkreg_t	esbc_proc_request_number  :	 3;
+		bdrkreg_t	esbc_error_type		  :	 3;
+	} pi_err_status0_b_clr_fld_s;
+} pi_err_status0_b_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. Writing this register with   *
+ * the Write-clear address (with any data) clears both the              *
+ * ERR_STATUS0_B and ERR_STATUS1_B registers.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_status1_b_u {
+	bdrkreg_t	pi_err_status1_b_regval;
+	struct  {
+		bdrkreg_t	esb_spool_count           :	21;
+                bdrkreg_t       esb_time_out_count        :      8;
+                bdrkreg_t       esb_inval_count           :     10;
+                bdrkreg_t       esb_crb_num               :      3;
+                bdrkreg_t       esb_wrb                   :      1;
+                bdrkreg_t       esb_e_bits                :      2;
+                bdrkreg_t       esb_t_bit                 :      1;
+                bdrkreg_t       esb_i_bit                 :      1;
+                bdrkreg_t       esb_h_bit                 :      1;
+                bdrkreg_t       esb_w_bit                 :      1;
+                bdrkreg_t       esb_a_bit                 :      1;
+                bdrkreg_t       esb_r_bit                 :      1;
+                bdrkreg_t       esb_v_bit                 :      1;
+                bdrkreg_t       esb_p_bit                 :      1;
+                bdrkreg_t       esb_source                :     11;
+	} pi_err_status1_b_fld_s;
+} pi_err_status1_b_u_t;
+
+#else
+
+typedef union pi_err_status1_b_u {
+	bdrkreg_t	pi_err_status1_b_regval;
+	struct	{
+		bdrkreg_t	esb_source		  :	11;
+		bdrkreg_t	esb_p_bit		  :	 1;
+		bdrkreg_t	esb_v_bit		  :	 1;
+		bdrkreg_t	esb_r_bit		  :	 1;
+		bdrkreg_t	esb_a_bit		  :	 1;
+		bdrkreg_t	esb_w_bit		  :	 1;
+		bdrkreg_t	esb_h_bit		  :	 1;
+		bdrkreg_t	esb_i_bit		  :	 1;
+		bdrkreg_t	esb_t_bit		  :	 1;
+		bdrkreg_t	esb_e_bits		  :	 2;
+		bdrkreg_t	esb_wrb			  :	 1;
+		bdrkreg_t	esb_crb_num		  :	 3;
+		bdrkreg_t	esb_inval_count		  :	10;
+		bdrkreg_t	esb_time_out_count	  :	 8;
+		bdrkreg_t	esb_spool_count		  :	21;
+	} pi_err_status1_b_fld_s;
+} pi_err_status1_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. Writing this register with   *
+ * the Write-clear address (with any data) clears both the              *
+ * ERR_STATUS0_B and ERR_STATUS1_B registers.                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_status1_b_clr_u {
+	bdrkreg_t	pi_err_status1_b_clr_regval;
+	struct  {
+		bdrkreg_t	esbc_spool_count          :	21;
+                bdrkreg_t       esbc_time_out_count       :      8;
+                bdrkreg_t       esbc_inval_count          :     10;
+                bdrkreg_t       esbc_crb_num              :      3;
+                bdrkreg_t       esbc_wrb                  :      1;
+                bdrkreg_t       esbc_e_bits               :      2;
+                bdrkreg_t       esbc_t_bit                :      1;
+                bdrkreg_t       esbc_i_bit                :      1;
+                bdrkreg_t       esbc_h_bit                :      1;
+                bdrkreg_t       esbc_w_bit                :      1;
+                bdrkreg_t       esbc_a_bit                :      1;
+                bdrkreg_t       esbc_r_bit                :      1;
+                bdrkreg_t       esbc_v_bit                :      1;
+                bdrkreg_t       esbc_p_bit                :      1;
+                bdrkreg_t       esbc_source               :     11;
+	} pi_err_status1_b_clr_fld_s;
+} pi_err_status1_b_clr_u_t;
+
+#else
+
+typedef union pi_err_status1_b_clr_u {
+	bdrkreg_t	pi_err_status1_b_clr_regval;
+	struct	{
+		bdrkreg_t	esbc_source		  :	11;
+		bdrkreg_t	esbc_p_bit		  :	 1;
+		bdrkreg_t	esbc_v_bit		  :	 1;
+		bdrkreg_t	esbc_r_bit		  :	 1;
+		bdrkreg_t	esbc_a_bit		  :	 1;
+		bdrkreg_t	esbc_w_bit		  :	 1;
+		bdrkreg_t	esbc_h_bit		  :	 1;
+		bdrkreg_t	esbc_i_bit		  :	 1;
+		bdrkreg_t	esbc_t_bit		  :	 1;
+		bdrkreg_t	esbc_e_bits		  :	 2;
+		bdrkreg_t	esbc_wrb		  :	 1;
+		bdrkreg_t	esbc_crb_num		  :	 3;
+		bdrkreg_t	esbc_inval_count	  :	10;
+		bdrkreg_t	esbc_time_out_count	  :	 8;
+		bdrkreg_t	esbc_spool_count	  :	21;
+	} pi_err_status1_b_clr_fld_s;
+} pi_err_status1_b_clr_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_spool_cmp_a_u {
+	bdrkreg_t	pi_spool_cmp_a_regval;
+	struct  {
+		bdrkreg_t	sca_compare               :	20;
+		bdrkreg_t       sca_rsvd                  :     44;
+	} pi_spool_cmp_a_fld_s;
+} pi_spool_cmp_a_u_t;
+
+#else
+
+typedef union pi_spool_cmp_a_u {
+	bdrkreg_t	pi_spool_cmp_a_regval;
+	struct	{
+		bdrkreg_t	sca_rsvd		  :	44;
+		bdrkreg_t	sca_compare		  :	20;
+	} pi_spool_cmp_a_fld_s;
+} pi_spool_cmp_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_spool_cmp_b_u {
+	bdrkreg_t	pi_spool_cmp_b_regval;
+	struct  {
+		bdrkreg_t	scb_compare               :	20;
+		bdrkreg_t       scb_rsvd                  :     44;
+	} pi_spool_cmp_b_fld_s;
+} pi_spool_cmp_b_u_t;
+
+#else
+
+typedef union pi_spool_cmp_b_u {
+	bdrkreg_t	pi_spool_cmp_b_regval;
+	struct	{
+		bdrkreg_t	scb_rsvd		  :	44;
+		bdrkreg_t	scb_compare		  :	20;
+	} pi_spool_cmp_b_fld_s;
+} pi_spool_cmp_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. A timeout can be      *
+ * forced by writing one(s).                                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_crb_timeout_a_u {
+	bdrkreg_t	pi_crb_timeout_a_regval;
+	struct  {
+		bdrkreg_t	cta_rrb                   :	 4;
+                bdrkreg_t       cta_wrb                   :      8;
+                bdrkreg_t       cta_rsvd                  :     52;
+	} pi_crb_timeout_a_fld_s;
+} pi_crb_timeout_a_u_t;
+
+#else
+
+typedef union pi_crb_timeout_a_u {
+	bdrkreg_t	pi_crb_timeout_a_regval;
+	struct	{
+		bdrkreg_t	cta_rsvd		  :	52;
+		bdrkreg_t	cta_wrb			  :	 8;
+		bdrkreg_t	cta_rrb			  :	 4;
+	} pi_crb_timeout_a_fld_s;
+} pi_crb_timeout_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. A timeout can be      *
+ * forced by writing one(s).                                            *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_crb_timeout_b_u {
+	bdrkreg_t	pi_crb_timeout_b_regval;
+	struct  {
+		bdrkreg_t	ctb_rrb                   :	 4;
+                bdrkreg_t       ctb_wrb                   :      8;
+                bdrkreg_t       ctb_rsvd                  :     52;
+	} pi_crb_timeout_b_fld_s;
+} pi_crb_timeout_b_u_t;
+
+#else
+
+typedef union pi_crb_timeout_b_u {
+	bdrkreg_t	pi_crb_timeout_b_regval;
+	struct	{
+		bdrkreg_t	ctb_rsvd		  :	52;
+		bdrkreg_t	ctb_wrb			  :	 8;
+		bdrkreg_t	ctb_rrb			  :	 4;
+	} pi_crb_timeout_b_fld_s;
+} pi_crb_timeout_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register controls error checking and forwarding of SysAD       *
+ * errors.                                                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_sysad_errchk_en_u {
+	bdrkreg_t	pi_sysad_errchk_en_regval;
+	struct  {
+		bdrkreg_t	see_ecc_gen_en            :	 1;
+                bdrkreg_t       see_qual_gen_en           :      1;
+                bdrkreg_t       see_sadp_chk_en           :      1;
+                bdrkreg_t       see_cmdp_chk_en           :      1;
+                bdrkreg_t       see_state_chk_en          :      1;
+                bdrkreg_t       see_qual_chk_en           :      1;
+                bdrkreg_t       see_rsvd                  :     58;
+	} pi_sysad_errchk_en_fld_s;
+} pi_sysad_errchk_en_u_t;
+
+#else
+
+typedef union pi_sysad_errchk_en_u {
+	bdrkreg_t	pi_sysad_errchk_en_regval;
+	struct	{
+		bdrkreg_t	see_rsvd		  :	58;
+		bdrkreg_t	see_qual_chk_en		  :	 1;
+		bdrkreg_t	see_state_chk_en	  :	 1;
+		bdrkreg_t	see_cmdp_chk_en		  :	 1;
+		bdrkreg_t	see_sadp_chk_en		  :	 1;
+		bdrkreg_t	see_qual_gen_en		  :	 1;
+		bdrkreg_t	see_ecc_gen_en		  :	 1;
+	} pi_sysad_errchk_en_fld_s;
+} pi_sysad_errchk_en_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. If any bit in this    *
+ * register is set, then whenever reply data arrives with the UE        *
+ * (uncorrectable error) indication set, the check-bits that are        *
+ * generated and sent to the SysAD will be inverted corresponding to    *
+ * the bits set in the register. This will also prevent the assertion   *
+ * of the data quality indicator.                                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_force_bad_check_bit_a_u {
+	bdrkreg_t	pi_force_bad_check_bit_a_regval;
+	struct  {
+		bdrkreg_t	fbcba_bad_check_bit       :	 8;
+		bdrkreg_t       fbcba_rsvd                :     56;
+	} pi_force_bad_check_bit_a_fld_s;
+} pi_force_bad_check_bit_a_u_t;
+
+#else
+
+typedef union pi_force_bad_check_bit_a_u {
+	bdrkreg_t	pi_force_bad_check_bit_a_regval;
+	struct	{
+		bdrkreg_t	fbcba_rsvd		  :	56;
+		bdrkreg_t	fbcba_bad_check_bit	  :	 8;
+	} pi_force_bad_check_bit_a_fld_s;
+} pi_force_bad_check_bit_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. If any bit in this    *
+ * register is set, then whenever reply data arrives with the UE        *
+ * (uncorrectable error) indication set, the check-bits that are        *
+ * generated and sent to the SysAD will be inverted corresponding to    *
+ * the bits set in the register. This will also prevent the assertion   *
+ * of the data quality indicator.                                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_force_bad_check_bit_b_u {
+	bdrkreg_t	pi_force_bad_check_bit_b_regval;
+	struct  {
+		bdrkreg_t	fbcbb_bad_check_bit       :	 8;
+		bdrkreg_t       fbcbb_rsvd                :     56;
+	} pi_force_bad_check_bit_b_fld_s;
+} pi_force_bad_check_bit_b_u_t;
+
+#else
+
+typedef union pi_force_bad_check_bit_b_u {
+	bdrkreg_t	pi_force_bad_check_bit_b_regval;
+	struct	{
+		bdrkreg_t	fbcbb_rsvd		  :	56;
+		bdrkreg_t	fbcbb_bad_check_bit	  :	 8;
+	} pi_force_bad_check_bit_b_fld_s;
+} pi_force_bad_check_bit_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. When a counter is     *
+ * enabled, it increments each time a DNACK reply is received. The      *
+ * counter is cleared when any other reply is received. The register    *
+ * is cleared when the CNT_EN bit is zero. If a DNACK reply is          *
+ * received when the counter equals the value in the NACK_CMP           *
+ * register, the counter is cleared, an error response is sent to the   *
+ * CPU instead of a nack response, and the NACK_INT_A/B bit is set in   *
+ * INT_PEND1.                                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_nack_cnt_a_u {
+	bdrkreg_t	pi_nack_cnt_a_regval;
+	struct  {
+		bdrkreg_t	nca_nack_cnt              :	20;
+                bdrkreg_t       nca_cnt_en                :      1;
+                bdrkreg_t       nca_rsvd                  :     43;
+	} pi_nack_cnt_a_fld_s;
+} pi_nack_cnt_a_u_t;
+
+#else
+
+typedef union pi_nack_cnt_a_u {
+	bdrkreg_t	pi_nack_cnt_a_regval;
+	struct	{
+		bdrkreg_t	nca_rsvd		  :	43;
+		bdrkreg_t	nca_cnt_en		  :	 1;
+		bdrkreg_t	nca_nack_cnt		  :	20;
+	} pi_nack_cnt_a_fld_s;
+} pi_nack_cnt_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  There is one of these registers for each CPU. When a counter is     *
+ * enabled, it increments each time a DNACK reply is received. The      *
+ * counter is cleared when any other reply is received. The register    *
+ * is cleared when the CNT_EN bit is zero. If a DNACK reply is          *
+ * received when the counter equals the value in the NACK_CMP           *
+ * register, the counter is cleared, an error response is sent to the   *
+ * CPU instead of a nack response, and the NACK_INT_A/B bit is set in   *
+ * INT_PEND1.                                                           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_nack_cnt_b_u {
+	bdrkreg_t	pi_nack_cnt_b_regval;
+	struct  {
+		bdrkreg_t	ncb_nack_cnt              :	20;
+                bdrkreg_t       ncb_cnt_en                :      1;
+                bdrkreg_t       ncb_rsvd                  :     43;
+	} pi_nack_cnt_b_fld_s;
+} pi_nack_cnt_b_u_t;
+
+#else
+
+typedef union pi_nack_cnt_b_u {
+	bdrkreg_t	pi_nack_cnt_b_regval;
+	struct	{
+		bdrkreg_t	ncb_rsvd		  :	43;
+		bdrkreg_t	ncb_cnt_en		  :	 1;
+		bdrkreg_t	ncb_nack_cnt		  :	20;
+	} pi_nack_cnt_b_fld_s;
+} pi_nack_cnt_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  The setting of this register affects both CPUs on this PI.          *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_nack_cmp_u {
+	bdrkreg_t	pi_nack_cmp_regval;
+	struct  {
+		bdrkreg_t	nc_nack_cmp               :	20;
+		bdrkreg_t       nc_rsvd                   :     44;
+	} pi_nack_cmp_fld_s;
+} pi_nack_cmp_u_t;
+
+#else
+
+typedef union pi_nack_cmp_u {
+	bdrkreg_t	pi_nack_cmp_regval;
+	struct	{
+		bdrkreg_t	nc_rsvd			  :	44;
+		bdrkreg_t	nc_nack_cmp		  :	20;
+	} pi_nack_cmp_fld_s;
+} pi_nack_cmp_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register controls which errors are spooled. When a bit in      *
+ * this register is set, the corresponding error is spooled. The        *
+ * setting of this register affects both CPUs on this PI.               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_spool_mask_u {
+	bdrkreg_t	pi_spool_mask_regval;
+	struct  {
+		bdrkreg_t	sm_access_err             :	 1;
+                bdrkreg_t       sm_uncached_err           :      1;
+                bdrkreg_t       sm_dir_err                :      1;
+                bdrkreg_t       sm_timeout_err            :      1;
+                bdrkreg_t       sm_poison_err             :      1;
+                bdrkreg_t       sm_nack_oflow_err         :      1;
+                bdrkreg_t       sm_rsvd                   :     58;
+	} pi_spool_mask_fld_s;
+} pi_spool_mask_u_t;
+
+#else
+
+typedef union pi_spool_mask_u {
+	bdrkreg_t	pi_spool_mask_regval;
+	struct	{
+		bdrkreg_t	sm_rsvd			  :	58;
+		bdrkreg_t	sm_nack_oflow_err	  :	 1;
+		bdrkreg_t	sm_poison_err		  :	 1;
+		bdrkreg_t	sm_timeout_err		  :	 1;
+		bdrkreg_t	sm_dir_err		  :	 1;
+		bdrkreg_t	sm_uncached_err		  :	 1;
+		bdrkreg_t	sm_access_err		  :	 1;
+	} pi_spool_mask_fld_s;
+} pi_spool_mask_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. When the VALID bit is        *
+ * zero, this register (along with SPURIOUS_HDR_1) will capture the     *
+ * header of an incoming spurious message received from the XBar. A     *
+ * spurious message is a message that does not match up with any of     *
+ * the CRB entries. This is a read/write register, so it is cleared     *
+ * by writing of all zeros.                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_spurious_hdr_0_u {
+	bdrkreg_t	pi_spurious_hdr_0_regval;
+	struct  {
+		bdrkreg_t	sh0_prev_valid_b          :	 1;
+                bdrkreg_t       sh0_prev_valid_a          :      1;
+                bdrkreg_t       sh0_rsvd                  :      4;
+                bdrkreg_t       sh0_supplemental          :     11;
+                bdrkreg_t       sh0_cmd                   :      8;
+                bdrkreg_t       sh0_addr                  :     37;
+                bdrkreg_t       sh0_tail                  :      1;
+                bdrkreg_t       sh0_valid                 :      1;
+	} pi_spurious_hdr_0_fld_s;
+} pi_spurious_hdr_0_u_t;
+
+#else
+
+typedef union pi_spurious_hdr_0_u {
+	bdrkreg_t	pi_spurious_hdr_0_regval;
+	struct	{
+		bdrkreg_t	sh0_valid		  :	 1;
+		bdrkreg_t	sh0_tail		  :	 1;
+		bdrkreg_t	sh0_addr		  :	37;
+		bdrkreg_t	sh0_cmd			  :	 8;
+		bdrkreg_t	sh0_supplemental	  :	11;
+		bdrkreg_t	sh0_rsvd		  :	 4;
+		bdrkreg_t	sh0_prev_valid_a	  :	 1;
+		bdrkreg_t	sh0_prev_valid_b	  :	 1;
+	} pi_spurious_hdr_0_fld_s;
+} pi_spurious_hdr_0_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is not cleared at reset. When the VALID bit in        *
+ * SPURIOUS_HDR_0 is zero, this register (along with SPURIOUS_HDR_0)    *
+ * will capture the header of an incoming spurious message received     *
+ * from the XBar. A spurious message is a message that does not match   *
+ * up with any of the CRB entries. This is a read/write register, so    *
+ * it is cleared by writing of all zeros.                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_spurious_hdr_1_u {
+	bdrkreg_t	pi_spurious_hdr_1_regval;
+	struct  {
+		bdrkreg_t	sh1_rsvd                  :	53;
+		bdrkreg_t       sh1_source                :     11;
+	} pi_spurious_hdr_1_fld_s;
+} pi_spurious_hdr_1_u_t;
+
+#else
+
+typedef union pi_spurious_hdr_1_u {
+	bdrkreg_t	pi_spurious_hdr_1_regval;
+	struct	{
+		bdrkreg_t	sh1_source		  :	11;
+		bdrkreg_t	sh1_rsvd		  :	53;
+	} pi_spurious_hdr_1_fld_s;
+} pi_spurious_hdr_1_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ * Description:  This register controls the injection of errors in      *
+ * outbound SysAD transfers. When a write sets a bit in this            *
+ * register, the PI logic is "armed" to inject that error. At the       *
+ * first transfer of the specified type, the error is injected and      *
+ * the bit in this register is cleared. Writing to this register does   *
+ * not cause a transaction to occur. A bit in this register will        *
+ * remain set until a transaction of the specified type occurs as a     *
+ * result of normal system activity. This register can be polled to     *
+ * determine if an error has been injected or is still "armed".         *
+ * This register does not control injection of data quality bad         *
+ * indicator on a data cycle. This type of error can be created by      *
+ * reading from a memory location that has an uncorrectable ECC         *
+ * error.                                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_err_inject_u {
+	bdrkreg_t	pi_err_inject_regval;
+	struct  {
+		bdrkreg_t	ei_cmd_syscmd_par_a       :	 1;
+                bdrkreg_t       ei_data_syscmd_par_a      :      1;
+                bdrkreg_t       ei_cmd_sysad_corecc_a     :      1;
+                bdrkreg_t       ei_data_sysad_corecc_a    :      1;
+                bdrkreg_t       ei_cmd_sysad_uncecc_a     :      1;
+                bdrkreg_t       ei_data_sysad_uncecc_a    :      1;
+                bdrkreg_t       ei_sysresp_par_a          :      1;
+                bdrkreg_t       ei_reserved_1             :     25;
+                bdrkreg_t       ei_cmd_syscmd_par_b       :      1;
+                bdrkreg_t       ei_data_syscmd_par_b      :      1;
+                bdrkreg_t       ei_cmd_sysad_corecc_b     :      1;
+                bdrkreg_t       ei_data_sysad_corecc_b    :      1;
+                bdrkreg_t       ei_cmd_sysad_uncecc_b     :      1;
+                bdrkreg_t       ei_data_sysad_uncecc_b    :      1;
+                bdrkreg_t       ei_sysresp_par_b          :      1;
+                bdrkreg_t       ei_reserved               :     25;
+	} pi_err_inject_fld_s;
+} pi_err_inject_u_t;
+
+#else
+
+typedef union pi_err_inject_u {
+	bdrkreg_t	pi_err_inject_regval;
+	struct	{
+		bdrkreg_t	ei_reserved		  :	25;
+		bdrkreg_t	ei_sysresp_par_b	  :	 1;
+		bdrkreg_t	ei_data_sysad_uncecc_b	  :	 1;
+		bdrkreg_t	ei_cmd_sysad_uncecc_b	  :	 1;
+		bdrkreg_t	ei_data_sysad_corecc_b	  :	 1;
+		bdrkreg_t	ei_cmd_sysad_corecc_b	  :	 1;
+		bdrkreg_t	ei_data_syscmd_par_b	  :	 1;
+		bdrkreg_t	ei_cmd_syscmd_par_b	  :	 1;
+		bdrkreg_t	ei_reserved_1		  :	25;
+		bdrkreg_t	ei_sysresp_par_a	  :	 1;
+		bdrkreg_t	ei_data_sysad_uncecc_a	  :	 1;
+		bdrkreg_t	ei_cmd_sysad_uncecc_a	  :	 1;
+		bdrkreg_t	ei_data_sysad_corecc_a	  :	 1;
+		bdrkreg_t	ei_cmd_sysad_corecc_a	  :	 1;
+		bdrkreg_t	ei_data_syscmd_par_a	  :	 1;
+		bdrkreg_t	ei_cmd_syscmd_par_a	  :	 1;
+	} pi_err_inject_fld_s;
+} pi_err_inject_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This Read/Write location determines at what point the TRex+ is      *
+ * stopped from issuing requests, based on the number of entries in     *
+ * the incoming reply FIFO. When the number of entries in the Reply     *
+ * FIFO is greater than the value of this register, the PI will         *
+ * deassert both SysWrRdy and SysRdRdy to both processors. The Reply    *
+ * FIFO has a depth of 0x3F entries, so setting this register to 0x3F   *
+ * effectively disables this feature, allowing requests to be issued    *
+ * always. Setting this register to 0x00 effectively lowers the         *
+ * TRex+'s priority below the reply FIFO, disabling TRex+ requests      *
+ * any time there is an entry waiting in the incoming FIFO. This        *
+ * register is in its own 64KB page so that it can be mapped to user    *
+ * space.                                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_reply_level_u {
+	bdrkreg_t	pi_reply_level_regval;
+	struct  {
+		bdrkreg_t	rl_reply_level            :	 6;
+		bdrkreg_t	rl_rsvd			  :	58;
+	} pi_reply_level_fld_s;
+} pi_reply_level_u_t;
+
+#else
+
+typedef union pi_reply_level_u {
+	bdrkreg_t	pi_reply_level_regval;
+	struct	{
+		bdrkreg_t	rl_rsvd			  :	58;
+		bdrkreg_t	rl_reply_level		  :	 6;
+	} pi_reply_level_fld_s;
+} pi_reply_level_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register is used to change the graphics credit counter         *
+ * operation from "Doubleword" mode to "Transaction" mode. This         *
+ * register is in its own 64KB page so that it can be mapped to user    *
+ * space.                                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_gfx_credit_mode_u {
+	bdrkreg_t	pi_gfx_credit_mode_regval;
+	struct  {
+		bdrkreg_t	gcm_trans_mode            :	 1;
+		bdrkreg_t       gcm_rsvd                  :     63;
+	} pi_gfx_credit_mode_fld_s;
+} pi_gfx_credit_mode_u_t;
+
+#else
+
+typedef union pi_gfx_credit_mode_u {
+	bdrkreg_t	pi_gfx_credit_mode_regval;
+	struct	{
+		bdrkreg_t	gcm_rsvd		  :	63;
+		bdrkreg_t	gcm_trans_mode		  :	 1;
+	} pi_gfx_credit_mode_fld_s;
+} pi_gfx_credit_mode_u_t;
+
+#endif
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This location contains a 55-bit read/write counter that wraps to    *
+ * zero when the maximum value is reached. This counter is              *
+ * incremented at each rising edge of the global clock (GCLK). This     *
+ * register is in its own 64KB page so that it can be mapped to user    *
+ * space.                                                               *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_rt_counter_u {
+	bdrkreg_t	pi_rt_counter_regval;
+	struct  {
+		bdrkreg_t	rc_count                  :	55;
+		bdrkreg_t       rc_rsvd                   :      9;
+	} pi_rt_counter_fld_s;
+} pi_rt_counter_u_t;
+
+#else
+
+typedef union pi_rt_counter_u {
+	bdrkreg_t	pi_rt_counter_regval;
+	struct	{
+		bdrkreg_t	rc_rsvd			  :	 9;
+		bdrkreg_t	rc_count		  :	55;
+	} pi_rt_counter_fld_s;
+} pi_rt_counter_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register controls the performance counters for one CPU.        *
+ * There are two counters for each CPU. Each counter can be             *
+ * configured to count a variety of events. The performance counter     *
+ * registers for each processor are in their own 64KB page so that      *
+ * they can be mapped to user space.                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_perf_cntl_a_u {
+	bdrkreg_t	pi_perf_cntl_a_regval;
+	struct  {
+		bdrkreg_t	pca_cntr_0_select         :	28;
+                bdrkreg_t       pca_cntr_0_mode           :      3;
+                bdrkreg_t       pca_cntr_0_enable         :      1;
+                bdrkreg_t       pca_cntr_1_select         :     28;
+                bdrkreg_t       pca_cntr_1_mode           :      3;
+                bdrkreg_t       pca_cntr_1_enable         :      1;
+	} pi_perf_cntl_a_fld_s;
+} pi_perf_cntl_a_u_t;
+
+#else
+
+typedef union pi_perf_cntl_a_u {
+	bdrkreg_t	pi_perf_cntl_a_regval;
+	struct	{
+		bdrkreg_t	pca_cntr_1_enable	  :	 1;
+		bdrkreg_t	pca_cntr_1_mode		  :	 3;
+		bdrkreg_t	pca_cntr_1_select	  :	28;
+		bdrkreg_t	pca_cntr_0_enable	  :	 1;
+		bdrkreg_t	pca_cntr_0_mode		  :	 3;
+		bdrkreg_t	pca_cntr_0_select	  :	28;
+	} pi_perf_cntl_a_fld_s;
+} pi_perf_cntl_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register accesses the performance counter 0 for each CPU.      *
+ * Each performance counter is 40-bits wide. On overflow, It wraps to   *
+ * zero, sets the overflow bit in this register, and sets the           *
+ * PERF_CNTR_OFLOW bit in the INT_PEND1 register.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_perf_cntr0_a_u {
+	bdrkreg_t	pi_perf_cntr0_a_regval;
+	struct  {
+		bdrkreg_t	pca_count_value           :	40;
+                bdrkreg_t       pca_overflow              :      1;
+                bdrkreg_t       pca_rsvd                  :     23;
+	} pi_perf_cntr0_a_fld_s;
+} pi_perf_cntr0_a_u_t;
+
+#else
+
+typedef union pi_perf_cntr0_a_u {
+	bdrkreg_t	pi_perf_cntr0_a_regval;
+	struct	{
+		bdrkreg_t	pca_rsvd		  :	23;
+		bdrkreg_t	pca_overflow		  :	 1;
+		bdrkreg_t	pca_count_value		  :	40;
+	} pi_perf_cntr0_a_fld_s;
+} pi_perf_cntr0_a_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register accesses the performance counter 1 for each CPU.      *
+ * Each performance counter is 40-bits wide. On overflow, It wraps to   *
+ * zero, sets the overflow bit in this register, and sets the           *
+ * PERF_CNTR_OFLOW bit in the INT_PEND1 register.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_perf_cntr1_a_u {
+	bdrkreg_t	pi_perf_cntr1_a_regval;
+	struct  {
+		bdrkreg_t	pca_count_value           :	40;
+                bdrkreg_t       pca_overflow              :      1;
+                bdrkreg_t       pca_rsvd                  :     23;
+	} pi_perf_cntr1_a_fld_s;
+} pi_perf_cntr1_a_u_t;
+
+#else
+
+typedef union pi_perf_cntr1_a_u {
+	bdrkreg_t	pi_perf_cntr1_a_regval;
+	struct	{
+		bdrkreg_t	pca_rsvd		  :	23;
+		bdrkreg_t	pca_overflow		  :	 1;
+		bdrkreg_t	pca_count_value		  :	40;
+	} pi_perf_cntr1_a_fld_s;
+} pi_perf_cntr1_a_u_t;
+
+#endif
+
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register controls the performance counters for one CPU.        *
+ * There are two counters for each CPU. Each counter can be             *
+ * configured to count a variety of events. The performance counter     *
+ * registers for each processor are in their own 64KB page so that      *
+ * they can be mapped to user space.                                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_perf_cntl_b_u {
+	bdrkreg_t	pi_perf_cntl_b_regval;
+	struct  {
+		bdrkreg_t	pcb_cntr_0_select         :	28;
+                bdrkreg_t       pcb_cntr_0_mode           :      3;
+                bdrkreg_t       pcb_cntr_0_enable         :      1;
+                bdrkreg_t       pcb_cntr_1_select         :     28;
+                bdrkreg_t       pcb_cntr_1_mode           :      3;
+                bdrkreg_t       pcb_cntr_1_enable         :      1;
+	} pi_perf_cntl_b_fld_s;
+} pi_perf_cntl_b_u_t;
+
+#else
+
+typedef union pi_perf_cntl_b_u {
+	bdrkreg_t	pi_perf_cntl_b_regval;
+	struct	{
+		bdrkreg_t	pcb_cntr_1_enable	  :	 1;
+		bdrkreg_t	pcb_cntr_1_mode		  :	 3;
+		bdrkreg_t	pcb_cntr_1_select	  :	28;
+		bdrkreg_t	pcb_cntr_0_enable	  :	 1;
+		bdrkreg_t	pcb_cntr_0_mode		  :	 3;
+		bdrkreg_t	pcb_cntr_0_select	  :	28;
+	} pi_perf_cntl_b_fld_s;
+} pi_perf_cntl_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register accesses the performance counter 0 for each CPU.      *
+ * Each performance counter is 40-bits wide. On overflow, It wraps to   *
+ * zero, sets the overflow bit in this register, and sets the           *
+ * PERF_CNTR_OFLOW bit in the INT_PEND1 register.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_perf_cntr0_b_u {
+	bdrkreg_t	pi_perf_cntr0_b_regval;
+	struct  {
+		bdrkreg_t	pcb_count_value           :	40;
+                bdrkreg_t       pcb_overflow              :      1;
+                bdrkreg_t       pcb_rsvd                  :     23;
+	} pi_perf_cntr0_b_fld_s;
+} pi_perf_cntr0_b_u_t;
+
+#else
+
+typedef union pi_perf_cntr0_b_u {
+	bdrkreg_t	pi_perf_cntr0_b_regval;
+	struct	{
+		bdrkreg_t	pcb_rsvd		  :	23;
+		bdrkreg_t	pcb_overflow		  :	 1;
+		bdrkreg_t	pcb_count_value		  :	40;
+	} pi_perf_cntr0_b_fld_s;
+} pi_perf_cntr0_b_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  This register accesses the performance counter 1 for each CPU.      *
+ * Each performance counter is 40-bits wide. On overflow, It wraps to   *
+ * zero, sets the overflow bit in this register, and sets the           *
+ * PERF_CNTR_OFLOW bit in the INT_PEND1 register.                       *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union pi_perf_cntr1_b_u {
+	bdrkreg_t	pi_perf_cntr1_b_regval;
+	struct  {
+		bdrkreg_t	pcb_count_value           :	40;
+                bdrkreg_t       pcb_overflow              :      1;
+                bdrkreg_t       pcb_rsvd                  :     23;
+	} pi_perf_cntr1_b_fld_s;
+} pi_perf_cntr1_b_u_t;
+
+#else
+
+typedef union pi_perf_cntr1_b_u {
+	bdrkreg_t	pi_perf_cntr1_b_regval;
+	struct	{
+		bdrkreg_t	pcb_rsvd		  :	23;
+		bdrkreg_t	pcb_overflow		  :	 1;
+		bdrkreg_t	pcb_count_value		  :	40;
+	} pi_perf_cntr1_b_fld_s;
+} pi_perf_cntr1_b_u_t;
+
+#endif
+
+
+
+
+
+
+#endif /* _LANGUAGE_C */
+
+/************************************************************************
+ *                                                                      *
+ *               MAKE ALL ADDITIONS AFTER THIS LINE                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+#define PI_GFX_OFFSET		(PI_GFX_PAGE_B - PI_GFX_PAGE_A)
+#define PI_GFX_PAGE_ENABLE	0x0000010000000000LL
+
+
+#endif /* _ASM_SN_SN1_HUBPI_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubpi_next.h linux/include/asm-ia64/sn/sn1/hubpi_next.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubpi_next.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubpi_next.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,332 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBPI_NEXT_H
+#define _ASM_SN_SN1_HUBPI_NEXT_H
+
+
+/* define for remote PI_1 space. It is always half of a node address space
+ * from PI_0. The normal REMOTE_HUB space for PI registers access
+ * the PI_0 space, unless they are qualified by PI_1.
+ */
+#define PI_0(x)			(x)
+#define PI_1(x)			((x) + 0x200000)
+#define PIREG(x,sn)		((sn) ? PI_1(x) : PI_0(x))
+
+#define PI_MIN_STACK_SIZE 4096  /* For figuring out the size to set */
+#define PI_STACK_SIZE_SHFT      12      /* 4k */
+
+#define PI_STACKADDR_OFFSET     (PI_ERR_STACK_ADDR_B - PI_ERR_STACK_ADDR_A)
+#define PI_ERRSTAT_OFFSET       (PI_ERR_STATUS0_B - PI_ERR_STATUS0_A)
+#define PI_RDCLR_OFFSET         (PI_ERR_STATUS0_A_RCLR - PI_ERR_STATUS0_A)
+/* these macros are correct, but fix their users to understand two PIs
+   and 4 CPUs (slices) per bedrock */
+#define PI_INT_MASK_OFFSET      (PI_INT_MASK0_B - PI_INT_MASK0_A)
+#define PI_INT_SET_OFFSET       (PI_CC_PEND_CLR_B - PI_CC_PEND_CLR_A)
+#define PI_NMI_OFFSET		(PI_NMI_B - PI_NMI_A)
+
+#define ERR_STACK_SIZE_BYTES(_sz) \
+       ((_sz) ? (PI_MIN_STACK_SIZE << ((_sz) - 1)) : 0)
+
+#define PI_CRB_STS_P	(1 << 9) 	/* "P" (partial word read/write) bit */
+#define PI_CRB_STS_V	(1 << 8)	/* "V" (valid) bit */
+#define PI_CRB_STS_R	(1 << 7)	/* "R" (response data sent to CPU) */
+#define PI_CRB_STS_A	(1 << 6)	/* "A" (data ack. received) bit */
+#define PI_CRB_STS_W	(1 << 5)	/* "W" (waiting for write compl.) */
+#define PI_CRB_STS_H	(1 << 4)	/* "H" (gathering invalidates) bit */
+#define PI_CRB_STS_I	(1 << 3)	/* "I" (targ. inbound invalidate) */
+#define PI_CRB_STS_T	(1 << 2)	/* "T" (targ. inbound intervention) */
+#define PI_CRB_STS_E	(0x3)		/* "E" (coherent read type) */
+
+/* When the "P" bit is set in the sk_crb_sts field of an error stack
+ * entry, the "R," "A," "H," and "I" bits are actually bits 6..3 of
+ * the address.  This macro extracts those address bits and shifts
+ * them to their proper positions, ready to be ORed in to the rest of
+ * the address (which is calculated as sk_addr << 7).
+ */
+#define PI_CRB_STS_ADDR_BITS(sts) \
+    ((sts) & (PI_CRB_STS_I | PI_CRB_STS_H) | \
+     ((sts) & (PI_CRB_STS_A | PI_CRB_STS_R)) >> 1)
+
+#ifdef _LANGUAGE_C
+/*
+ * format of error stack and error status registers.
+ */
+
+#ifdef LITTLE_ENDIAN
+
+struct err_stack_format {
+        uint64_t      sk_err_type:  3,   /* error type        */
+                        sk_suppl   :  3,   /* lowest 3 bit of supplemental */
+                        sk_t5_req  :  3,   /* RRB T5 request number */
+                        sk_crb_num :  3,   /* WRB (0 to 7) or RRB (0 to 4) */
+                        sk_rw_rb   :  1,   /* RRB == 0, WRB == 1 */
+                        sk_crb_sts : 10,   /* status from RRB or WRB */
+                        sk_cmd     :  8,   /* message command */
+			sk_addr    : 33;   /* address */
+};
+
+#else
+
+struct err_stack_format {
+	uint64_t	sk_addr	   : 33,   /* address */
+			sk_cmd	   :  8,   /* message command */
+			sk_crb_sts : 10,   /* status from RRB or WRB */
+			sk_rw_rb   :  1,   /* RRB == 0, WRB == 1 */
+			sk_crb_num :  3,   /* WRB (0 to 7) or RRB (0 to 4) */
+			sk_t5_req  :  3,   /* RRB T5 request number */
+			sk_suppl   :  3,   /* lowest 3 bit of supplemental */
+			sk_err_type:  3;   /* error type	*/
+};
+
+#endif
+
+typedef union pi_err_stack {
+        uint64_t      pi_stk_word;
+        struct  err_stack_format pi_stk_fmt;
+} pi_err_stack_t;
+
+/* Simplified version of pi_err_status0_a_u_t (PI_ERR_STATUS0_A) */
+#ifdef LITTLE_ENDIAN
+
+struct err_status0_format {
+	uint64_t	s0_err_type	:  3,	/* Encoded error cause */
+                        s0_proc_req_num :  3,   /* Request number for RRB only */
+                        s0_supplemental : 11,   /* Incoming message sup field */
+                        s0_cmd          :  8,   /* Incoming message command */
+                        s0_addr         : 37,   /* Address */
+                        s0_over_run     :  1,   /* Subsequent errors spooled */
+			s0_valid        :  1;   /* error is valid */
+};
+
+#else
+
+struct err_status0_format {
+	uint64_t	s0_valid	:  1,	/* error is valid */
+			s0_over_run	:  1,	/* Subsequent errors spooled */
+			s0_addr		: 37,	/* Address */
+			s0_cmd		:  8,	/* Incoming message command */
+			s0_supplemental : 11,	/* Incoming message sup field */
+			s0_proc_req_num :  3,	/* Request number for RRB only */
+			s0_err_type	:  3;	/* Encoded error cause */
+};
+
+#endif
+
+
+typedef union pi_err_stat0 {
+	uint64_t	pi_stat0_word;
+        struct err_status0_format pi_stat0_fmt;
+} pi_err_stat0_t;
+
+/* Simplified version of pi_err_status1_a_u_t (PI_ERR_STATUS1_A) */
+
+#ifdef LITTLE_ENDIAN
+
+struct err_status1_format {
+	 uint64_t	s1_spl_cnt : 21,   /* number spooled to memory */
+                        s1_to_cnt  :  8,   /* crb timeout counter */
+                        s1_inval_cnt:10,   /* signed invalidate counter RRB */
+                        s1_crb_num :  3,   /* WRB (0 to 7) or RRB (0 to 4) */
+                        s1_rw_rb   :  1,   /* RRB == 0, WRB == 1 */
+                        s1_crb_sts : 10,   /* status from RRB or WRB */
+			s1_src     : 11;   /* message source */
+};
+
+#else
+
+struct err_status1_format {
+	uint64_t	s1_src	   : 11,   /* message source */
+			s1_crb_sts : 10,   /* status from RRB or WRB */
+			s1_rw_rb   :  1,   /* RRB == 0, WRB == 1 */
+			s1_crb_num :  3,   /* WRB (0 to 7) or RRB (0 to 4) */
+			s1_inval_cnt:10,   /* signed invalidate counter RRB */
+			s1_to_cnt  :  8,   /* crb timeout counter */
+			s1_spl_cnt : 21;   /* number spooled to memory */
+};
+
+#endif
+
+typedef union pi_err_stat1 {
+	uint64_t	pi_stat1_word;
+	struct err_status1_format pi_stat1_fmt;
+} pi_err_stat1_t;
+#endif
+
+/* Error stack types (sk_err_type) for reads:	*/
+#define PI_ERR_RD_AERR		0	/* Read Access Error */
+#define PI_ERR_RD_PRERR         1	/* Uncached Partial Read */
+#define PI_ERR_RD_DERR          2	/* Directory Error */
+#define PI_ERR_RD_TERR          3	/* read timeout */
+#define PI_ERR_RD_PERR		4	/* Poison Access Violation */
+#define PI_ERR_RD_NACK		5	/* Excessive NACKs	*/
+#define PI_ERR_RD_RDE		6	/* Response Data Error	*/
+#define PI_ERR_RD_PLERR		7	/* Packet Length Error */
+/* Error stack types (sk_err_type) for writes:	*/
+#define PI_ERR_WR_WERR          0	/* Write Access Error */
+#define PI_ERR_WR_PWERR         1	/* Uncached Write Error */
+#define PI_ERR_WR_TERR          3	/* write timeout */
+#define PI_ERR_WR_RDE		6	/* Response Data Error */
+#define PI_ERR_WR_PLERR		7	/* Packet Length Error */
+
+
+/* For backwards compatibility */
+#define PI_RT_COUNT	PI_RT_COUNTER    /* Real Time Counter 		    */
+#define PI_RT_EN_A	PI_RT_INT_EN_A   /* RT int for CPU A enable         */
+#define PI_RT_EN_B	PI_RT_INT_EN_B   /* RT int for CPU B enable         */
+#define PI_PROF_EN_A	PI_PROF_INT_EN_A /* PROF int for CPU A enable       */
+#define PI_PROF_EN_B	PI_PROF_INT_EN_B /* PROF int for CPU B enable       */
+#define PI_RT_PEND_A    PI_RT_INT_PEND_A /* RT interrupt pending 	    */
+#define PI_RT_PEND_B    PI_RT_INT_PEND_B /* RT interrupt pending 	    */
+#define PI_PROF_PEND_A  PI_PROF_INT_PEND_A /* Profiling interrupt pending   */
+#define PI_PROF_PEND_B  PI_PROF_INT_PEND_B /* Profiling interrupt pending   */
+
+
+/* Bits in PI_SYSAD_ERRCHK_EN */
+#define PI_SYSAD_ERRCHK_ECCGEN  0x01    /* Enable ECC generation            */
+#define PI_SYSAD_ERRCHK_QUALGEN 0x02    /* Enable data quality signal gen.  */
+#define PI_SYSAD_ERRCHK_SADP    0x04    /* Enable SysAD parity checking     */
+#define PI_SYSAD_ERRCHK_CMDP    0x08    /* Enable SysCmd parity checking    */
+#define PI_SYSAD_ERRCHK_STATE   0x10    /* Enable SysState parity checking  */
+#define PI_SYSAD_ERRCHK_QUAL    0x20    /* Enable data quality checking     */
+#define PI_SYSAD_CHECK_ALL      0x3f    /* Generate and check all signals.  */
+
+/* CALIAS values */
+#define PI_CALIAS_SIZE_0        0
+#define PI_CALIAS_SIZE_4K       1
+#define PI_CALIAS_SIZE_8K       2
+#define PI_CALIAS_SIZE_16K      3
+#define PI_CALIAS_SIZE_32K      4
+#define PI_CALIAS_SIZE_64K      5
+#define PI_CALIAS_SIZE_128K     6
+#define PI_CALIAS_SIZE_256K     7
+#define PI_CALIAS_SIZE_512K     8
+#define PI_CALIAS_SIZE_1M       9
+#define PI_CALIAS_SIZE_2M       10
+#define PI_CALIAS_SIZE_4M       11
+#define PI_CALIAS_SIZE_8M       12
+#define PI_CALIAS_SIZE_16M      13
+#define PI_CALIAS_SIZE_32M      14
+#define PI_CALIAS_SIZE_64M      15
+
+/* Fields in PI_ERR_STATUS0_[AB] */
+#define PI_ERR_ST0_VALID_MASK	0x8000000000000000
+#define PI_ERR_ST0_VALID_SHFT	63
+
+/* Fields in PI_SPURIOUS_HDR_0 */
+#define PI_SPURIOUS_HDR_VALID_MASK	0x8000000000000000
+#define PI_SPURIOUS_HDR_VALID_SHFT	63
+
+/* Fields in PI_NACK_CNT_A/B */
+#define PI_NACK_CNT_EN_SHFT	20
+#define PI_NACK_CNT_EN_MASK	0x100000
+#define PI_NACK_CNT_MASK	0x0fffff
+#define PI_NACK_CNT_MAX		0x0fffff
+
+/* Bits in PI_ERR_INT_PEND */
+#define PI_ERR_SPOOL_CMP_B	0x000000001	/* Spool end hit high water */
+#define PI_ERR_SPOOL_CMP_A	0x000000002
+#define PI_ERR_SPUR_MSG_B	0x000000004	/* Spurious message intr.   */
+#define PI_ERR_SPUR_MSG_A	0x000000008
+#define PI_ERR_WRB_TERR_B	0x000000010	/* WRB TERR		    */
+#define PI_ERR_WRB_TERR_A	0x000000020
+#define PI_ERR_WRB_WERR_B	0x000000040	/* WRB WERR 		    */
+#define PI_ERR_WRB_WERR_A	0x000000080
+#define PI_ERR_SYSSTATE_B	0x000000100	/* SysState parity error    */
+#define PI_ERR_SYSSTATE_A	0x000000200
+#define PI_ERR_SYSAD_DATA_B	0x000000400	/* SysAD data parity error  */
+#define PI_ERR_SYSAD_DATA_A	0x000000800
+#define PI_ERR_SYSAD_ADDR_B	0x000001000	/* SysAD addr parity error  */
+#define PI_ERR_SYSAD_ADDR_A	0x000002000
+#define PI_ERR_SYSCMD_DATA_B	0x000004000	/* SysCmd data parity error */
+#define PI_ERR_SYSCMD_DATA_A	0x000008000
+#define PI_ERR_SYSCMD_ADDR_B	0x000010000	/* SysCmd addr parity error */
+#define PI_ERR_SYSCMD_ADDR_A	0x000020000
+#define PI_ERR_BAD_SPOOL_B	0x000040000	/* Error spooling to memory */
+#define PI_ERR_BAD_SPOOL_A	0x000080000
+#define PI_ERR_UNCAC_UNCORR_B	0x000100000	/* Uncached uncorrectable   */
+#define PI_ERR_UNCAC_UNCORR_A	0x000200000
+#define PI_ERR_SYSSTATE_TAG_B	0x000400000	/* SysState tag parity error */
+#define PI_ERR_SYSSTATE_TAG_A	0x000800000
+#define PI_ERR_MD_UNCORR	0x001000000	/* Must be cleared in MD    */
+#define PI_ERR_SYSAD_BAD_DATA_B	0x002000000	/* SysAD Data quality bad   */
+#define PI_ERR_SYSAD_BAD_DATA_A	0x004000000
+#define PI_ERR_UE_CACHED_B	0x008000000	/* UE during cached load    */
+#define PI_ERR_UE_CACHED_A	0x010000000
+#define PI_ERR_PKT_LEN_ERR_B	0x020000000	/* Xbar data too long/short */
+#define PI_ERR_PKT_LEN_ERR_A	0x040000000
+#define PI_ERR_IRB_ERR_B	0x080000000	/* Protocol error           */
+#define PI_ERR_IRB_ERR_A	0x100000000
+#define PI_ERR_IRB_TIMEOUT_B	0x200000000	/* IRB_B got a timeout      */
+#define PI_ERR_IRB_TIMEOUT_A	0x400000000
+
+#define PI_ERR_CLEAR_ALL_A	0x554aaaaaa
+#define PI_ERR_CLEAR_ALL_B	0x2aa555555
+
+
+/*
+ * The following three macros define all possible error int pends. 
+ */
+
+#define PI_FATAL_ERR_CPU_A	(PI_ERR_SYSAD_BAD_DATA_A | \
+				 PI_ERR_SYSSTATE_TAG_A 	| \
+				 PI_ERR_BAD_SPOOL_A 	| \
+				 PI_ERR_SYSCMD_ADDR_A 	| \
+				 PI_ERR_SYSCMD_DATA_A 	| \
+				 PI_ERR_SYSAD_ADDR_A 	| \
+				 PI_ERR_SYSAD_DATA_A	| \
+				 PI_ERR_SYSSTATE_A)
+
+#define PI_MISC_ERR_CPU_A	(PI_ERR_IRB_TIMEOUT_A   | \
+				 PI_ERR_IRB_ERR_A       | \
+				 PI_ERR_PKT_LEN_ERR_A   | \
+				 PI_ERR_UE_CACHED_A     | \
+				 PI_ERR_UNCAC_UNCORR_A 	| \
+				 PI_ERR_WRB_WERR_A 	| \
+				 PI_ERR_WRB_TERR_A 	| \
+				 PI_ERR_SPUR_MSG_A 	| \
+				 PI_ERR_SPOOL_CMP_A)
+
+#define PI_FATAL_ERR_CPU_B	(PI_ERR_SYSAD_BAD_DATA_B | \
+				 PI_ERR_SYSSTATE_TAG_B 	| \
+				 PI_ERR_BAD_SPOOL_B 	| \
+				 PI_ERR_SYSCMD_ADDR_B 	| \
+				 PI_ERR_SYSCMD_DATA_B 	| \
+				 PI_ERR_SYSAD_ADDR_B 	| \
+				 PI_ERR_SYSAD_DATA_B	| \
+				 PI_ERR_SYSSTATE_B)
+
+#define PI_MISC_ERR_CPU_B 	(PI_ERR_IRB_TIMEOUT_B   | \
+				 PI_ERR_IRB_ERR_B       | \
+				 PI_ERR_PKT_LEN_ERR_B   | \
+				 PI_ERR_UE_CACHED_B     | \
+				 PI_ERR_UNCAC_UNCORR_B  | \
+				 PI_ERR_WRB_WERR_B 	| \
+				 PI_ERR_WRB_TERR_B 	| \
+				 PI_ERR_SPUR_MSG_B 	| \
+				 PI_ERR_SPOOL_CMP_B)
+
+#define PI_ERR_GENERIC	(PI_ERR_MD_UNCORR)
+
+/* Values for PI_MAX_CRB_TIMEOUT and PI_CRB_SFACTOR */
+#define PMCT_MAX	0xff
+#define PCS_MAX		0xffffff
+
+/* pi_err_status0_a_u_t address shift */
+#define ERR_STAT0_ADDR_SHFT     3
+
+/* PI error read/write bit (RRB == 0, WRB == 1)	*/
+/* pi_err_status1_a_u_t.pi_err_status1_a_fld_s.esa_wrb */
+#define PI_ERR_RRB	0
+#define PI_ERR_WRB	1
+
+/* Error stack address shift, for use with pi_stk_fmt.sk_addr */
+#define ERR_STK_ADDR_SHFT	3
+
+#endif /* _ASM_SN_SN1_HUBPI_NEXT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubxb.h linux/include/asm-ia64/sn/sn1/hubxb.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubxb.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubxb.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,1289 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBXB_H
+#define _ASM_SN_SN1_HUBXB_H
+
+/************************************************************************
+ *                                                                      *
+ *      WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!  WARNING!!!      *
+ *                                                                      *
+ * This file is created by an automated script. Any (minimal) changes   *
+ * made manually to this  file should be made with care.                *
+ *                                                                      *
+ *               MAKE ALL ADDITIONS TO THE END OF THIS FILE             *
+ *                                                                      *
+ ************************************************************************/
+
+
+#define    XB_PARMS                  0x00700000    /*
+                                                    * Controls
+                                                    * crossbar-wide
+                                                    * parameters.
+                                                    */
+
+
+
+#define    XB_SLOW_GNT               0x00700008    /*
+                                                    * Controls wavefront
+                                                    * arbiter grant
+                                                    * frequency, used to
+                                                    * slow XB grants
+                                                    */
+
+
+
+#define    XB_SPEW_CONTROL           0x00700010    /*
+                                                    * Controls spew
+                                                    * settings (debug
+                                                    * only).
+                                                    */
+
+
+
+#define    XB_IOQ_ARB_TRIGGER        0x00700018    /*
+                                                    * Controls IOQ
+                                                    * trigger level
+                                                    */
+
+
+
+#define    XB_FIRST_ERROR            0x00700090    /*
+                                                    * Records the first
+                                                    * crossbar error
+                                                    * seen.
+                                                    */
+
+
+
+#define    XB_POQ0_ERROR             0x00700020    /*
+                                                    * POQ0 error
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_PIQ0_ERROR             0x00700028    /*
+                                                    * PIQ0 error
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_POQ1_ERROR             0x00700030    /*
+                                                    * POQ1 error
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_PIQ1_ERROR             0x00700038    /*
+                                                    * PIQ1 error
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_MP0_ERROR              0x00700040    /*
+                                                    * MOQ for PI0 error
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_MP1_ERROR              0x00700048    /*
+                                                    * MOQ for PI1 error
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_MMQ_ERROR              0x00700050    /*
+                                                    * MOQ for misc. (LB,
+                                                    * NI, II) error
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_MIQ_ERROR              0x00700058    /*
+                                                    * MIQ error register,
+                                                    * addtional MIQ
+                                                    * errors are logged
+                                                    * in MD "Input
+                                                    * Error
+                                                    * Registers".
+                                                    */
+
+
+
+#define    XB_NOQ_ERROR              0x00700060    /* NOQ error register.    */
+
+
+
+#define    XB_NIQ_ERROR              0x00700068    /* NIQ error register.    */
+
+
+
+#define    XB_IOQ_ERROR              0x00700070    /* IOQ error register.    */
+
+
+
+#define    XB_IIQ_ERROR              0x00700078    /* IIQ error register.    */
+
+
+
+#define    XB_LOQ_ERROR              0x00700080    /* LOQ error register.    */
+
+
+
+#define    XB_LIQ_ERROR              0x00700088    /* LIQ error register.    */
+
+
+
+#define    XB_DEBUG_DATA_CTL         0x00700098    /*
+                                                    * Debug Datapath
+                                                    * Select
+                                                    */
+
+
+
+#define    XB_DEBUG_ARB_CTL          0x007000A0    /*
+                                                    * XB master debug
+                                                    * control
+                                                    */
+
+
+
+#define    XB_POQ0_ERROR_CLEAR       0x00700120    /*
+                                                    * Clears
+                                                    * XB_POQ0_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_PIQ0_ERROR_CLEAR       0x00700128    /*
+                                                    * Clears
+                                                    * XB_PIQ0_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_POQ1_ERROR_CLEAR       0x00700130    /*
+                                                    * Clears
+                                                    * XB_POQ1_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_PIQ1_ERROR_CLEAR       0x00700138    /*
+                                                    * Clears
+                                                    * XB_PIQ1_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_MP0_ERROR_CLEAR        0x00700140    /*
+                                                    * Clears XB_MP0_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_MP1_ERROR_CLEAR        0x00700148    /*
+                                                    * Clears XB_MP1_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_MMQ_ERROR_CLEAR        0x00700150    /*
+                                                    * Clears XB_MMQ_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_XM_MIQ_ERROR_CLEAR     0x00700158    /*
+                                                    * Clears XB_MIQ_ERROR
+                                                    * register
+                                                    */
+
+
+
+#define    XB_NOQ_ERROR_CLEAR        0x00700160    /*
+                                                    * Clears XB_NOQ_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_NIQ_ERROR_CLEAR        0x00700168    /*
+                                                    * Clears XB_NIQ_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_IOQ_ERROR_CLEAR        0x00700170    /*
+                                                    * Clears XB_IOQ
+                                                    * _ERROR register.
+                                                    */
+
+
+
+#define    XB_IIQ_ERROR_CLEAR        0x00700178    /*
+                                                    * Clears XB_IIQ
+                                                    * _ERROR register.
+                                                    */
+
+
+
+#define    XB_LOQ_ERROR_CLEAR        0x00700180    /*
+                                                    * Clears XB_LOQ_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_LIQ_ERROR_CLEAR        0x00700188    /*
+                                                    * Clears XB_LIQ_ERROR
+                                                    * register.
+                                                    */
+
+
+
+#define    XB_FIRST_ERROR_CLEAR      0x00700190    /*
+                                                    * Clears
+                                                    * XB_FIRST_ERROR
+                                                    * register
+                                                    */
+
+
+
+
+
+#ifdef _LANGUAGE_C
+
+/************************************************************************
+ *                                                                      *
+ *  Access to parameters which control various aspects of the           *
+ * crossbar's operation.                                                *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_parms_u {
+	bdrkreg_t	xb_parms_regval;
+	struct  {
+		bdrkreg_t	p_byp_en                  :	 1;
+                bdrkreg_t       p_rsrvd_1                 :      3;
+                bdrkreg_t       p_age_wrap                :      8;
+                bdrkreg_t       p_deadlock_to_wrap        :     20;
+                bdrkreg_t       p_tail_to_wrap            :     20;
+                bdrkreg_t       p_rsrvd                   :     12;
+	} xb_parms_fld_s;
+} xb_parms_u_t;
+
+#else
+
+typedef union xb_parms_u {
+	bdrkreg_t	xb_parms_regval;
+	struct	{
+		bdrkreg_t	p_rsrvd			  :	12;
+		bdrkreg_t	p_tail_to_wrap		  :	20;
+		bdrkreg_t	p_deadlock_to_wrap	  :	20;
+		bdrkreg_t	p_age_wrap		  :	 8;
+		bdrkreg_t	p_rsrvd_1		  :	 3;
+		bdrkreg_t	p_byp_en		  :	 1;
+	} xb_parms_fld_s;
+} xb_parms_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Sets the period of wavefront grants given to each unit. The         *
+ * register's value corresponds to the number of cycles between each    *
+ * wavefront grant opportunity given to the requesting unit. If set     *
+ * to 0xF, no grants are given to this unit. If set to 0xE, the unit    *
+ * is granted at the slowest rate (sometimes called "molasses mode").   *
+ * This feature can be used to apply backpressure to a unit's output    *
+ * queue(s). The setting does not affect bypass grants.                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_slow_gnt_u {
+	bdrkreg_t	xb_slow_gnt_regval;
+	struct  {
+		bdrkreg_t	sg_lb_slow_gnt            :	 4;
+                bdrkreg_t       sg_ii_slow_gnt            :      4;
+                bdrkreg_t       sg_ni_slow_gnt            :      4;
+                bdrkreg_t       sg_mmq_slow_gnt           :      4;
+                bdrkreg_t       sg_mp1_slow_gnt           :      4;
+                bdrkreg_t       sg_mp0_slow_gnt           :      4;
+                bdrkreg_t       sg_pi1_slow_gnt           :      4;
+                bdrkreg_t       sg_pi0_slow_gnt           :      4;
+                bdrkreg_t       sg_rsrvd                  :     32;
+	} xb_slow_gnt_fld_s;
+} xb_slow_gnt_u_t;
+
+#else
+
+typedef union xb_slow_gnt_u {
+	bdrkreg_t	xb_slow_gnt_regval;
+	struct	{
+		bdrkreg_t	sg_rsrvd		  :	32;
+		bdrkreg_t	sg_pi0_slow_gnt		  :	 4;
+		bdrkreg_t	sg_pi1_slow_gnt		  :	 4;
+		bdrkreg_t	sg_mp0_slow_gnt		  :	 4;
+		bdrkreg_t	sg_mp1_slow_gnt		  :	 4;
+		bdrkreg_t	sg_mmq_slow_gnt		  :	 4;
+		bdrkreg_t	sg_ni_slow_gnt		  :	 4;
+		bdrkreg_t	sg_ii_slow_gnt		  :	 4;
+		bdrkreg_t	sg_lb_slow_gnt		  :	 4;
+	} xb_slow_gnt_fld_s;
+} xb_slow_gnt_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Enables snooping of internal crossbar traffic by spewing all        *
+ * traffic across a selected crossbar point to the PI1 port. Only one   *
+ * bit should be set at any one time, and any bit set will preclude     *
+ * using the P1 for anything but a debug connection.                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_spew_control_u {
+	bdrkreg_t	xb_spew_control_regval;
+	struct  {
+		bdrkreg_t	sc_snoop_liq              :	 1;
+                bdrkreg_t       sc_snoop_iiq              :      1;
+                bdrkreg_t       sc_snoop_niq              :      1;
+                bdrkreg_t       sc_snoop_miq              :      1;
+                bdrkreg_t       sc_snoop_piq0             :      1;
+                bdrkreg_t       sc_snoop_loq              :      1;
+                bdrkreg_t       sc_snoop_ioq              :      1;
+                bdrkreg_t       sc_snoop_noq              :      1;
+                bdrkreg_t       sc_snoop_mmq              :      1;
+                bdrkreg_t       sc_snoop_mp0              :      1;
+                bdrkreg_t       sc_snoop_poq0             :      1;
+                bdrkreg_t       sc_rsrvd                  :     53;
+	} xb_spew_control_fld_s;
+} xb_spew_control_u_t;
+
+#else
+
+typedef union xb_spew_control_u {
+	bdrkreg_t	xb_spew_control_regval;
+	struct	{
+		bdrkreg_t	sc_rsrvd		  :	53;
+		bdrkreg_t	sc_snoop_poq0		  :	 1;
+		bdrkreg_t	sc_snoop_mp0		  :	 1;
+		bdrkreg_t	sc_snoop_mmq		  :	 1;
+		bdrkreg_t	sc_snoop_noq		  :	 1;
+		bdrkreg_t	sc_snoop_ioq		  :	 1;
+		bdrkreg_t	sc_snoop_loq		  :	 1;
+		bdrkreg_t	sc_snoop_piq0		  :	 1;
+		bdrkreg_t	sc_snoop_miq		  :	 1;
+		bdrkreg_t	sc_snoop_niq		  :	 1;
+		bdrkreg_t	sc_snoop_iiq		  :	 1;
+		bdrkreg_t	sc_snoop_liq		  :	 1;
+	} xb_spew_control_fld_s;
+} xb_spew_control_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Number of clocks the IOQ will wait before beginning XB              *
+ * arbitration. This is set so that the slower IOQ data rate can        *
+ * catch up with the XB data rate in the IOQ buffer.                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_ioq_arb_trigger_u {
+	bdrkreg_t	xb_ioq_arb_trigger_regval;
+	struct  {
+		bdrkreg_t	iat_ioq_arb_trigger       :	 4;
+	        bdrkreg_t       iat_rsrvd                 :     60;
+	} xb_ioq_arb_trigger_fld_s;
+} xb_ioq_arb_trigger_u_t;
+
+#else
+
+typedef union xb_ioq_arb_trigger_u {
+	bdrkreg_t	xb_ioq_arb_trigger_regval;
+	struct	{
+		bdrkreg_t	iat_rsrvd		  :	60;
+		bdrkreg_t	iat_ioq_arb_trigger	  :	 4;
+	} xb_ioq_arb_trigger_fld_s;
+} xb_ioq_arb_trigger_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by POQ0.Can be written to test software, will   *
+ * cause an interrupt.                                                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_poq0_error_u {
+	bdrkreg_t	xb_poq0_error_regval;
+	struct  {
+		bdrkreg_t	pe_invalid_xsel           :	 2;
+                bdrkreg_t       pe_rsrvd_3                :      2;
+                bdrkreg_t       pe_overflow               :      2;
+                bdrkreg_t       pe_rsrvd_2                :      2;
+                bdrkreg_t       pe_underflow              :      2;
+                bdrkreg_t       pe_rsrvd_1                :      2;
+                bdrkreg_t       pe_tail_timeout           :      2;
+                bdrkreg_t       pe_unused                 :      6;
+                bdrkreg_t       pe_rsrvd                  :     44;
+	} xb_poq0_error_fld_s;
+} xb_poq0_error_u_t;
+
+#else
+
+typedef union xb_poq0_error_u {
+	bdrkreg_t	xb_poq0_error_regval;
+	struct	{
+		bdrkreg_t	pe_rsrvd		  :	44;
+		bdrkreg_t	pe_unused		  :	 6;
+		bdrkreg_t	pe_tail_timeout		  :	 2;
+		bdrkreg_t	pe_rsrvd_1		  :	 2;
+		bdrkreg_t	pe_underflow		  :	 2;
+		bdrkreg_t	pe_rsrvd_2		  :	 2;
+		bdrkreg_t	pe_overflow		  :	 2;
+		bdrkreg_t	pe_rsrvd_3		  :	 2;
+		bdrkreg_t	pe_invalid_xsel		  :	 2;
+	} xb_poq0_error_fld_s;
+} xb_poq0_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by PIQ0. Note that the PIQ/PI interface         *
+ * precludes PIQ underflow.                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_piq0_error_u {
+	bdrkreg_t	xb_piq0_error_regval;
+	struct  {
+		bdrkreg_t	pe_overflow               :	 2;
+                bdrkreg_t       pe_rsrvd_1                :      2;
+                bdrkreg_t       pe_deadlock_timeout       :      2;
+                bdrkreg_t       pe_rsrvd                  :     58;
+	} xb_piq0_error_fld_s;
+} xb_piq0_error_u_t;
+
+#else
+
+typedef union xb_piq0_error_u {
+	bdrkreg_t	xb_piq0_error_regval;
+	struct	{
+		bdrkreg_t	pe_rsrvd		  :	58;
+		bdrkreg_t	pe_deadlock_timeout	  :	 2;
+		bdrkreg_t	pe_rsrvd_1		  :	 2;
+		bdrkreg_t	pe_overflow		  :	 2;
+	} xb_piq0_error_fld_s;
+} xb_piq0_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by MP0 queue (the MOQ for processor 0). Since   *
+ * the xselect is decoded on the MD/MOQ interface, no invalid xselect   *
+ * errors are possible.                                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_mp0_error_u {
+	bdrkreg_t	xb_mp0_error_regval;
+	struct  {
+		bdrkreg_t	me_rsrvd_3                :	 4;
+                bdrkreg_t       me_overflow               :      2;
+                bdrkreg_t       me_rsrvd_2                :      2;
+                bdrkreg_t       me_underflow              :      2;
+                bdrkreg_t       me_rsrvd_1                :      2;
+                bdrkreg_t       me_tail_timeout           :      2;
+                bdrkreg_t       me_rsrvd                  :     50;
+	} xb_mp0_error_fld_s;
+} xb_mp0_error_u_t;
+
+#else
+
+typedef union xb_mp0_error_u {
+	bdrkreg_t	xb_mp0_error_regval;
+	struct	{
+		bdrkreg_t	me_rsrvd		  :	50;
+		bdrkreg_t	me_tail_timeout		  :	 2;
+		bdrkreg_t	me_rsrvd_1		  :	 2;
+		bdrkreg_t	me_underflow		  :	 2;
+		bdrkreg_t	me_rsrvd_2		  :	 2;
+		bdrkreg_t	me_overflow		  :	 2;
+		bdrkreg_t	me_rsrvd_3		  :	 4;
+	} xb_mp0_error_fld_s;
+} xb_mp0_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by MIQ.                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_miq_error_u {
+	bdrkreg_t	xb_miq_error_regval;
+	struct  {
+		bdrkreg_t	me_rsrvd_1                :	 4;
+                bdrkreg_t       me_deadlock_timeout       :      4;
+                bdrkreg_t       me_rsrvd                  :     56;
+	} xb_miq_error_fld_s;
+} xb_miq_error_u_t;
+
+#else
+
+typedef union xb_miq_error_u {
+	bdrkreg_t	xb_miq_error_regval;
+	struct	{
+		bdrkreg_t	me_rsrvd		  :	56;
+		bdrkreg_t	me_deadlock_timeout	  :	 4;
+		bdrkreg_t	me_rsrvd_1		  :	 4;
+	} xb_miq_error_fld_s;
+} xb_miq_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by NOQ.                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_noq_error_u {
+	bdrkreg_t	xb_noq_error_regval;
+	struct  {
+		bdrkreg_t	ne_rsvd                   :	 4;
+                bdrkreg_t       ne_overflow               :      4;
+                bdrkreg_t       ne_underflow              :      4;
+                bdrkreg_t       ne_tail_timeout           :      4;
+                bdrkreg_t       ne_rsrvd                  :     48;
+	} xb_noq_error_fld_s;
+} xb_noq_error_u_t;
+
+#else
+
+typedef union xb_noq_error_u {
+	bdrkreg_t	xb_noq_error_regval;
+	struct	{
+		bdrkreg_t	ne_rsrvd		  :	48;
+		bdrkreg_t	ne_tail_timeout		  :	 4;
+		bdrkreg_t	ne_underflow		  :	 4;
+		bdrkreg_t	ne_overflow		  :	 4;
+		bdrkreg_t	ne_rsvd			  :	 4;
+	} xb_noq_error_fld_s;
+} xb_noq_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by LOQ.                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_loq_error_u {
+	bdrkreg_t	xb_loq_error_regval;
+	struct  {
+		bdrkreg_t	le_invalid_xsel           :	 2;
+                bdrkreg_t       le_rsrvd_1                :      6;
+                bdrkreg_t       le_underflow              :      2;
+                bdrkreg_t       le_rsvd                   :      2;
+                bdrkreg_t       le_tail_timeout           :      2;
+                bdrkreg_t       le_rsrvd                  :     50;
+	} xb_loq_error_fld_s;
+} xb_loq_error_u_t;
+
+#else
+
+typedef union xb_loq_error_u {
+	bdrkreg_t	xb_loq_error_regval;
+	struct	{
+		bdrkreg_t	le_rsrvd		  :	50;
+		bdrkreg_t	le_tail_timeout		  :	 2;
+		bdrkreg_t	le_rsvd			  :	 2;
+		bdrkreg_t	le_underflow		  :	 2;
+		bdrkreg_t	le_rsrvd_1		  :	 6;
+		bdrkreg_t	le_invalid_xsel		  :	 2;
+	} xb_loq_error_fld_s;
+} xb_loq_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by LIQ. Note that the LIQ only records errors   *
+ * for the request channel. The reply channel can never deadlock or     *
+ * overflow because it does not have hardware flow control.             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_liq_error_u {
+	bdrkreg_t	xb_liq_error_regval;
+	struct  {
+		bdrkreg_t	le_overflow               :	 1;
+                bdrkreg_t       le_rsrvd_1                :      3;
+                bdrkreg_t       le_deadlock_timeout       :      1;
+                bdrkreg_t       le_rsrvd                  :     59;
+	} xb_liq_error_fld_s;
+} xb_liq_error_u_t;
+
+#else
+
+typedef union xb_liq_error_u {
+	bdrkreg_t	xb_liq_error_regval;
+	struct	{
+		bdrkreg_t	le_rsrvd		  :	59;
+		bdrkreg_t	le_deadlock_timeout	  :	 1;
+		bdrkreg_t	le_rsrvd_1		  :	 3;
+		bdrkreg_t	le_overflow		  :	 1;
+	} xb_liq_error_fld_s;
+} xb_liq_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  First error is latched whenever the Valid bit is clear and an       *
+ * error occurs. Any valid bit on in this register causes an            *
+ * interrupt to PI0 and PI1. This interrupt bit will persist until      *
+ * the specific error register to capture the error is cleared, then    *
+ * the FIRST_ERROR register is cleared (in that order.) The             *
+ * FIRST_ERROR register is not writable, but will be set when any of    *
+ * the corresponding error registers are written by software.           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_first_error_u {
+	bdrkreg_t	xb_first_error_regval;
+	struct  {
+		bdrkreg_t	fe_type                   :	 4;
+                bdrkreg_t       fe_channel                :      4;
+                bdrkreg_t       fe_source                 :      4;
+                bdrkreg_t       fe_valid                  :      1;
+                bdrkreg_t       fe_rsrvd                  :     51;
+	} xb_first_error_fld_s;
+} xb_first_error_u_t;
+
+#else
+
+typedef union xb_first_error_u {
+	bdrkreg_t	xb_first_error_regval;
+	struct	{
+		bdrkreg_t	fe_rsrvd		  :	51;
+		bdrkreg_t	fe_valid		  :	 1;
+		bdrkreg_t	fe_source		  :	 4;
+		bdrkreg_t	fe_channel		  :	 4;
+		bdrkreg_t	fe_type			  :	 4;
+	} xb_first_error_fld_s;
+} xb_first_error_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Controls DEBUG_DATA mux setting. Allows user to watch the output    *
+ * of any OQ or input of any IQ on the DEBUG port. Note that bits       *
+ * 13:0 are one-hot. If more than one bit is set in [13:0], the debug   *
+ * output is undefined. Details on the debug output lines can be        *
+ * found in the XB chapter of the Bedrock Interface Specification.      *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_debug_data_ctl_u {
+	bdrkreg_t	xb_debug_data_ctl_regval;
+	struct  {
+		bdrkreg_t	ddc_observe_liq_traffic   :	 1;
+                bdrkreg_t       ddc_observe_iiq_traffic   :      1;
+                bdrkreg_t       ddc_observe_niq_traffic   :      1;
+                bdrkreg_t       ddc_observe_miq_traffic   :      1;
+                bdrkreg_t       ddc_observe_piq1_traffic  :      1;
+                bdrkreg_t       ddc_observe_piq0_traffic  :      1;
+                bdrkreg_t       ddc_observe_loq_traffic   :      1;
+                bdrkreg_t       ddc_observe_ioq_traffic   :      1;
+                bdrkreg_t       ddc_observe_noq_traffic   :      1;
+                bdrkreg_t       ddc_observe_mp1_traffic   :      1;
+                bdrkreg_t       ddc_observe_mp0_traffic   :      1;
+                bdrkreg_t       ddc_observe_mmq_traffic   :      1;
+                bdrkreg_t       ddc_observe_poq1_traffic  :      1;
+                bdrkreg_t       ddc_observe_poq0_traffic  :      1;
+                bdrkreg_t       ddc_observe_source_field  :      1;
+                bdrkreg_t       ddc_observe_lodata        :      1;
+                bdrkreg_t       ddc_rsrvd                 :     48;
+	} xb_debug_data_ctl_fld_s;
+} xb_debug_data_ctl_u_t;
+
+#else
+
+typedef union xb_debug_data_ctl_u {
+	bdrkreg_t	xb_debug_data_ctl_regval;
+	struct	{
+		bdrkreg_t	ddc_rsrvd		  :	48;
+		bdrkreg_t	ddc_observe_lodata	  :	 1;
+		bdrkreg_t	ddc_observe_source_field  :	 1;
+		bdrkreg_t	ddc_observe_poq0_traffic  :	 1;
+		bdrkreg_t	ddc_observe_poq1_traffic  :	 1;
+		bdrkreg_t	ddc_observe_mmq_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_mp0_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_mp1_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_noq_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_ioq_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_loq_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_piq0_traffic  :	 1;
+		bdrkreg_t	ddc_observe_piq1_traffic  :	 1;
+		bdrkreg_t	ddc_observe_miq_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_niq_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_iiq_traffic	  :	 1;
+		bdrkreg_t	ddc_observe_liq_traffic	  :	 1;
+	} xb_debug_data_ctl_fld_s;
+} xb_debug_data_ctl_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Controls debug mux setting for XB Input/Output Queues and           *
+ * Arbiter. Can select one of the following values. Details on the      *
+ * debug output lines can be found in the XB chapter of the Bedrock     *
+ * Interface Specification.                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_debug_arb_ctl_u {
+	bdrkreg_t	xb_debug_arb_ctl_regval;
+	struct  {
+		bdrkreg_t	dac_xb_debug_select       :	 3;
+		bdrkreg_t       dac_rsrvd                 :     61;
+	} xb_debug_arb_ctl_fld_s;
+} xb_debug_arb_ctl_u_t;
+
+#else
+
+typedef union xb_debug_arb_ctl_u {
+        bdrkreg_t       xb_debug_arb_ctl_regval;
+        struct  {
+                bdrkreg_t       dac_rsrvd                 :     61;
+                bdrkreg_t       dac_xb_debug_select       :      3;
+        } xb_debug_arb_ctl_fld_s;
+} xb_debug_arb_ctl_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by POQ0. Can be written to test software, will  *
+ * cause an interrupt.                                                  *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_poq0_error_clear_u {
+	bdrkreg_t	xb_poq0_error_clear_regval;
+	struct  {
+		bdrkreg_t	pec_invalid_xsel          :	 2;
+                bdrkreg_t       pec_rsrvd_3               :      2;
+                bdrkreg_t       pec_overflow              :      2;
+                bdrkreg_t       pec_rsrvd_2               :      2;
+                bdrkreg_t       pec_underflow             :      2;
+                bdrkreg_t       pec_rsrvd_1               :      2;
+                bdrkreg_t       pec_tail_timeout          :      2;
+                bdrkreg_t       pec_unused                :      6;
+                bdrkreg_t       pec_rsrvd                 :     44;
+	} xb_poq0_error_clear_fld_s;
+} xb_poq0_error_clear_u_t;
+
+#else
+
+typedef union xb_poq0_error_clear_u {
+	bdrkreg_t	xb_poq0_error_clear_regval;
+	struct	{
+		bdrkreg_t	pec_rsrvd		  :	44;
+		bdrkreg_t	pec_unused		  :	 6;
+		bdrkreg_t	pec_tail_timeout	  :	 2;
+		bdrkreg_t	pec_rsrvd_1		  :	 2;
+		bdrkreg_t	pec_underflow		  :	 2;
+		bdrkreg_t	pec_rsrvd_2		  :	 2;
+		bdrkreg_t	pec_overflow		  :	 2;
+		bdrkreg_t	pec_rsrvd_3		  :	 2;
+		bdrkreg_t	pec_invalid_xsel	  :	 2;
+	} xb_poq0_error_clear_fld_s;
+} xb_poq0_error_clear_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by PIQ0. Note that the PIQ/PI interface         *
+ * precludes PIQ underflow.                                             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_piq0_error_clear_u {
+	bdrkreg_t	xb_piq0_error_clear_regval;
+	struct  {
+		bdrkreg_t	pec_overflow              :	 2;
+                bdrkreg_t       pec_rsrvd_1               :      2;
+                bdrkreg_t       pec_deadlock_timeout      :      2;
+                bdrkreg_t       pec_rsrvd                 :     58;
+	} xb_piq0_error_clear_fld_s;
+} xb_piq0_error_clear_u_t;
+
+#else
+
+typedef union xb_piq0_error_clear_u {
+	bdrkreg_t	xb_piq0_error_clear_regval;
+	struct	{
+		bdrkreg_t	pec_rsrvd		  :	58;
+		bdrkreg_t	pec_deadlock_timeout	  :	 2;
+		bdrkreg_t	pec_rsrvd_1		  :	 2;
+		bdrkreg_t	pec_overflow		  :	 2;
+	} xb_piq0_error_clear_fld_s;
+} xb_piq0_error_clear_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by MP0 queue (the MOQ for processor 0). Since   *
+ * the xselect is decoded on the MD/MOQ interface, no invalid xselect   *
+ * errors are possible.                                                 *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_mp0_error_clear_u {
+	bdrkreg_t	xb_mp0_error_clear_regval;
+	struct  {
+		bdrkreg_t	mec_rsrvd_3               :	 4;
+                bdrkreg_t       mec_overflow              :      2;
+                bdrkreg_t       mec_rsrvd_2               :      2;
+                bdrkreg_t       mec_underflow             :      2;
+                bdrkreg_t       mec_rsrvd_1               :      2;
+                bdrkreg_t       mec_tail_timeout          :      2;
+                bdrkreg_t       mec_rsrvd                 :     50;
+	} xb_mp0_error_clear_fld_s;
+} xb_mp0_error_clear_u_t;
+
+#else
+
+typedef union xb_mp0_error_clear_u {
+	bdrkreg_t	xb_mp0_error_clear_regval;
+	struct	{
+		bdrkreg_t	mec_rsrvd		  :	50;
+		bdrkreg_t	mec_tail_timeout	  :	 2;
+		bdrkreg_t	mec_rsrvd_1		  :	 2;
+		bdrkreg_t	mec_underflow		  :	 2;
+		bdrkreg_t	mec_rsrvd_2		  :	 2;
+		bdrkreg_t	mec_overflow		  :	 2;
+		bdrkreg_t	mec_rsrvd_3		  :	 4;
+	} xb_mp0_error_clear_fld_s;
+} xb_mp0_error_clear_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by MIQ.                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_xm_miq_error_clear_u {
+	bdrkreg_t	xb_xm_miq_error_clear_regval;
+	struct  {
+		bdrkreg_t	xmec_rsrvd_1              :	 4;
+                bdrkreg_t       xmec_deadlock_timeout     :      4;
+                bdrkreg_t       xmec_rsrvd                :     56;
+	} xb_xm_miq_error_clear_fld_s;
+} xb_xm_miq_error_clear_u_t;
+
+#else
+
+typedef union xb_xm_miq_error_clear_u {
+	bdrkreg_t	xb_xm_miq_error_clear_regval;
+	struct	{
+		bdrkreg_t	xmec_rsrvd		  :	56;
+		bdrkreg_t	xmec_deadlock_timeout	  :	 4;
+		bdrkreg_t	xmec_rsrvd_1		  :	 4;
+	} xb_xm_miq_error_clear_fld_s;
+} xb_xm_miq_error_clear_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by NOQ.                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_noq_error_clear_u {
+	bdrkreg_t	xb_noq_error_clear_regval;
+	struct  {
+		bdrkreg_t	nec_rsvd                  :	 4;
+                bdrkreg_t       nec_overflow              :      4;
+                bdrkreg_t       nec_underflow             :      4;
+                bdrkreg_t       nec_tail_timeout          :      4;
+                bdrkreg_t       nec_rsrvd                 :     48;
+	} xb_noq_error_clear_fld_s;
+} xb_noq_error_clear_u_t;
+
+#else
+
+typedef union xb_noq_error_clear_u {
+	bdrkreg_t	xb_noq_error_clear_regval;
+	struct	{
+		bdrkreg_t	nec_rsrvd		  :	48;
+		bdrkreg_t	nec_tail_timeout	  :	 4;
+		bdrkreg_t	nec_underflow		  :	 4;
+		bdrkreg_t	nec_overflow		  :	 4;
+		bdrkreg_t	nec_rsvd		  :	 4;
+	} xb_noq_error_clear_fld_s;
+} xb_noq_error_clear_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by LOQ.                                         *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_loq_error_clear_u {
+	bdrkreg_t	xb_loq_error_clear_regval;
+	struct  {
+		bdrkreg_t	lec_invalid_xsel          :	 2;
+                bdrkreg_t       lec_rsrvd_1               :      6;
+                bdrkreg_t       lec_underflow             :      2;
+                bdrkreg_t       lec_rsvd                  :      2;
+                bdrkreg_t       lec_tail_timeout          :      2;
+                bdrkreg_t       lec_rsrvd                 :     50;
+	} xb_loq_error_clear_fld_s;
+} xb_loq_error_clear_u_t;
+
+#else
+
+typedef union xb_loq_error_clear_u {
+	bdrkreg_t	xb_loq_error_clear_regval;
+	struct	{
+		bdrkreg_t	lec_rsrvd		  :	50;
+		bdrkreg_t	lec_tail_timeout	  :	 2;
+		bdrkreg_t	lec_rsvd		  :	 2;
+		bdrkreg_t	lec_underflow		  :	 2;
+		bdrkreg_t	lec_rsrvd_1		  :	 6;
+		bdrkreg_t	lec_invalid_xsel	  :	 2;
+	} xb_loq_error_clear_fld_s;
+} xb_loq_error_clear_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  Records errors seen by LIQ. Note that the LIQ only records errors   *
+ * for the request channel. The reply channel can never deadlock or     *
+ * overflow because it does not have hardware flow control.             *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_liq_error_clear_u {
+	bdrkreg_t	xb_liq_error_clear_regval;
+	struct  {
+		bdrkreg_t	lec_overflow              :	 1;
+                bdrkreg_t       lec_rsrvd_1               :      3;
+                bdrkreg_t       lec_deadlock_timeout      :      1;
+                bdrkreg_t       lec_rsrvd                 :     59;
+	} xb_liq_error_clear_fld_s;
+} xb_liq_error_clear_u_t;
+
+#else
+
+typedef union xb_liq_error_clear_u {
+        bdrkreg_t       xb_liq_error_clear_regval;
+        struct  {
+                bdrkreg_t       lec_rsrvd                 :     59;
+                bdrkreg_t       lec_deadlock_timeout      :      1;
+                bdrkreg_t       lec_rsrvd_1               :      3;
+                bdrkreg_t       lec_overflow              :      1;
+        } xb_liq_error_clear_fld_s;
+} xb_liq_error_clear_u_t;
+
+#endif
+
+
+
+
+/************************************************************************
+ *                                                                      *
+ *  First error is latched whenever the Valid bit is clear and an       *
+ * error occurs. Any valid bit on in this register causes an            *
+ * interrupt to PI0 and PI1. This interrupt bit will persist until      *
+ * the specific error register to capture the error is cleared, then    *
+ * the FIRST_ERROR register is cleared (in that order.) The             *
+ * FIRST_ERROR register is not writable, but will be set when any of    *
+ * the corresponding error registers are written by software.           *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xb_first_error_clear_u {
+	bdrkreg_t	xb_first_error_clear_regval;
+	struct  {
+		bdrkreg_t	fec_type                  :	 4;
+                bdrkreg_t       fec_channel               :      4;
+                bdrkreg_t       fec_source                :      4;
+                bdrkreg_t       fec_valid                 :      1;
+                bdrkreg_t       fec_rsrvd                 :     51;
+	} xb_first_error_clear_fld_s;
+} xb_first_error_clear_u_t;
+
+#else
+
+typedef union xb_first_error_clear_u {
+	bdrkreg_t	xb_first_error_clear_regval;
+	struct	{
+		bdrkreg_t	fec_rsrvd		  :	51;
+		bdrkreg_t	fec_valid		  :	 1;
+		bdrkreg_t	fec_source		  :	 4;
+		bdrkreg_t	fec_channel		  :	 4;
+		bdrkreg_t	fec_type		  :	 4;
+	} xb_first_error_clear_fld_s;
+} xb_first_error_clear_u_t;
+
+#endif
+
+
+
+
+
+
+#endif /* _LANGUAGE_C */
+
+/************************************************************************
+ *                                                                      *
+ * The following defines were not formed into structures                *
+ *                                                                      *
+ * This could be because the document did not contain details of the    *
+ * register, or because the automated script did not recognize the      *
+ * register details in the documentation. If these register need        *
+ * structure definition, please create them manually                    *
+ *                                                                      *
+ *           XB_POQ1_ERROR            0x700030                          *
+ *           XB_PIQ1_ERROR            0x700038                          *
+ *           XB_MP1_ERROR             0x700048                          *
+ *           XB_MMQ_ERROR             0x700050                          *
+ *           XB_NIQ_ERROR             0x700068                          *
+ *           XB_IOQ_ERROR             0x700070                          *
+ *           XB_IIQ_ERROR             0x700078                          *
+ *           XB_POQ1_ERROR_CLEAR      0x700130                          *
+ *           XB_PIQ1_ERROR_CLEAR      0x700138                          *
+ *           XB_MP1_ERROR_CLEAR       0x700148                          *
+ *           XB_MMQ_ERROR_CLEAR       0x700150                          *
+ *           XB_NIQ_ERROR_CLEAR       0x700168                          *
+ *           XB_IOQ_ERROR_CLEAR       0x700170                          *
+ *           XB_IIQ_ERROR_CLEAR       0x700178                          *
+ *                                                                      *
+ ************************************************************************/
+
+
+/************************************************************************
+ *                                                                      *
+ *               MAKE ALL ADDITIONS AFTER THIS LINE                     *
+ *                                                                      *
+ ************************************************************************/
+
+
+
+
+
+#endif /* _ASM_SN_SN1_HUBXB_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubxb_next.h linux/include/asm-ia64/sn/sn1/hubxb_next.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/hubxb_next.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/hubxb_next.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,32 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_HUBXB_NEXT_H
+#define _ASM_SN_SN1_HUBXB_NEXT_H
+
+/* XB_FIRST_ERROR fe_source field encoding */
+#define XVE_SOURCE_POQ0 0xf	/* 1111 */
+#define XVE_SOURCE_PIQ0 0xe	/* 1110 */
+#define XVE_SOURCE_POQ1 0xd	/* 1101 */
+#define XVE_SOURCE_PIQ1 0xc	/* 1100 */
+#define XVE_SOURCE_MP0  0xb	/* 1011 */
+#define XVE_SOURCE_MP1  0xa	/* 1010 */
+#define XVE_SOURCE_MMQ  0x9	/* 1001 */
+#define XVE_SOURCE_MIQ  0x8	/* 1000 */
+#define XVE_SOURCE_NOQ  0x7	/* 0111 */
+#define XVE_SOURCE_NIQ  0x6	/* 0110 */
+#define XVE_SOURCE_IOQ  0x5	/* 0101 */
+#define XVE_SOURCE_IIQ  0x4	/* 0100 */
+#define XVE_SOURCE_LOQ  0x3	/* 0011 */
+#define XVE_SOURCE_LIQ  0x2	/* 0010 */
+
+/* XB_PARMS fields */
+#define XBP_RESET_DEFAULTS	0x0008000080000021LL
+
+#endif	/* _ASM_SN_SN1_HUBXB_NEXT_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/ip27config.h linux/include/asm-ia64/sn/sn1/ip27config.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/ip27config.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/ip27config.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,657 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef _ASM_SN_SN1_IP27CONFIG_H
+#define _ASM_SN_SN1_IP27CONFIG_H
+
+
+/*
+ * Structure: 	ip27config_s
+ * Typedef:	ip27config_t
+ * Purpose: 	Maps out the region of the boot prom used to define
+ *		configuration information.
+ * Notes:       Corresponds to ip27config structure found in start.s.
+ *		Fields are ulong where possible to facilitate IP27 PROM fetches.
+ */
+
+#define CONFIG_INFO_OFFSET		0x60
+
+#define IP27CONFIG_ADDR			(LBOOT_BASE	    + \
+					 CONFIG_INFO_OFFSET)
+#define IP27CONFIG_ADDR_NODE(n)		(NODE_RBOOT_BASE(n) + \
+					 CONFIG_INFO_OFFSET)
+
+/* Offset to the config_type field within local ip27config structure */
+#define CONFIG_FLAGS_ADDR			(IP27CONFIG_ADDR + 72)
+/* Offset to the config_type field in the ip27config structure on 
+ * node with nasid n
+ */
+#define CONFIG_FLAGS_ADDR_NODE(n)		(IP27CONFIG_ADDR_NODE(n) + 72)
+
+/* Meaning of each valid bit in the config flags 
+ * None are currently defined
+ */
+
+/* Meaning of each mach_type value
+ */
+#define SN1_MACH_TYPE 0
+
+/*
+ * Since 800 ns works well with various HUB frequencies, (such as 360,
+ * 380, 390, and 400 MHZ), we now use 800ns rtc cycle time instead of
+ * 1 microsec.
+ */
+#define IP27_RTC_FREQ			1250	/* 800ns cycle time */
+
+#if _LANGUAGE_C
+
+typedef	struct ip27config_s {		/* KEEP IN SYNC w/ start.s & below  */
+    uint		time_const;	/* Time constant 		    */
+    uint		r10k_mode;	/* R10k boot mode bits 		    */
+
+    uint64_t		magic;		/* CONFIG_MAGIC			    */
+
+    uint64_t		freq_cpu;	/* Hz 				    */
+    uint64_t		freq_hub;	/* Hz 				    */
+    uint64_t		freq_rtc;	/* Hz 				    */
+
+    uint		ecc_enable;	/* ECC enable flag		    */
+    uint		fprom_cyc;	/* FPROM_CYC speed control  	    */
+
+    uint		mach_type;	/* Indicate IP27 (0) or Sn00 (1)    */
+
+    uint		check_sum_adj;	/* Used after config hdr overlay    */
+					/* to make the checksum 0 again     */
+    uint		flash_count;	/* Value incr'd on each PROM flash  */
+    uint		fprom_wr;	/* FPROM_WR speed control  	    */
+
+    uint		pvers_vers;	/* Prom version number		    */
+    uint		pvers_rev;	/* Prom revision number		    */
+    uint		config_type;	/* To support special configurations
+					 * (none currently defined)
+					 */
+} ip27config_t;
+
+typedef	struct {
+    uint		r10k_mode;	/* R10k boot mode bits 		    */
+    uint		freq_cpu;	/* Hz 				    */
+    uint		freq_hub;	/* Hz 				    */
+    char		fprom_cyc;	/* FPROM_CYC speed control  	    */
+    char		mach_type;	/* IP35(0) is only type defined      */
+    char		fprom_wr;	/* FPROM_WR speed control  	    */
+} config_modifiable_t;
+
+#define IP27CONFIG		(*(ip27config_t *) IP27CONFIG_ADDR)
+#define IP27CONFIG_NODE(n)	(*(ip27config_t *) IP27CONFIG_ADDR_NODE(n))
+#define SN00			0 /* IP35 has no Speedo equivalent */
+
+/* Get the config flags from local ip27config */
+#define CONFIG_FLAGS		(*(uint *) (CONFIG_FLAGS_ADDR))
+
+/* Get the config flags from ip27config on the node
+ * with nasid n
+ */
+#define CONFIG_FLAGS_NODE(n)	(*(uint *) (CONFIG_FLAGS_ADDR_NODE(n)))
+
+/* Macro to check if the local ip27config indicates a config
+ * of 12 p 4io
+ */
+#define CONFIG_12P4I		(0) /* IP35 has no 12p4i equivalent */
+
+/* Macro to check if the ip27config on node with nasid n
+ * indicates a config of 12 p 4io
+ */
+#define CONFIG_12P4I_NODE(n)	(0)
+
+#endif /* _LANGUAGE_C */
+
+#if _LANGUAGE_ASSEMBLY
+	.struct		0		/* KEEP IN SYNC WITH C structure */
+
+ip27c_time_const:	.word	0
+ip27c_r10k_mode:	.word	0
+
+ip27c_magic:		.dword	0
+
+ip27c_freq_cpu:		.dword	0
+ip27c_freq_hub:		.dword	0
+ip27c_freq_rtc:		.dword	0
+
+ip27c_ecc_enable:	.word	1
+ip27c_fprom_cyc:	.word	0
+
+ip27c_mach_type:	.word	0
+ip27c_check_sum_adj:	.word	0
+
+ip27c_flash_count:	.word	0
+ip27c_fprom_wr:		.word	0
+
+ip27c_pvers_vers:	.word	0
+ip27c_pvers_rev:	.word	0
+
+ip27c_config_type:	.word 	0	/* To recognize special configs */
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/*
+ * R10000 Configuration Cycle - These define the SYSAD values used
+ * during the reset cycle.
+ */
+
+#define	IP27C_R10000_KSEG0CA_SHFT	0
+#define	IP27C_R10000_KSEG0CA_MASK	(7 << IP27C_R10000_KSEG0CA_SHFT)
+#define	IP27C_R10000_KSEG0CA(_B)	 ((_B) << IP27C_R10000_KSEG0CA_SHFT)
+
+#define	IP27C_R10000_DEVNUM_SHFT	3
+#define	IP27C_R10000_DEVNUM_MASK	(3 << IP27C_R10000_DEVNUM_SHFT)
+#define	IP27C_R10000_DEVNUM(_B)		((_B) << IP27C_R10000_DEVNUM_SHFT)
+
+#define	IP27C_R10000_CRPT_SHFT		5
+#define	IP27C_R10000_CRPT_MASK		(1 << IP27C_R10000_CRPT_SHFT)
+#define	IP27C_R10000_CPRT(_B)		((_B)<<IP27C_R10000_CRPT_SHFT)
+
+#define	IP27C_R10000_PER_SHFT		6
+#define	IP27C_R10000_PER_MASK		(1 << IP27C_R10000_PER_SHFT)
+#define	IP27C_R10000_PER(_B)		((_B) << IP27C_R10000_PER_SHFT)
+
+#define	IP27C_R10000_PRM_SHFT		7
+#define	IP27C_R10000_PRM_MASK		(3 << IP27C_R10000_PRM_SHFT)
+#define	IP27C_R10000_PRM(_B)		((_B) << IP27C_R10000_PRM_SHFT)
+
+#define	IP27C_R10000_SCD_SHFT		9
+#define	IP27C_R10000_SCD_MASK		(0xf << IP27C_R10000_SCD_SHFT)
+#define	IP27C_R10000_SCD(_B)		((_B) << IP27C_R10000_SCD_SHFT)
+
+#define	IP27C_R10000_SCBS_SHFT		13
+#define	IP27C_R10000_SCBS_MASK		(1 << IP27C_R10000_SCBS_SHFT)
+#define	IP27C_R10000_SCBS(_B)		(((_B)) << IP27C_R10000_SCBS_SHFT)
+
+#define	IP27C_R10000_SCCE_SHFT		14
+#define	IP27C_R10000_SCCE_MASK		(1 << IP27C_R10000_SCCE_SHFT)
+#define	IP27C_R10000_SCCE(_B)		((_B) << IP27C_R10000_SCCE_SHFT)
+
+#define	IP27C_R10000_ME_SHFT		15
+#define	IP27C_R10000_ME_MASK		(1 << IP27C_R10000_ME_SHFT)
+#define	IP27C_R10000_ME(_B)		((_B) << IP27C_R10000_ME_SHFT)
+
+#define	IP27C_R10000_SCS_SHFT		16
+#define	IP27C_R10000_SCS_MASK		(7 << IP27C_R10000_SCS_SHFT)
+#define	IP27C_R10000_SCS(_B)		((_B) << IP27C_R10000_SCS_SHFT)
+
+#define	IP27C_R10000_SCCD_SHFT		19
+#define	IP27C_R10000_SCCD_MASK		(7 << IP27C_R10000_SCCD_SHFT)
+#define	IP27C_R10000_SCCD(_B)		((_B) << IP27C_R10000_SCCD_SHFT)
+
+#define	IP27C_R10000_SCCT_SHFT		25
+#define	IP27C_R10000_SCCT_MASK		(0xf << IP27C_R10000_SCCT_SHFT)
+#define	IP27C_R10000_SCCT(_B)		((_B) << IP27C_R10000_SCCT_SHFT)
+
+#define	IP27C_R10000_ODSC_SHFT		29
+#define IP27C_R10000_ODSC_MASK		(1 << IP27C_R10000_ODSC_SHFT)
+#define	IP27C_R10000_ODSC(_B)		((_B) << IP27C_R10000_ODSC_SHFT)
+
+#define	IP27C_R10000_ODSYS_SHFT		30
+#define	IP27C_R10000_ODSYS_MASK		(1 << IP27C_R10000_ODSYS_SHFT)
+#define	IP27C_R10000_ODSYS(_B)		((_B) << IP27C_R10000_ODSYS_SHFT)
+
+#define	IP27C_R10000_CTM_SHFT		31
+#define	IP27C_R10000_CTM_MASK		(1 << IP27C_R10000_CTM_SHFT)
+#define	IP27C_R10000_CTM(_B)		((_B) << IP27C_R10000_CTM_SHFT)
+
+#define IP27C_MHZ(x)			(1000000 * (x))
+#define IP27C_KHZ(x)			(1000 * (x))
+#define IP27C_MB(x)			((x) << 20)
+
+/*
+ * PROM Configurations
+ */
+
+#define CONFIG_MAGIC		0x69703237636f6e66
+
+/* The high 32 bits of the "mode bits".  Bits 7..0 contain one more
+ * than the number of 5ms clocks in the 100ms "long delay" intervals
+ * of the TRex reset sequence.  Bit 8 is the "synergy mode" bit.
+ */
+#define CONFIG_TIME_CONST	0x15
+
+#define CONFIG_ECC_ENABLE	1
+#define CONFIG_CHECK_SUM_ADJ	0
+#define CONFIG_DEFAULT_FLASH_COUNT    0
+
+/*
+ * Some promICEs have trouble if CONFIG_FPROM_SETUP is too low.
+ * The nominal value for 100 MHz hub is 5, for 200MHz bedrock is 16.
+ * any update to the below should also reflected in the logic in
+ *   IO7prom/flashprom.c function _verify_config_info and _fill_in_config_info
+ */
+
+/* default junk bus timing values to use */
+#define CONFIG_SYNERGY_ENABLE	0xff
+#define CONFIG_SYNERGY_SETUP	0xff
+#define CONFIG_UART_ENABLE	0x0c
+#define CONFIG_UART_SETUP	0x02
+#define CONFIG_FPROM_ENABLE	0x10
+#define CONFIG_FPROM_SETUP	0x10
+
+#define CONFIG_FREQ_RTC	IP27C_KHZ(IP27_RTC_FREQ)
+
+#if _LANGUAGE_C
+
+/* we are going to define all the known configs is a table
+ * for building hex images we will pull out the particular
+ * slice we care about by using the IP27_CONFIG_XX_XX as
+ * entries into the table
+ * to keep the table of reasonable size we only include the
+ * values that differ across configurations
+ * please note then that this makes assumptions about what
+ * will and will not change across configurations
+ */
+
+/* these numbers are as they are ordered in the table below */
+#define	IP27_CONFIG_UNKNOWN -1
+#define IP27_CONFIG_SN1_1MB_200_400_200_TABLE 0
+#define IP27_CONFIG_SN00_4MB_100_200_133_TABLE 1
+#define IP27_CONFIG_SN1_4MB_200_400_267_TABLE 2
+#define IP27_CONFIG_SN1_8MB_200_500_250_TABLE 3
+#define IP27_CONFIG_SN1_8MB_200_400_267_TABLE 4
+#define IP27_CONFIG_SN1_4MB_180_360_240_TABLE 5
+#define NUMB_IP_CONFIGS 6
+
+#ifdef DEF_IP_CONFIG_TABLE
+/*
+ * N.B.: A new entry needs to be added here every time a new config is added
+ * The table is indexed by the PIMM PSC value
+ */
+
+static int psc_to_flash_config[] = {
+        IP27_CONFIG_SN1_4MB_200_400_267_TABLE,	/* 0x0 */
+        IP27_CONFIG_SN1_8MB_200_500_250_TABLE,	/* 0x1 */
+        IP27_CONFIG_SN1_8MB_200_400_267_TABLE,	/* 0x2 */
+        IP27_CONFIG_UNKNOWN,	/* 0x3 */
+        IP27_CONFIG_UNKNOWN,	/* 0x4 */
+        IP27_CONFIG_UNKNOWN,	/* 0x5 */
+        IP27_CONFIG_UNKNOWN,	/* 0x6 */
+        IP27_CONFIG_UNKNOWN,	/* 0x7 */
+        IP27_CONFIG_SN1_4MB_180_360_240_TABLE,	/* 0x8 */
+        IP27_CONFIG_UNKNOWN,	/* 0x9 */
+        IP27_CONFIG_UNKNOWN,	/* 0xa */
+        IP27_CONFIG_UNKNOWN,	/* 0xb */
+        IP27_CONFIG_UNKNOWN,	/* 0xc */
+        IP27_CONFIG_UNKNOWN,	/* 0xd */
+        IP27_CONFIG_SN00_4MB_100_200_133_TABLE, /* 0xe  O200 PIMM for bringup */
+        IP27_CONFIG_UNKNOWN	/* 0xf == PIMM not installed */
+};
+
+static config_modifiable_t ip_config_table[NUMB_IP_CONFIGS] = {
+/* the 1MB_200_400_200 values (Generic settings, will work for any config.) */
+{
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(1)	 + \
+	 IP27C_R10000_SCCD(3)	 + \
+	 IP27C_R10000_SCCT(9)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0)),
+	IP27C_MHZ(400),
+	IP27C_MHZ(200),
+	CONFIG_FPROM_SETUP,
+	SN1_MACH_TYPE,
+	CONFIG_FPROM_ENABLE
+},
+
+/* the 4MB_100_200_133 values (O200 PIMM w/translation board, PSC 0xe)
+ * (SysAD at 100MHz (SCD=3), and bedrock core at 200 MHz) */
+{
+ /* ODSYS == 0 means HSTL1 on SysAD bus; other PIMMs use HSTL2 */
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(3)	 + \
+	 IP27C_R10000_SCCD(2)	 + \
+	 IP27C_R10000_SCCT(9)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(0)	 + \
+	 IP27C_R10000_CTM(0)),
+	IP27C_MHZ(200),
+	IP27C_MHZ(200),
+	CONFIG_FPROM_SETUP,
+	SN1_MACH_TYPE,
+	CONFIG_FPROM_ENABLE
+},
+
+/* 4MB_200_400_267 values (R12KS, 3.7ns, LWR, 030-1602-001, PSC 0x0) */
+{
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(3)	 + \
+	 IP27C_R10000_SCCD(2)	 + \
+	 IP27C_R10000_SCCT(0xa)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0)),
+	IP27C_MHZ(400),
+	IP27C_MHZ(200),
+	CONFIG_FPROM_SETUP,
+	SN1_MACH_TYPE,
+	CONFIG_FPROM_ENABLE
+},
+
+/* 8MB_200_500_250 values (R14K, 4.0ns, DDR1, 030-1520-001, PSC 0x1) */
+{
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(4)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(4)	 + \
+	 IP27C_R10000_SCCD(3)	 + \
+	 IP27C_R10000_SCCT(0xa)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0)),
+	IP27C_MHZ(500),
+	IP27C_MHZ(200),
+	CONFIG_FPROM_SETUP,
+	SN1_MACH_TYPE,
+	CONFIG_FPROM_ENABLE
+},
+
+/* 8MB_200_400_267 values (R12KS, 3.7ns, LWR, 030-1616-001, PSC 0x2) */
+{
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(4)	 + \
+	 IP27C_R10000_SCCD(2)	 + \
+	 IP27C_R10000_SCCT(0xa)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0)),
+	IP27C_MHZ(400),
+	IP27C_MHZ(200),
+	CONFIG_FPROM_SETUP,
+	SN1_MACH_TYPE,
+	CONFIG_FPROM_ENABLE
+},
+
+/* 4MB_180_360_240 values (R12KS, 3.7ns, LWR, 030-1627-001, PSC 0x8)
+ * (SysAD at 180 MHz (SCD=3, the fastest possible), bedrock core at 200MHz) */
+{
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(3)	 + \
+	 IP27C_R10000_SCCD(2)	 + \
+	 IP27C_R10000_SCCT(9)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0)),
+	IP27C_MHZ(360),
+	IP27C_MHZ(200),
+	CONFIG_FPROM_SETUP,
+	SN1_MACH_TYPE,
+	CONFIG_FPROM_ENABLE
+},
+
+};
+#else
+extern	config_modifiable_t	ip_config_table[];
+#endif /* DEF_IP_CONFIG_TABLE */
+
+#ifdef IP27_CONFIG_SN00_4MB_100_200_133
+#define CONFIG_CPU_MODE	ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].r10k_mode
+#define CONFIG_FREQ_CPU	ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].freq_cpu
+#define CONFIG_FREQ_HUB	ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].freq_hub
+#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].fprom_cyc
+#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].mach_type
+#define CONFIG_FPROM_WR	ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].fprom_wr
+#endif /* IP27_CONFIG_SN00_4MB_100_200_133 */
+
+#ifdef IP27_CONFIG_SN1_1MB_200_400_200
+#define CONFIG_CPU_MODE	ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].r10k_mode
+#define CONFIG_FREQ_CPU	ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].freq_cpu
+#define CONFIG_FREQ_HUB	ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].freq_hub
+#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].fprom_cyc
+#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].mach_type
+#define CONFIG_FPROM_WR	ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].fprom_wr
+#endif /* IP27_CONFIG_SN1_1MB_200_400_200 */
+
+#ifdef IP27_CONFIG_SN1_4MB_200_400_267
+#define CONFIG_CPU_MODE	ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].r10k_mode
+#define CONFIG_FREQ_CPU	ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].freq_cpu
+#define CONFIG_FREQ_HUB	ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].freq_hub
+#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].fprom_cyc
+#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].mach_type
+#define CONFIG_FPROM_WR	ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].fprom_wr
+#endif /* IP27_CONFIG_SN1_4MB_200_400_267 */
+
+#ifdef IP27_CONFIG_SN1_8MB_200_500_250
+#define CONFIG_CPU_MODE	ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].r10k_mode
+#define CONFIG_FREQ_CPU	ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].freq_cpu
+#define CONFIG_FREQ_HUB	ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].freq_hub
+#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].fprom_cyc
+#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].mach_type
+#define CONFIG_FPROM_WR	ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].fprom_wr
+#endif /* IP27_CONFIG_SN1_8MB_200_500_250 */
+
+#ifdef IP27_CONFIG_SN1_8MB_200_400_267
+#define CONFIG_CPU_MODE	ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].r10k_mode
+#define CONFIG_FREQ_CPU	ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].freq_cpu
+#define CONFIG_FREQ_HUB	ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].freq_hub
+#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].fprom_cyc
+#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].mach_type
+#define CONFIG_FPROM_WR	ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].fprom_wr
+#endif /* IP27_CONFIG_SN1_8MB_200_400_267 */
+
+#ifdef IP27_CONFIG_SN1_4MB_180_360_240
+#define CONFIG_CPU_MODE	ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].r10k_mode
+#define CONFIG_FREQ_CPU	ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].freq_cpu
+#define CONFIG_FREQ_HUB	ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].freq_hub
+#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].fprom_cyc
+#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].mach_type
+#define CONFIG_FPROM_WR	ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].fprom_wr
+#endif /* IP27_CONFIG_SN1_4MB_180_360_240 */
+
+#endif /* _LANGUAGE_C */
+
+#if _LANGUAGE_ASSEMBLY
+
+/* these need to be in here since we need assembly definitions
+ * for building hex images (as required by start.s)
+ */
+#ifdef IP27_CONFIG_SN00_4MB_100_200_133
+#ifdef IRIX
+/* Set PrcReqMax to 0 to reduce memory problems */
+#define	BRINGUP_PRM_VAL	0
+#else
+#define	BRINGUP_PRM_VAL	3
+#endif
+#define CONFIG_CPU_MODE \
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(BRINGUP_PRM_VAL)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(3)	 + \
+	 IP27C_R10000_SCCD(2)	 + \
+	 IP27C_R10000_SCCT(9)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(0)	 + \
+	 IP27C_R10000_CTM(0))
+#define CONFIG_FREQ_CPU IP27C_MHZ(200)
+#define CONFIG_FREQ_HUB IP27C_MHZ(200)
+#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
+#define CONFIG_MACH_TYPE SN1_MACH_TYPE
+#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
+#endif /* IP27_CONFIG_SN00_4MB_100_200_133 */
+
+#ifdef IP27_CONFIG_SN1_1MB_200_400_200
+#define CONFIG_CPU_MODE \
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(1)	 + \
+	 IP27C_R10000_SCCD(3)	 + \
+	 IP27C_R10000_SCCT(9)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0))
+#define CONFIG_FREQ_CPU IP27C_MHZ(400)
+#define CONFIG_FREQ_HUB IP27C_MHZ(200)
+#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
+#define CONFIG_MACH_TYPE SN1_MACH_TYPE
+#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
+#endif /* IP27_CONFIG_SN1_1MB_200_400_200 */
+
+#ifdef IP27_CONFIG_SN1_4MB_200_400_267
+#define CONFIG_CPU_MODE \
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(3)	 + \
+	 IP27C_R10000_SCCD(2)	 + \
+	 IP27C_R10000_SCCT(0xa)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0))
+#define CONFIG_FREQ_CPU IP27C_MHZ(400)
+#define CONFIG_FREQ_HUB IP27C_MHZ(200)
+#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
+#define CONFIG_MACH_TYPE SN1_MACH_TYPE
+#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
+#endif /* IP27_CONFIG_SN1_4MB_200_400_267 */
+
+#ifdef IP27_CONFIG_SN1_8MB_200_500_250
+#define CONFIG_CPU_MODE \
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(4)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(4)	 + \
+	 IP27C_R10000_SCCD(3)	 + \
+	 IP27C_R10000_SCCT(0xa)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0))
+#define CONFIG_FREQ_CPU IP27C_MHZ(500)
+#define CONFIG_FREQ_HUB IP27C_MHZ(200)
+#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
+#define CONFIG_MACH_TYPE SN1_MACH_TYPE
+#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
+#endif /* IP27_CONFIG_SN1_8MB_200_500_250 */
+
+#ifdef IP27_CONFIG_SN1_8MB_200_400_267
+#define CONFIG_CPU_MODE \
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(4)	 + \
+	 IP27C_R10000_SCCD(2)	 + \
+	 IP27C_R10000_SCCT(0xa)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0))
+#define CONFIG_FREQ_CPU IP27C_MHZ(400)
+#define CONFIG_FREQ_HUB IP27C_MHZ(200)
+#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
+#define CONFIG_MACH_TYPE SN1_MACH_TYPE
+#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
+#endif /* IP27_CONFIG_SN1_8MB_200_400_267 */
+
+#ifdef IP27_CONFIG_SN1_4MB_180_360_240
+#define CONFIG_CPU_MODE \
+	(IP27C_R10000_KSEG0CA(5) + \
+	 IP27C_R10000_DEVNUM(0)	 + \
+	 IP27C_R10000_CPRT(0)	 + \
+	 IP27C_R10000_PER(0)	 + \
+	 IP27C_R10000_PRM(3)	 + \
+	 IP27C_R10000_SCD(3)	 + \
+	 IP27C_R10000_SCBS(1)	 + \
+	 IP27C_R10000_SCCE(0)	 + \
+	 IP27C_R10000_ME(1)	 + \
+	 IP27C_R10000_SCS(3)	 + \
+	 IP27C_R10000_SCCD(2)	 + \
+	 IP27C_R10000_SCCT(9)	 + \
+	 IP27C_R10000_ODSC(0)	 + \
+	 IP27C_R10000_ODSYS(1)	 + \
+	 IP27C_R10000_CTM(0))
+#define CONFIG_FREQ_CPU IP27C_MHZ(360)
+#define CONFIG_FREQ_HUB IP27C_MHZ(200)
+#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
+#define CONFIG_MACH_TYPE SN1_MACH_TYPE
+#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
+#endif /* IP27_CONFIG_SN1_4MB_180_360_240 */
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+#endif /* _ASM_SN_SN1_IP27CONFIG_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/kldir.h linux/include/asm-ia64/sn/sn1/kldir.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/kldir.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/kldir.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,222 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef _ASM_SN_SN1_KLDIR_H
+#define _ASM_SN_SN1_KLDIR_H
+
+/*
+ * The upper portion of the memory map applies during boot
+ * only and is overwritten by IRIX/SYMMON.  The minimum memory bank
+ * size on IP35 is 64M, which provides a limit on the amount of space
+ * the PROM can assume it has available.
+ *
+ * Most of the addresses below are defined as macros in this file, or
+ * in SN/addrs.h or SN/SN1/addrs.h.
+ *
+ *                                    MEMORY MAP PER NODE
+ *
+ * 0x4000000 (64M)         +-----------------------------------------+
+ *                         |                                         |
+ *                         |                                         |
+ *                         |      IO7 TEXT/DATA/BSS/stack            |
+ * 0x3000000 (48M)         +-----------------------------------------+
+ *                         |      Free                               |
+ * 0x2102000 (>33M)        +-----------------------------------------+
+ *                         |      IP35 Topology (PCFG) + misc data   |
+ * 0x2000000 (32M)         +-----------------------------------------+
+ *                         |      IO7 BUFFERS FOR FLASH ENET IOC3    |
+ * 0x1F80000 (31.5M)       +-----------------------------------------+
+ *                         |      Free                               |
+ * 0x1C00000 (28M)         +-----------------------------------------+
+ *                         |      IP35 PROM TEXT/DATA/BSS/stack      |
+ * 0x1A00000 (26M)         +-----------------------------------------+
+ *                         |      Routing temp. space                |
+ * 0x1800000 (24M)         +-----------------------------------------+
+ *                         |      Diagnostics temp. space            |
+ * 0x1500000 (21M)         +-----------------------------------------+
+ *                         |      Free                               |
+ * 0x1400000 (20M)         +-----------------------------------------+
+ *                         |      IO7 PROM temporary copy            |
+ * 0x1300000 (19M)         +-----------------------------------------+
+ *                         |                                         |
+ *                         |      Free                               |
+ *                         |      (UNIX DATA starts above 0x1000000) |
+ *                         |                                         |
+ *                         +-----------------------------------------+
+ *                         |      UNIX DEBUG Version                 |
+ * 0x0310000 (3.1M)        +-----------------------------------------+
+ *                         |      SYMMON, loaded just below UNIX     |
+ *                         |      (For UNIX Debug only)              |
+ *                         |                                         |
+ *                         |                                         |
+ * 0x006C000 (432K)        +-----------------------------------------+
+ *                         |      SYMMON STACK [NUM_CPU_PER_NODE]    |
+ *                         |      (For UNIX Debug only)              |
+ * 0x004C000 (304K)        +-----------------------------------------+
+ *                         |                                         |
+ *                         |                                         |
+ *                         |      UNIX NON-DEBUG Version             |
+ * 0x0040000 (256K)        +-----------------------------------------+
+ *
+ *
+ * The lower portion of the memory map contains information that is
+ * permanent and is used by the IP35PROM, IO7PROM and IRIX.
+ *
+ * 0x40000 (256K)          +-----------------------------------------+
+ *                         |                                         |
+ *                         |      KLCONFIG (64K)                     |
+ *                         |                                         |
+ * 0x30000 (192K)          +-----------------------------------------+
+ *                         |                                         |
+ *                         |      PI Error Spools (64K)              |
+ *                         |                                         |
+ * 0x20000 (128K)          +-----------------------------------------+
+ *                         |                                         |
+ *                         |      Unused                             |
+ *                         |                                         |
+ * 0x19000 (100K)          +-----------------------------------------+
+ *                         |      Early cache Exception stack (CPU 3)|
+ * 0x18800 (98K)           +-----------------------------------------+
+ *			   |      cache error eframe (CPU 3)	     |
+ * 0x18400 (97K)           +-----------------------------------------+
+ *                         |      Exception Handlers (CPU 3)         |
+ * 0x18000 (96K)           +-----------------------------------------+
+ *                         |                                         |
+ *                         |      Unused                             |
+ *                         |                                         |
+ * 0x13c00 (79K)           +-----------------------------------------+
+ *                         |      GPDA (8k)                          |
+ * 0x11c00 (71K)           +-----------------------------------------+
+ *                         |      Early cache Exception stack (CPU 2)|
+ * 0x10800 (66k)	   +-----------------------------------------+
+ *			   |      cache error eframe (CPU 2)	     |
+ * 0x10400 (65K)           +-----------------------------------------+
+ *                         |      Exception Handlers (CPU 2)         |
+ * 0x10000 (64K)           +-----------------------------------------+
+ *                         |                                         |
+ *                         |      Unused                             |
+ *                         |                                         |
+ * 0x0b400 (45K)           +-----------------------------------------+
+ *                         |      GDA (1k)                           |
+ * 0x0b000 (44K)           +-----------------------------------------+
+ *                         |      NMI Eframe areas (4)       	     |
+ * 0x0a000 (40K)           +-----------------------------------------+
+ *                         |      NMI Register save areas (4)        |
+ * 0x09000 (36K)           +-----------------------------------------+
+ *                         |      Early cache Exception stack (CPU 1)|
+ * 0x08800 (34K)           +-----------------------------------------+
+ *			   |      cache error eframe (CPU 1)	     |
+ * 0x08400 (33K)           +-----------------------------------------+
+ *                         |      Exception Handlers (CPU 1)         |
+ * 0x08000 (32K)           +-----------------------------------------+
+ *                         |                                         |
+ *                         |                                         |
+ *                         |      Unused                             |
+ *                         |                                         |
+ *                         |                                         |
+ * 0x04000 (16K)           +-----------------------------------------+
+ *                         |      NMI Handler (Protected Page)       |
+ * 0x03000 (12K)           +-----------------------------------------+
+ *                         |      ARCS PVECTORS (master node only)   |
+ * 0x02c00 (11K)           +-----------------------------------------+
+ *                         |      ARCS TVECTORS (master node only)   |
+ * 0x02800 (10K)           +-----------------------------------------+
+ *                         |      LAUNCH [NUM_CPU]                   |
+ * 0x02400 (9K)            +-----------------------------------------+
+ *                         |      Low memory directory (KLDIR)       |
+ * 0x02000 (8K)            +-----------------------------------------+
+ *                         |      ARCS SPB (1K)                      |
+ * 0x01000 (4K)            +-----------------------------------------+
+ *                         |      Early cache Exception stack (CPU 0)|
+ * 0x00800 (2k)	           +-----------------------------------------+
+ *			   |      cache error eframe (CPU 0)	     |
+ * 0x00400 (1K)            +-----------------------------------------+
+ *                         |      Exception Handlers (CPU 0)         |
+ * 0x00000 (0K)            +-----------------------------------------+
+ */
+
+/*
+ * NOTE:  To change the kernel load address, you must update:
+ *  - the appropriate elspec files in irix/kern/master.d
+ *  - NODEBUGUNIX_ADDR in SN/SN1/addrs.h
+ *  - IP27_FREEMEM_OFFSET below
+ *  - KERNEL_START_OFFSET below (if supporting cells)
+ */
+
+
+/*
+ * This is defined here because IP27_SYMMON_STK_SIZE must be at least what
+ * we define here.  Since it's set up in the prom.  We can't redefine it later
+ * and expect more space to be allocated.  The way to find out the true size
+ * of the symmon stacks is to divide SYMMON_STK_SIZE by SYMMON_STK_STRIDE
+ * for a particular node.
+ */
+#define SYMMON_STACK_SIZE		0x8000
+
+#if defined (PROM) || defined (SABLE)
+
+/*
+ * These defines are prom version dependent.  No code other than the IP35
+ * prom should attempt to use these values.
+ */
+#define IP27_LAUNCH_OFFSET		0x2400
+#define IP27_LAUNCH_SIZE		0x400
+#define IP27_LAUNCH_COUNT		4
+#define IP27_LAUNCH_STRIDE		0x100 /* could be as small as 0x80 */
+
+#define IP27_KLCONFIG_OFFSET		0x30000
+#define IP27_KLCONFIG_SIZE		0x10000
+#define IP27_KLCONFIG_COUNT		1
+#define IP27_KLCONFIG_STRIDE		0
+
+#define IP27_NMI_OFFSET			0x3000
+#define IP27_NMI_SIZE			0x100
+#define IP27_NMI_COUNT			4
+#define IP27_NMI_STRIDE			0x40
+
+#define IP27_PI_ERROR_OFFSET		0x20000
+#define IP27_PI_ERROR_SIZE		0x10000
+#define IP27_PI_ERROR_COUNT		1
+#define IP27_PI_ERROR_STRIDE		0
+
+#define IP27_SYMMON_STK_OFFSET		0x4c000
+#define IP27_SYMMON_STK_SIZE		0x20000
+#define IP27_SYMMON_STK_COUNT		4
+/* IP27_SYMMON_STK_STRIDE must be >= SYMMON_STACK_SIZE */
+#define IP27_SYMMON_STK_STRIDE		0x8000
+
+#define IP27_FREEMEM_OFFSET		0x40000
+#define IP27_FREEMEM_SIZE		-1
+#define IP27_FREEMEM_COUNT		1
+#define IP27_FREEMEM_STRIDE		0
+
+#endif /* PROM || SABLE*/
+/*
+ * There will be only one of these in a partition so the IO7 must set it up.
+ */
+#define IO6_GDA_OFFSET			0xb000
+#define IO6_GDA_SIZE			0x400
+#define IO6_GDA_COUNT			1
+#define IO6_GDA_STRIDE			0
+
+/*
+ * save area of kernel nmi regs in the prom format
+ */
+#define IP27_NMI_KREGS_OFFSET		0x9000
+#define IP27_NMI_KREGS_CPU_SIZE		0x400
+/*
+ * save area of kernel nmi regs in eframe format 
+ */
+#define IP27_NMI_EFRAME_OFFSET		0xa000
+#define IP27_NMI_EFRAME_SIZE		0x400
+
+#define GPDA_OFFSET			0x11c00
+
+#endif /* _ASM_SN_SN1_KLDIR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/leds.h linux/include/asm-ia64/sn/sn1/leds.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/leds.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/leds.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,35 @@
+#ifndef _ASM_SN_SN1_LED_H
+#define _ASM_SN_SN1_LED_H
+
+/*
+ * Copyright (C) 2000 Silicon Graphics, Inc
+ * Copyright (C) 2000 Jack Steiner (steiner@sgi.com)
+ */
+
+#include <asm/smp.h>
+
+#define LED0		0xc0000b00100000c0LL	/* ZZZ fixme */
+
+
+
+#define LED_AP_START	0x01		/* AP processor started */
+#define LED_AP_IDLE	0x01
+
+/*
+ * Basic macros for flashing the LEDS on an SGI, SN1.
+ */
+
+extern __inline__ void
+HUB_SET_LED(int val)
+{
+	long	*ledp;
+	int	eid;
+
+	eid = hard_processor_sapicid() & 3;
+	ledp = (long*) (LED0 + (eid<<3));
+	*ledp = val;
+}
+
+
+#endif /* _ASM_SN_SN1_LED_H */
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/promlog.h linux/include/asm-ia64/sn/sn1/promlog.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/promlog.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/promlog.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,85 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef	_ASM_SN_SN1_PROMLOG_H
+#define	_ASM_SN_SN1_PROMLOG_H
+
+#include <asm/sn/fprom.h>
+
+#define PROMLOG_MAGIC			0x504c4f49
+#define PROMLOG_VERSION			1
+
+#define PROMLOG_OFFSET_MAGIC		0x10
+#define PROMLOG_OFFSET_VERSION		0x14
+#define PROMLOG_OFFSET_SEQUENCE		0x18
+#define PROMLOG_OFFSET_ENTRY0		0x100
+
+#define PROMLOG_ERROR_NONE		0
+#define PROMLOG_ERROR_PROM	       -1
+#define PROMLOG_ERROR_MAGIC	       -2
+#define PROMLOG_ERROR_CORRUPT	       -3
+#define PROMLOG_ERROR_BOL	       -4
+#define PROMLOG_ERROR_EOL	       -5
+#define PROMLOG_ERROR_POS	       -6
+#define PROMLOG_ERROR_REPLACE	       -7
+#define PROMLOG_ERROR_COMPACT	       -8
+#define PROMLOG_ERROR_FULL	       -9
+#define PROMLOG_ERROR_ARG	       -10
+#define PROMLOG_ERROR_UNUSED	       -11	  	
+
+#define PROMLOG_TYPE_UNUSED		0xf
+#define PROMLOG_TYPE_LOG		3
+#define PROMLOG_TYPE_LIST		2
+#define PROMLOG_TYPE_VAR		1
+#define PROMLOG_TYPE_DELETED		0
+
+#define PROMLOG_TYPE_ANY		98
+#define PROMLOG_TYPE_INVALID		99
+
+#define PROMLOG_KEY_MAX			14
+#define PROMLOG_VALUE_MAX		47
+#define PROMLOG_CPU_MAX			4
+
+typedef struct promlog_header_s {
+    unsigned int	unused[4];
+    unsigned int	magic;
+    unsigned int	version;
+    unsigned int	sequence;
+} promlog_header_t;
+
+typedef unsigned int promlog_pos_t;
+
+typedef struct promlog_ent_s {		/* PROM individual entry */
+    uint		type		: 4;
+    uint		cpu_num		: 4;
+    char		key[PROMLOG_KEY_MAX + 1];
+
+    char		value[PROMLOG_VALUE_MAX + 1];
+
+} promlog_ent_t;
+
+typedef struct promlog_s {		/* Activation handle */
+    fprom_t		f;
+    int			sector_base;
+    int			cpu_num;
+
+    int			active;		/* Active sector, 0 or 1 */
+
+    promlog_pos_t	log_start;
+    promlog_pos_t	log_end;
+
+    promlog_pos_t	alt_start;
+    promlog_pos_t	alt_end;
+
+    promlog_pos_t	pos;
+    promlog_ent_t	ent;
+} promlog_t;
+
+#endif /* _ASM_SN_SN1_PROMLOG_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/router.h linux/include/asm-ia64/sn/sn1/router.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/router.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/router.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,669 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef	_ASM_SN_SN1_ROUTER_H
+#define	_ASM_SN_SN1_ROUTER_H
+
+/*
+ * Router Register definitions
+ *
+ * Macro argument _L always stands for a link number (1 to 8, inclusive).
+ */
+
+#if defined(_LANGUAGE_C) || defined(_LANGUAGE_C_PLUS_PLUS)
+
+#include <asm/sn/vector.h>
+#include <asm/sn/slotnum.h>
+#include <asm/sn/arch.h>
+
+typedef uint64_t	router_reg_t;
+
+#define MAX_ROUTERS	64
+
+#define MAX_ROUTER_PATH	80
+
+#define ROUTER_REG_CAST		(volatile router_reg_t *)
+#define PS_UINT_CAST		(__psunsigned_t)
+#define UINT64_CAST		(uint64_t)
+typedef signed char port_no_t;	 /* Type for router port number      */
+
+#elif _LANGUAGE_ASSEMBLY
+
+#define ROUTERREG_CAST
+#define PS_UINT_CAST
+#define UINT64_CAST
+
+#endif /* _LANGUAGE_C || _LANGUAGE_C_PLUS_PLUS */
+
+#define MAX_ROUTER_PORTS (8)	 /* Max. number of ports on a router */
+
+#define ALL_PORTS ((1 << MAX_ROUTER_PORTS) - 1)	/* for 0 based references */
+
+#define PORT_INVALID (-1)	 /* Invalid port number              */
+
+#define	IS_META(_rp)	((_rp)->flags & PCFG_ROUTER_META)
+
+#define	IS_REPEATER(_rp)((_rp)->flags & PCFG_ROUTER_REPEATER)
+
+/*
+ * RR_TURN makes a given number of clockwise turns (0 to 7) from an input
+ * port to generate an output port.
+ *
+ * RR_DISTANCE returns the number of turns necessary (0 to 7) to go from
+ * an input port (_L1 = 1 to 8) to an output port ( _L2 = 1 to 8).
+ *
+ * These are written to work on unsigned data.
+ */
+
+#define RR_TURN(_L, count)	((_L) + (count) > MAX_ROUTER_PORTS ?	\
+				 (_L) + (count) - MAX_ROUTER_PORTS :	\
+				 (_L) + (count))
+
+#define RR_DISTANCE(_LS, _LD)	((_LD) >= (_LS) ?			\
+				 (_LD) - (_LS) :			\
+				 (_LD) + MAX_ROUTER_PORTS - (_LS))
+
+/* Router register addresses */
+
+#define RR_STATUS_REV_ID	0x00000	/* Status register and Revision ID  */
+#define RR_PORT_RESET		0x00008	/* Multiple port reset              */
+#define RR_PROT_CONF		0x00010	/* Inter-partition protection conf. */
+#define RR_GLOBAL_PORT_DEF	0x00018 /* Global Port definitions          */
+#define RR_GLOBAL_PARMS0	0x00020	/* Parameters shared by all 8 ports */
+#define RR_GLOBAL_PARMS1	0x00028	/* Parameters shared by all 8 ports */
+#define RR_DIAG_PARMS		0x00030	/* Parameters for diag. testing     */
+#define RR_DEBUG_ADDR		0x00038 /* Debug address select - debug port*/
+#define RR_LB_TO_L2		0x00040 /* Local Block to L2 cntrl intf reg */ 
+#define RR_L2_TO_LB		0x00048 /* L2 cntrl intf to Local Block reg */
+#define RR_JBUS_CONTROL		0x00050 /* read/write timing for JBUS intf  */
+
+#define RR_SCRATCH_REG0		0x00100	/* Scratch 0 is 64 bits */
+#define RR_SCRATCH_REG1		0x00108	/* Scratch 1 is 64 bits */
+#define RR_SCRATCH_REG2		0x00110	/* Scratch 2 is 64 bits */
+#define RR_SCRATCH_REG3		0x00118	/* Scratch 3 is 1 bit */
+#define RR_SCRATCH_REG4		0x00120	/* Scratch 4 is 1 bit */
+
+#define RR_JBUS0(_D)		(((_D) & 0x7) << 3 | 0x00200) /* JBUS0 addresses   */
+#define RR_JBUS1(_D)		(((_D) & 0x7) << 3 | 0x00240) /* JBUS1 addresses   */
+
+#define RR_SCRATCH_REG0_WZ	0x00500	/* Scratch 0 is 64 bits */
+#define RR_SCRATCH_REG1_WZ	0x00508	/* Scratch 1 is 64 bits */
+#define RR_SCRATCH_REG2_WZ	0x00510	/* Scratch 2 is 64 bits */
+#define RR_SCRATCH_REG3_SZ	0x00518	/* Scratch 3 is 1 bit */
+#define RR_SCRATCH_REG4_SZ	0x00520	/* Scratch 4 is 1 bit */
+
+#define RR_VECTOR_HW_BAR(context) (0x08000 | (context)<<3) /* barrier config registers */
+/* Port-specific registers (_L is the link number from 1 to 8) */
+
+#define RR_PORT_PARMS(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0000) /* LLP parameters     */
+#define RR_STATUS_ERROR(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0008) /* Port-related errs  */
+#define RR_CHANNEL_TEST(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0010) /* Port LLP chan test */
+#define RR_RESET_MASK(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0018) /* Remote reset mask  */
+#define RR_HISTOGRAM0(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0020) /* Port usage histgrm */
+#define RR_HISTOGRAM1(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0028) /* Port usage histgrm */
+#define RR_HISTOGRAM0_WC(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0030) /* Port usage histgrm */
+#define RR_HISTOGRAM1_WC(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0038) /* Port usage histgrm */
+#define RR_ERROR_CLEAR(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0088) /* Read/clear errors  */
+#define RR_GLOBAL_TABLE0(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0100) /* starting address of global table for this port */
+#define RR_GLOBAL_TABLE(_L, _x) (RR_GLOBAL_TABLE0(_L) + ((_x) << 3))
+#define RR_LOCAL_TABLE0(_L)	(((_L+1) & 0xe) << 15 | ((_L+1) & 0x1) << 11 | 0x0200) /* starting address of local table for this port */
+#define RR_LOCAL_TABLE(_L, _x) (RR_LOCAL_TABLE0(_L) + ((_x) << 3))
+
+#define RR_META_ENTRIES		16
+
+#define RR_LOCAL_ENTRIES	128
+
+/*
+ * RR_STATUS_REV_ID mask and shift definitions
+ */
+
+#define RSRI_INPORT_SHFT	52
+#define RSRI_INPORT_MASK	(UINT64_CAST 0xf << 52)
+#define RSRI_LINKWORKING_BIT(_L) (35 + 2 * (_L))
+#define RSRI_LINKWORKING(_L)	(UINT64_CAST 1 << (35 + 2 * (_L)))
+#define RSRI_LINKRESETFAIL(_L)	(UINT64_CAST 1 << (34 + 2 * (_L)))
+#define RSRI_LSTAT_SHFT(_L)	(34 + 2 * (_L))
+#define RSRI_LSTAT_MASK(_L)	(UINT64_CAST 0x3 << 34 + 2 * (_L))
+#define RSRI_LOCALSBERROR	(UINT64_CAST 1 << 35)
+#define RSRI_LOCALSTUCK		(UINT64_CAST 1 << 34)
+#define RSRI_LOCALBADVEC	(UINT64_CAST 1 << 33)
+#define RSRI_LOCALTAILERR	(UINT64_CAST 1 << 32)
+#define RSRI_LOCAL_SHFT 	32
+#define RSRI_LOCAL_MASK		(UINT64_CAST 0xf << 32)
+#define RSRI_CHIPREV_SHFT	28
+#define RSRI_CHIPREV_MASK	(UINT64_CAST 0xf << 28)
+#define RSRI_CHIPID_SHFT	12
+#define RSRI_CHIPID_MASK	(UINT64_CAST 0xffff << 12)
+#define RSRI_MFGID_SHFT		1
+#define RSRI_MFGID_MASK		(UINT64_CAST 0x7ff << 1)
+
+#define RSRI_LSTAT_WENTDOWN	0
+#define RSRI_LSTAT_RESETFAIL	1
+#define RSRI_LSTAT_LINKUP	2
+#define RSRI_LSTAT_NOTUSED	3
+
+/*
+ * RR_PORT_RESET mask definitions
+ */
+
+#define RPRESET_WARM		(UINT64_CAST 1 << 9)
+#define RPRESET_LINK(_L)	(UINT64_CAST 1 << (_L))
+#define RPRESET_LOCAL		(UINT64_CAST 1)
+
+/*
+ * RR_PROT_CONF mask and shift definitions
+ */
+
+#define RPCONF_DIRCMPDIS_SHFT	13
+#define RPCONF_DIRCMPDIS_MASK	(UINT64_CAST 1 << 13)
+#define RPCONF_FORCELOCAL	(UINT64_CAST 1 << 12)
+#define RPCONF_FLOCAL_SHFT	12
+#define RPCONF_METAID_SHFT	8
+#define RPCONF_METAID_MASK	(UINT64_CAST 0xf << 8)
+#define RPCONF_RESETOK(_L)	(UINT64_CAST 1 << ((_L) - 1))
+
+/*
+ * RR_GLOBAL_PORT_DEF mask and shift definitions
+ */
+
+#define RGPD_MGLBLNHBR_ID_SHFT	12	/* -global neighbor ID */
+#define RGPD_MGLBLNHBR_ID_MASK	(UINT64_CAST 0xf << 12)
+#define RGPD_MGLBLNHBR_VLD_SHFT	11	/* -global neighbor Valid */
+#define RGPD_MGLBLNHBR_VLD_MASK	(UINT64_CAST 0x1 << 11)
+#define RGPD_MGLBLPORT_SHFT	8	/* -global neighbor Port */
+#define RGPD_MGLBLPORT_MASK	(UINT64_CAST 0x7 << 8)
+#define RGPD_PGLBLNHBR_ID_SHFT	4	/* +global neighbor ID */
+#define RGPD_PGLBLNHBR_ID_MASK	(UINT64_CAST 0xf << 4)
+#define RGPD_PGLBLNHBR_VLD_SHFT	3	/* +global neighbor Valid */
+#define RGPD_PGLBLNHBR_VLD_MASK	(UINT64_CAST 0x1 << 3)
+#define RGPD_PGLBLPORT_SHFT	0	/* +global neighbor Port */
+#define RGPD_PGLBLPORT_MASK	(UINT64_CAST 0x7 << 0)
+
+#define GLBL_PARMS_REGS		2	/* Two Global Parms registers */
+
+/*
+ * RR_GLOBAL_PARMS0 mask and shift definitions
+ */
+
+#define RGPARM0_ARB_VALUE_SHFT	54	/* Local Block Arbitration State */
+#define RGPARM0_ARB_VALUE_MASK	(UINT64_CAST 0x7 << 54)
+#define RGPARM0_ROTATEARB_SHFT	53	/* Rotate Local Block Arbitration */
+#define RGPARM0_ROTATEARB_MASK	(UINT64_CAST 0x1 << 53)
+#define RGPARM0_FAIREN_SHFT	52	/* Fairness logic Enable */
+#define RGPARM0_FAIREN_MASK	(UINT64_CAST 0x1 << 52)
+#define RGPARM0_LOCGNTTO_SHFT	40	/* Local grant timeout */
+#define RGPARM0_LOCGNTTO_MASK	(UINT64_CAST 0xfff << 40)
+#define RGPARM0_DATELINE_SHFT	38	/* Dateline crossing router */
+#define RGPARM0_DATELINE_MASK	(UINT64_CAST 0x1 << 38)
+#define RGPARM0_MAXRETRY_SHFT	28	/* Max retry count */
+#define RGPARM0_MAXRETRY_MASK	(UINT64_CAST 0x3ff << 28)
+#define RGPARM0_URGWRAP_SHFT	20	/* Urgent wrap */
+#define RGPARM0_URGWRAP_MASK	(UINT64_CAST 0xff << 20)
+#define RGPARM0_DEADLKTO_SHFT	16	/* Deadlock timeout */
+#define RGPARM0_DEADLKTO_MASK	(UINT64_CAST 0xf << 16)
+#define RGPARM0_URGVAL_SHFT	12	/* Urgent value */
+#define RGPARM0_URGVAL_MASK	(UINT64_CAST 0xf << 12)
+#define RGPARM0_VCHSELEN_SHFT	11	/* VCH_SEL_EN */
+#define RGPARM0_VCHSELEN_MASK	(UINT64_CAST 0x1 << 11)
+#define RGPARM0_LOCURGTO_SHFT	9	/* Local urgent timeout */
+#define RGPARM0_LOCURGTO_MASK	(UINT64_CAST 0x3 << 9)
+#define RGPARM0_TAILVAL_SHFT	5	/* Tail value */
+#define RGPARM0_TAILVAL_MASK	(UINT64_CAST 0xf << 5)
+#define RGPARM0_CLOCK_SHFT	1	/* Global clock select */
+#define RGPARM0_CLOCK_MASK	(UINT64_CAST 0xf << 1)
+#define RGPARM0_BYPEN_SHFT	0
+#define RGPARM0_BYPEN_MASK	(UINT64_CAST 1)	/* Bypass enable */
+
+/*
+ * RR_GLOBAL_PARMS1 shift and mask definitions
+ */
+
+#define RGPARM1_TTOWRAP_SHFT	12	/* Tail timeout wrap */
+#define RGPARM1_TTOWRAP_MASK	(UINT64_CAST 0xfffff << 12)
+#define RGPARM1_AGERATE_SHFT	8	/* Age rate */
+#define RGPARM1_AGERATE_MASK	(UINT64_CAST 0xf << 8)
+#define RGPARM1_JSWSTAT_SHFT	0	/* JTAG Sw Register bits */
+#define RGPARM1_JSWSTAT_MASK	(UINT64_CAST 0xff << 0)
+
+/*
+ * RR_DIAG_PARMS mask and shift definitions
+ */
+
+#define RDPARM_ABSHISTOGRAM	(UINT64_CAST 1 << 17)	/* Absolute histgrm */
+#define RDPARM_DEADLOCKRESET	(UINT64_CAST 1 << 16)	/* Reset on deadlck */
+#define RDPARM_DISABLE(_L)	(UINT64_CAST 1 << ((_L) +  7))
+#define RDPARM_SENDERROR(_L)	(UINT64_CAST 1 << ((_L) -  1))
+
+/*
+ * RR_DEBUG_ADDR mask and shift definitions
+ */
+
+#define RDA_DATA_SHFT		10	/* Observed debug data */
+#define RDA_DATA_MASK		(UINT64_CAST 0xffff << 10)
+#define RDA_ADDR_SHFT		0	/* debug address for data */
+#define RDA_ADDR_MASK		(UINT64_CAST 0x3ff << 0)
+
+/*
+ * RR_LB_TO_L2 mask and shift definitions
+ */
+
+#define RLBTOL2_DATA_VLD_SHFT	32	/* data is valid for JTAG controller */
+#define RLBTOL2_DATA_VLD_MASK	(UINT64_CAST 0x1 << 32)
+#define RLBTOL2_DATA_SHFT	0	/* data bits for JTAG controller */
+#define RLBTOL2_DATA_MASK	(UINT64_CAST 0xffffffff)
+
+/*
+ * RR_L2_TO_LB mask and shift definitions
+ */
+
+#define RL2TOLB_DATA_VLD_SHFT	33	/* data is valid from JTAG controller */
+#define RL2TOLB_DATA_VLD_MASK	(UINT64_CAST 0x1 << 33)
+#define RL2TOLB_PARITY_SHFT	32	/* sw implemented parity for data */
+#define RL2TOLB_PARITY_MASK	(UINT64_CAST 0x1 << 32)
+#define RL2TOLB_DATA_SHFT	0	/* data bits from JTAG controller */
+#define RL2TOLB_DATA_MASK	(UINT64_CAST 0xffffffff)
+
+/*
+ * RR_JBUS_CONTROL mask and shift definitions
+ */
+
+#define RJC_POS_BITS_SHFT	20	/* Router position bits */
+#define RJC_POS_BITS_MASK	(UINT64_CAST 0xf << 20)
+#define RJC_RD_DATA_STROBE_SHFT	16	/* count when read data is strobed in */
+#define RJC_RD_DATA_STROBE_MASK	(UINT64_CAST 0xf << 16)
+#define RJC_WE_OE_HOLD_SHFT	8	/* time OE or WE is held */
+#define RJC_WE_OE_HOLD_MASK	(UINT64_CAST 0xff << 8)
+#define RJC_ADDR_SET_HLD_SHFT	0	/* time address driven around OE/WE */
+#define RJC_ADDR_SET_HLD_MASK	(UINT64_CAST 0xff)
+
+/*
+ * RR_SCRATCH_REGx mask and shift definitions
+ *  note: these fields represent a software convention, and are not
+ *        understood/interpreted by the hardware. 
+ */
+
+#define	RSCR0_BOOTED_SHFT	63
+#define	RSCR0_BOOTED_MASK	(UINT64_CAST 0x1 << RSCR0_BOOTED_SHFT)
+#define RSCR0_LOCALID_SHFT	56
+#define RSCR0_LOCALID_MASK	(UINT64_CAST 0x7f << RSCR0_LOCALID_SHFT)
+#define	RSCR0_UNUSED_SHFT	48
+#define	RSCR0_UNUSED_MASK	(UINT64_CAST 0xff << RSCR0_UNUSED_SHFT)
+#define RSCR0_NIC_SHFT		0
+#define RSCR0_NIC_MASK		(UINT64_CAST 0xffffffffffff)
+
+#define RSCR1_MODID_SHFT	0
+#define RSCR1_MODID_MASK	(UINT64_CAST 0xffff)
+
+/*
+ * RR_VECTOR_HW_BAR mask and shift definitions
+ */
+
+#define BAR_TX_SHFT		27	/* Barrier in trans(m)it when read */
+#define BAR_TX_MASK		(UINT64_CAST 1 << BAR_TX_SHFT)
+#define BAR_VLD_SHFT		26	/* Valid Configuration */
+#define BAR_VLD_MASK		(UINT64_CAST 1 << BAR_VLD_SHFT)
+#define BAR_SEQ_SHFT		24	/* Sequence number */
+#define BAR_SEQ_MASK		(UINT64_CAST 3 << BAR_SEQ_SHFT)
+#define BAR_LEAFSTATE_SHFT	18	/* Leaf State */
+#define BAR_LEAFSTATE_MASK	(UINT64_CAST 0x3f << BAR_LEAFSTATE_SHFT)
+#define BAR_PARENT_SHFT		14	/* Parent Port */
+#define BAR_PARENT_MASK		(UINT64_CAST 0xf << BAR_PARENT_SHFT)
+#define BAR_CHILDREN_SHFT	6	/* Child Select port bits */
+#define BAR_CHILDREN_MASK	(UINT64_CAST 0xff << BAR_CHILDREN_SHFT)
+#define BAR_LEAFCOUNT_SHFT	0	/* Leaf Count to trigger parent */
+#define BAR_LEAFCOUNT_MASK	(UINT64_CAST 0x3f)
+
+/*
+ * RR_PORT_PARMS(_L) mask and shift definitions
+ */
+
+#define RPPARM_MIPRESETEN_SHFT	29	/* Message In Progress reset enable */
+#define RPPARM_MIPRESETEN_MASK	(UINT64_CAST 0x1 << 29)
+#define RPPARM_UBAREN_SHFT	28	/* Enable user barrier requests */
+#define RPPARM_UBAREN_MASK	(UINT64_CAST 0x1 << 28)
+#define RPPARM_OUTPDTO_SHFT	24	/* Output Port Deadlock TO value */
+#define RPPARM_OUTPDTO_MASK	(UINT64_CAST 0xf << 24)
+#define RPPARM_PORTMATE_SHFT	21	/* Port Mate for the port */
+#define RPPARM_PORTMATE_MASK	(UINT64_CAST 0x7 << 21)
+#define RPPARM_HISTEN_SHFT	20	/* Histogram counter enable */
+#define RPPARM_HISTEN_MASK	(UINT64_CAST 0x1 << 20)
+#define RPPARM_HISTSEL_SHFT	18
+#define RPPARM_HISTSEL_MASK	(UINT64_CAST 0x3 << 18)
+#define RPPARM_DAMQHS_SHFT	16
+#define RPPARM_DAMQHS_MASK	(UINT64_CAST 0x3 << 16)
+#define RPPARM_NULLTO_SHFT	10
+#define RPPARM_NULLTO_MASK	(UINT64_CAST 0x3f << 10)
+#define RPPARM_MAXBURST_SHFT	0
+#define RPPARM_MAXBURST_MASK	(UINT64_CAST 0x3ff)
+
+/*
+ * NOTE: Normally the kernel tracks only UTILIZATION statistics.
+ * The other 2 should not be used, except during any experimentation
+ * with the router.
+ */
+#define RPPARM_HISTSEL_AGE	0	/* Histogram age characterization.  */
+#define RPPARM_HISTSEL_UTIL	1	/* Histogram link utilization 	    */
+#define RPPARM_HISTSEL_DAMQ	2	/* Histogram DAMQ characterization. */
+
+/*
+ * RR_STATUS_ERROR(_L) and RR_ERROR_CLEAR(_L) mask and shift definitions
+ */
+#define RSERR_POWERNOK		(UINT64_CAST 1 << 38)
+#define RSERR_PORT_DEADLOCK     (UINT64_CAST 1 << 37)
+#define RSERR_WARMRESET         (UINT64_CAST 1 << 36)
+#define RSERR_LINKRESET         (UINT64_CAST 1 << 35)
+#define RSERR_RETRYTIMEOUT      (UINT64_CAST 1 << 34)
+#define RSERR_FIFOOVERFLOW	(UINT64_CAST 1 << 33)
+#define RSERR_ILLEGALPORT	(UINT64_CAST 1 << 32)
+#define RSERR_DEADLOCKTO_SHFT	28
+#define RSERR_DEADLOCKTO_MASK	(UINT64_CAST 0xf << 28)
+#define RSERR_RECVTAILTO_SHFT	24
+#define RSERR_RECVTAILTO_MASK	(UINT64_CAST 0xf << 24)
+#define RSERR_RETRYCNT_SHFT	16
+#define RSERR_RETRYCNT_MASK	(UINT64_CAST 0xff << 16)
+#define RSERR_CBERRCNT_SHFT	8
+#define RSERR_CBERRCNT_MASK	(UINT64_CAST 0xff << 8)
+#define RSERR_SNERRCNT_SHFT	0
+#define RSERR_SNERRCNT_MASK	(UINT64_CAST 0xff << 0)
+
+
+#define PORT_STATUS_UP		(1 << 0)	/* Router link up */
+#define PORT_STATUS_FENCE	(1 << 1)	/* Router link fenced */
+#define PORT_STATUS_RESETFAIL	(1 << 2)	/* Router link did not
+						 * come out of reset */
+#define PORT_STATUS_DISCFAIL	(1 << 3)	/* Router link failed after 
+						 * out of reset but before
+						 * router tables were
+						 * programmed
+						 */
+#define PORT_STATUS_KERNFAIL	(1 << 4)	/* Router link failed
+						 * after reset and the 
+						 * router tables were
+						 * programmed
+						 */
+#define PORT_STATUS_UNDEF	(1 << 5)	/* Unable to pinpoint
+						 * why the router link
+						 * went down
+						 */	
+#define PROBE_RESULT_BAD	(-1)		/* Set if any of the router
+						 * links failed after reset
+						 */
+#define PROBE_RESULT_GOOD	(0)		/* Set if all the router links
+						 * which came out of reset 
+						 * are up
+						 */
+
+/* Should be enough for 256 CPUs */
+#define MAX_RTR_BREADTH		64		/* Max # of routers possible */
+
+/* Get the require set of bits in a var. corr to a sequence of bits  */
+#define GET_FIELD(var, fname) \
+        ((var) >> fname##_SHFT & fname##_MASK >> fname##_SHFT)
+/* Set the require set of bits in a var. corr to a sequence of bits  */
+#define SET_FIELD(var, fname, fval) \
+        ((var) = (var) & ~fname##_MASK | (uint64_t) (fval) << fname##_SHFT)
+
+
+#if defined(_LANGUAGE_C) || defined(_LANGUAGE_C_PLUS_PLUS)
+
+typedef struct router_map_ent_s {
+	uint64_t	nic;
+	moduleid_t	module;
+	slotid_t	slot;
+} router_map_ent_t;
+
+struct rr_status_error_fmt {
+	uint64_t	rserr_unused		: 30,
+			rserr_fifooverflow	: 1,
+			rserr_illegalport	: 1,
+			rserr_deadlockto	: 4,
+			rserr_recvtailto	: 4,
+			rserr_retrycnt		: 8,
+			rserr_cberrcnt		: 8,
+			rserr_snerrcnt		: 8;
+};
+
+/*
+ * This type is used to store "absolute" counts of router events
+ */
+typedef int	router_count_t;
+
+/* All utilizations are on a scale from 0 - 1023. */
+#define RP_BYPASS_UTIL	0
+#define RP_RCV_UTIL	1
+#define RP_SEND_UTIL	2
+#define RP_TOTAL_PKTS	3	/* Free running clock/packet counter */
+
+#define RP_NUM_UTILS	3
+
+#define RP_HIST_REGS	2
+#define RP_NUM_BUCKETS  4
+#define RP_HIST_TYPES	3
+
+#define RP_AGE0		0
+#define RP_AGE1		1
+#define RP_AGE2		2
+#define RP_AGE3		3
+
+
+#define RR_UTIL_SCALE	1024
+
+/*
+ * Router port-oriented information
+ */
+typedef struct router_port_info_s {
+	router_reg_t	rp_histograms[RP_HIST_REGS];/* Port usage info */
+	router_reg_t	rp_port_error;		/* Port error info */
+	router_count_t	rp_retry_errors;	/* Total retry errors */
+	router_count_t	rp_sn_errors;		/* Total sn errors */
+	router_count_t	rp_cb_errors;		/* Total cb errors */
+	int		rp_overflows;		/* Total count overflows */
+	int		rp_excess_err;		/* Port has excessive errors */
+	ushort		rp_util[RP_NUM_BUCKETS];/* Port utilization */
+} router_port_info_t;
+
+#define ROUTER_INFO_VERSION	7
+
+struct lboard_s;
+
+/*
+ * Router information
+ */
+typedef struct router_info_s {
+	char		ri_version;	/* structure version		    */
+	cnodeid_t	ri_cnode;	/* cnode of its legal guardian hub  */
+	nasid_t		ri_nasid;	/* Nasid of same 		    */
+	char		ri_ledcache;	/* Last LED bitmap		    */
+	char		ri_leds;	/* Current LED bitmap		    */
+	char		ri_portmask;	/* Active port bitmap		    */
+	router_reg_t	ri_stat_rev_id;	/* Status rev ID value		    */
+	net_vec_t	ri_vector;	/* vector from guardian to router   */
+	int		ri_writeid;	/* router's vector write ID	    */
+	int64_t	ri_timebase;	/* Time of first sample		    */
+	int64_t	ri_timestamp;	/* Time of last sample		    */
+	router_port_info_t ri_port[MAX_ROUTER_PORTS]; /* per port info      */
+	moduleid_t	ri_module;	/* Which module are we in?	    */
+	slotid_t	ri_slotnum;	/* Which slot are we in?	    */
+	router_reg_t	ri_glbl_parms[GLBL_PARMS_REGS];
+					/* Global parms0&1 register contents*/
+	devfs_handle_t	ri_vertex;	/* hardware graph vertex            */
+	router_reg_t	ri_prot_conf;	/* protection config. register	    */
+	int64_t	ri_per_minute;	/* Ticks per minute		    */
+
+	/*
+	 * Everything below here is for kernel use only and may change
+	 * at any time with or without a change in the revision number
+	 *
+	 * Any pointers or things that come and go with DEBUG must go at
+ 	 * the bottom of the structure, below the user stuff.
+	 */
+	char		ri_hist_type;   /* histogram type		    */
+	devfs_handle_t	ri_guardian;	/* guardian node for the router	    */
+	int64_t	ri_last_print;	/* When did we last print	    */
+	char		ri_print;	/* Should we print 		    */
+	char 		ri_just_blink;	/* Should we blink the LEDs         */
+	
+#ifdef DEBUG
+	int64_t	ri_deltatime;	/* Time it took to sample	    */
+#endif
+	lock_t		ri_lock;	/* Lock for access to router info   */
+	net_vec_t	*ri_vecarray;	/* Pointer to array of vectors	    */
+	struct lboard_s	*ri_brd;	/* Pointer to board structure	    */
+	char *		ri_name;	/* This board's hwg path 	    */
+        unsigned char	ri_port_maint[MAX_ROUTER_PORTS]; /* should we send a 
+					message to availmon */
+} router_info_t;
+
+
+/* Router info location specifiers */
+
+#define RIP_PROMLOG			2	/* Router info in promlog */
+#define RIP_CONSOLE			4	/* Router info on console */
+
+#define ROUTER_INFO_PRINT(_rip,_where)	(_rip->ri_print |= _where)	
+					/* Set the field used to check if a 
+					 * router info can be printed
+					 */
+#define IS_ROUTER_INFO_PRINTED(_rip,_where)	\
+					(_rip->ri_print & _where)	
+					/* Was the router info printed to
+					 * the given location (_where) ?
+					 * Mainly used to prevent duplicate
+					 * router error states.
+					 */
+#define ROUTER_INFO_LOCK(_rip,_s)	_s = mutex_spinlock(&(_rip->ri_lock))
+					/* Take the lock on router info
+					 * to gain exclusive access
+					 */
+#define ROUTER_INFO_UNLOCK(_rip,_s)	mutex_spinunlock(&(_rip->ri_lock),_s)
+					/* Release the lock on router info */
+/* 
+ * Router info hanging in the nodepda 
+ */
+typedef struct nodepda_router_info_s {
+	devfs_handle_t 	router_vhdl;	/* vertex handle of the router 	    */
+	short		router_port;	/* port thru which we entered       */
+	short		router_portmask;
+	moduleid_t	router_module;	/* module in which router is there  */
+	slotid_t	router_slot;	/* router slot			    */
+	unsigned char	router_type;	/* kind of router 		    */
+	net_vec_t	router_vector;	/* vector from the guardian node    */
+
+	router_info_t	*router_infop;	/* info hanging off the hwg vertex  */
+	struct nodepda_router_info_s *router_next;
+	                                /* pointer to next element 	    */
+} nodepda_router_info_t;
+
+#define ROUTER_NAME_SIZE	20	/* Max size of a router name */
+
+#define NORMAL_ROUTER_NAME	"normal_router"
+#define NULL_ROUTER_NAME	"null_router"
+#define META_ROUTER_NAME	"meta_router"
+#define UNKNOWN_ROUTER_NAME	"unknown_router" 
+
+/* The following definitions are needed by the router traversing
+ * code either using the hardware graph or using vector operations.
+ */
+/* Structure of the router queue element */
+typedef struct router_elt_s {
+	union {
+		/* queue element structure during router probing */
+		struct {
+			/* number-in-a-can (unique) for the router */
+			nic_t		nic;	
+			/* vector route from the master hub to 
+			 * this router.
+			 */
+			net_vec_t	vec;	
+			/* port status */
+			uint64_t	status;	
+			char		port_status[MAX_ROUTER_PORTS + 1];
+		} r_elt;
+		/* queue element structure during router guardian 
+		 * assignment
+		 */
+		struct {
+			/* vertex handle for the router */
+			devfs_handle_t	vhdl;
+			/* guardian for this router */
+			devfs_handle_t	guard;	
+			/* vector router from the guardian to the router */
+			net_vec_t	vec;
+		} k_elt;
+	} u;
+	                        /* easy to use port status interpretation */
+} router_elt_t;
+
+/* structure of the router queue */
+
+typedef struct router_queue_s {
+	char		head;	/* Point where a queue element is inserted */
+	char		tail;	/* Point where a queue element is removed */
+	int		type;
+	router_elt_t	array[MAX_RTR_BREADTH];
+	                        /* Entries for queue elements */
+} router_queue_t;
+
+
+#endif /* _LANGUAGE_C || _LANGUAGE_C_PLUS_PLUS */
+
+/*
+ * RR_HISTOGRAM(_L) mask and shift definitions
+ * There are two 64 bit histogram registers, so the following macros take
+ * into account dealing with an array of 4 32 bit values indexed by _x
+ */
+
+#define RHIST_BUCKET_SHFT(_x)	(32 * ((_x) & 0x1))
+#define RHIST_BUCKET_MASK(_x)	(UINT64_CAST 0xffffffff << RHIST_BUCKET_SHFT((_x) & 0x1))
+#define RHIST_GET_BUCKET(_x, _reg)	\
+	((RHIST_BUCKET_MASK(_x) & ((_reg)[(_x) >> 1])) >> RHIST_BUCKET_SHFT(_x))
+
+/*
+ * RR_RESET_MASK(_L) mask and shift definitions
+ */
+
+#define RRM_RESETOK(_L)		(UINT64_CAST 1 << ((_L) - 1))
+#define RRM_RESETOK_ALL		(UINT64_CAST 0x3f)
+
+/*
+ * RR_META_TABLE(_x) and RR_LOCAL_TABLE(_x) mask and shift definitions
+ */
+
+#define RTABLE_SHFT(_L)		(4 * ((_L) - 1))
+#define RTABLE_MASK(_L)		(UINT64_CAST 0x7 << RTABLE_SHFT(_L))
+
+
+#define	ROUTERINFO_STKSZ	4096
+
+#if defined(_LANGUAGE_C) || defined(_LANGUAGE_C_PLUS_PLUS)
+#if defined(_LANGUAGE_C_PLUS_PLUS)
+extern "C" {
+#endif
+
+int router_reg_read(router_info_t *rip, int regno, router_reg_t *val);
+int router_reg_write(router_info_t *rip, int regno, router_reg_t val);
+int router_get_info(devfs_handle_t routerv, router_info_t *, int);
+int router_init(cnodeid_t cnode,int writeid, nodepda_router_info_t *npda_rip);
+int router_set_leds(router_info_t *rip);
+void router_print_state(router_info_t *rip, int level,
+		   void (*pf)(int, char *, ...),int print_where);
+void capture_router_stats(router_info_t *rip);
+
+
+int 	probe_routers(void);
+void 	get_routername(unsigned char brd_type,char *rtrname);
+void 	router_guardians_set(devfs_handle_t hwgraph_root);
+int 	router_hist_reselect(router_info_t *, int64_t);
+#if defined(_LANGUAGE_C_PLUS_PLUS)
+}
+#endif
+#endif /* _LANGUAGE_C || _LANGUAGE_C_PLUS_PLUS */
+
+#endif /* _ASM_SN_SN1_ROUTER_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/slotnum.h linux/include/asm-ia64/sn/sn1/slotnum.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/slotnum.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/slotnum.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,86 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+#ifndef _ASM_SN_SN1_SLOTNUM_H
+#define _ASM_SN_SN1_SLOTNUM_H
+
+#define SLOTNUM_MAXLENGTH	16
+
+/*
+ * This file attempts to define a slot number space across all slots
+ * an IP27 module.  Here, we deal with the top level slots.
+ *
+ *	Node slots
+ *	Router slots
+ *	Crosstalk slots
+ *
+ *	Other slots are children of their parent crosstalk slot:
+ *		PCI slots
+ *		VME slots
+ */
+// #include <slotnum.h>
+
+// #ifdef NOTDEF	/* moved to sys/slotnum.h */
+#define SLOTNUM_NODE_CLASS	0x00	/* Node   */
+#define SLOTNUM_ROUTER_CLASS	0x10	/* Router */
+#define SLOTNUM_XTALK_CLASS	0x20	/* Xtalk  */
+#define SLOTNUM_MIDPLANE_CLASS	0x30	/* Midplane */
+#define SLOTNUM_XBOW_CLASS	0x40	/* Xbow  */
+#define SLOTNUM_KNODE_CLASS	0x50	/* Kego node */
+#define SLOTNUM_INVALID_CLASS	0xf0	/* Invalid */
+
+#define SLOTNUM_CLASS_MASK	0xf0
+#define SLOTNUM_SLOT_MASK	0x0f
+
+#define SLOTNUM_GETCLASS(_sn)	((_sn) & SLOTNUM_CLASS_MASK)
+#define SLOTNUM_GETSLOT(_sn)	((_sn) & SLOTNUM_SLOT_MASK)
+// #endif	/* NOTDEF */
+
+/* This determines module to pnode mapping. */
+/* NODESLOTS_PER_MODULE has changed from 4 to 6
+ * to support the 12P 4IO configuration. This change
+ * helps in minimum  number of changes to code which
+ * depend on the number of node boards within a module.
+ */
+#define NODESLOTS_PER_MODULE		6
+#define NODESLOTS_PER_MODULE_SHFT	2
+
+#define HIGHEST_I2C_VISIBLE_NODESLOT	4
+#define	RTRSLOTS_PER_MODULE		2
+
+#if __KERNEL__
+#include <asm/sn/xtalk/xtalk.h>
+
+extern slotid_t xbwidget_to_xtslot(int crossbow, int widget);
+extern slotid_t hub_slotbits_to_slot(slotid_t slotbits);
+extern slotid_t hub_slot_to_crossbow(slotid_t hub_slot);
+extern slotid_t router_slotbits_to_slot(slotid_t slotbits);
+extern slotid_t get_node_slotid(nasid_t nasid);
+extern slotid_t get_my_slotid(void);
+extern slotid_t get_node_crossbow(nasid_t);
+extern xwidgetnum_t hub_slot_to_widget(slotid_t);
+extern void get_slotname(slotid_t, char *);
+extern void get_my_slotname(char *);
+extern slotid_t get_widget_slotnum(int xbow, int widget);
+extern void get_widget_slotname(int, int, char *);
+extern void router_slotbits_to_slotname(int, char *);
+extern slotid_t meta_router_slotbits_to_slot(slotid_t) ;
+extern slotid_t hub_slot_get(void);
+
+extern int node_can_talk_to_elsc(void);
+
+extern int  slot_to_widget(int) ;
+#define MAX_IO_SLOT_NUM		12
+#define MAX_NODE_SLOT_NUM	4
+#define MAX_ROUTER_SLOTNUM	2
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_SN_SN1_SLOTNUM_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/sn1.h linux/include/asm-ia64/sn/sn1/sn1.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/sn1.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/sn1.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,34 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+
+/*
+ * sn1.h -- hardware specific defines for sn1 boards
+ * The defines used here are used to limit the size of 
+ * various datastructures in the PROM. eg. KLCFGINFO, MPCONF etc.
+ */
+
+#ifndef _ASM_SN_SN1_SN1_H
+#define _ASM_SN_SN1_SN1_H
+
+extern xwidgetnum_t hub_widget_id(nasid_t);
+extern nasid_t get_nasid(void);
+extern int	get_slice(void);
+extern int     is_fine_dirmode(void);
+extern hubreg_t get_hub_chiprev(nasid_t nasid);
+extern hubreg_t get_region(cnodeid_t);
+extern hubreg_t nasid_to_region(nasid_t);
+extern int      verify_snchip_rev(void);
+extern void 	ni_reset_port(void);
+
+#ifdef SN1_USE_POISON_BITS
+extern int hub_bte_poison_ok(void);
+#endif /* SN1_USE_POISON_BITS */
+
+#endif /* _ASM_SN_SN1_SN1_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/uart16550.h linux/include/asm-ia64/sn/sn1/uart16550.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/uart16550.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/uart16550.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,214 @@
+
+/*
+ * Definitions for 16550  chip
+ */
+
+	/* defined as offsets from the data register */
+#define REG_DAT     0   /* receive/transmit data */
+#define REG_ICR     1   /* interrupt control register */
+#define REG_ISR     2   /* interrupt status register */
+#define REG_FCR     2   /* fifo control register */
+#define REG_LCR     3   /* line control register */
+#define REG_MCR     4   /* modem control register */
+#define REG_LSR     5   /* line status register */
+#define REG_MSR     6   /* modem status register */
+#define REG_SCR     7   /* Scratch register      */
+#define REG_DLL     0   /* divisor latch (lsb) */
+#define REG_DLH     1   /* divisor latch (msb) */
+#define REG_EFR		2	/* 16650 enhanced feature register */
+
+/*
+ * 16450/16550 Registers Structure.
+ */
+
+/* Line Control Register */
+#define		LCR_WLS0	0x01	/*word length select bit 0 */	
+#define		LCR_WLS1	0x02	/*word length select bit 1 */	
+#define		LCR_STB	0x04		/* number of stop bits */
+#define		LCR_PEN	0x08		/* parity enable */
+#define		LCR_EPS	0x10		/* even parity select */
+#define		LCR_SETBREAK 0x40	/* break key */
+#define		LCR_DLAB	0x80	/* divisor latch access bit */
+#define 	LCR_RXLEN   0x03    /* # of data bits per received/xmitted char */
+#define 	LCR_STOP1   0x00
+#define 	LCR_STOP2   0x04
+#define 	LCR_PAREN   0x08
+#define 	LCR_PAREVN  0x10
+#define 	LCR_PARMARK 0x20
+#define 	LCR_SNDBRK  0x40
+#define 	LCR_DLAB    0x80
+
+
+#define		LCR_BITS5	0x00	/* 5 bits per char */
+#define		LCR_BITS6	0x01	/* 6 bits per char */
+#define		LCR_BITS7	0x02	/* 7 bits per char */
+#define		LCR_BITS8	0x03	/* 8 bits per char */
+
+#define		LCR_MASK_BITS_CHAR 		0x03
+#define 	LCR_MASK_STOP_BITS		0x04
+#define		LCR_MASK_PARITY_BITS	0x18
+
+
+/* Line Status Register */
+#define		LSR_RCA	0x01		/* data ready */
+#define		LSR_OVRRUN	0x02	/* overrun error */
+#define		LSR_PARERR	0x04	/* parity error */
+#define		LSR_FRMERR	0x08	/* framing error */
+#define		LSR_BRKDET 	0x10	/* a break has arrived */
+#define		LSR_XHRE	0x20	/* tx hold reg is now empty */
+#define		LSR_XSRE	0x40	/* tx shift reg is now empty */
+#define		LSR_RFBE	0x80	/* rx FIFO Buffer error */
+
+/* Interrupt Status Register */
+#define		ISR_MSTATUS	0x00
+#define		ISR_TxRDY	0x02
+#define		ISR_RxRDY	0x04
+#define		ISR_ERROR_INTR	0x08
+#define		ISR_FFTMOUT 0x0c	/* FIFO Timeout */
+#define		ISR_RSTATUS 0x06	/* Receiver Line status */
+
+/* Interrupt Enable Register */
+#define		ICR_RIEN	0x01	/* Received Data Ready */
+#define		ICR_TIEN	0x02	/* Tx Hold Register Empty */
+#define		ICR_SIEN	0x04	/* Receiver Line Status */
+#define		ICR_MIEN	0x08	/* Modem Status */
+
+/* Modem Control Register */
+#define		MCR_DTR		0x01	/* Data Terminal Ready */
+#define		MCR_RTS		0x02	/* Request To Send */
+#define		MCR_OUT1	0x04	/* Aux output - not used */
+#define		MCR_OUT2	0x08	/* turns intr to 386 on/off */	
+#define		MCR_LOOP	0x10	/* loopback for diagnostics */
+#define		MCR_AFE 	0x20	/* Auto flow control enable */
+
+/* Modem Status Register */
+#define		MSR_DCTS	0x01	/* Delta Clear To Send */
+#define		MSR_DDSR	0x02	/* Delta Data Set Ready */
+#define		MSR_DRI		0x04	/* Trail Edge Ring Indicator */
+#define		MSR_DDCD	0x08	/* Delta Data Carrier Detect */
+#define		MSR_CTS		0x10	/* Clear To Send */
+#define		MSR_DSR		0x20	/* Data Set Ready */
+#define		MSR_RI		0x40	/* Ring Indicator */
+#define		MSR_DCD		0x80	/* Data Carrier Detect */
+
+#define 	DELTAS(x) 	((x)&(MSR_DCTS|MSR_DDSR|MSR_DRI|MSR_DDCD))
+#define 	STATES(x) 	((x)&(MSR_CTS|MSR_DSR|MSR_RI|MSR_DCD))
+
+
+#define		FCR_FIFOEN	0x01	/* enable receive/transmit fifo */
+#define		FCR_RxFIFO	0x02	/* enable receive fifo */
+#define		FCR_TxFIFO	0x04	/* enable transmit fifo */
+#define 	FCR_MODE1	0x08	/* change to mode 1 */
+#define		RxLVL0		0x00	/* Rx fifo level at 1	*/
+#define		RxLVL1		0x40	/* Rx fifo level at 4 */
+#define		RxLVL2		0x80	/* Rx fifo level at 8 */
+#define		RxLVL3		0xc0	/* Rx fifo level at 14 */
+
+#define 	FIFOEN		(FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO | RxLVL3 | FCR_MODE1) 
+
+#define		FCT_TxMASK	0x30	/* mask for Tx trigger */
+#define		FCT_RxMASK	0xc0	/* mask for Rx trigger */
+
+/* enhanced features register */
+#define		EFR_SFLOW	0x0f	/* various S/w Flow Controls */
+#define 	EFR_EIC		0x10	/* Enhanced Interrupt Control bit */
+#define 	EFR_SCD		0x20	/* Special Character Detect */
+#define 	EFR_RTS		0x40	/* RTS flow control */
+#define 	EFR_CTS		0x80	/* CTS flow control */
+
+/* Rx Tx software flow controls in 16650 enhanced mode */
+#define		SFLOW_Tx0	0x00	/* no Xmit flow control */
+#define		SFLOW_Tx1	0x08	/* Transmit Xon1, Xoff1 */
+#define		SFLOW_Tx2	0x04	/* Transmit Xon2, Xoff2 */
+#define		SFLOW_Tx3	0x0c	/* Transmit Xon1,Xon2, Xoff1,Xoff2 */
+#define		SFLOW_Rx0	0x00	/* no Rcv flow control */
+#define		SFLOW_Rx1	0x02	/* Receiver compares Xon1, Xoff1 */
+#define		SFLOW_Rx2	0x01	/* Receiver compares Xon2, Xoff2 */
+
+#define	ASSERT_DTR(x)		(x |= MCR_DTR)
+#define	ASSERT_RTS(x)		(x |= MCR_RTS)
+#define DU_RTS_ASSERTED(x)  (((x) & MCR_RTS) != 0)
+#define DU_RTS_ASSERT(x)    ((x) |= MCR_RTS)
+#define DU_RTS_DEASSERT(x)  ((x) &= ~MCR_RTS)
+
+
+/*
+ * ioctl(fd, I_STR, arg)
+ * use the SIOC_RS422 and SIOC_EXTCLK combination to support MIDI
+ */
+#define SIOC        ('z' << 8)  /* z for z85130 */
+#define SIOC_EXTCLK (SIOC | 1)  /* select/de-select external clock */
+#define SIOC_RS422  (SIOC | 2)  /* select/de-select RS422 protocol */
+#define SIOC_ITIMER (SIOC | 3)  /* upstream timer adjustment */
+#define SIOC_LOOPBACK   (SIOC | 4)  /* diagnostic loopback test mode */
+
+
+/* channel control register */
+#define	DMA_INT_MASK		0xe0	/* ring intr mask */
+#define DMA_INT_TH25		0x20	/* 25% threshold */
+#define DMA_INT_TH50		0x40	/* 50% threshold */
+#define DMA_INT_TH75		0x60	/* 75% threshold */
+#define DMA_INT_EMPTY		0x80	/* ring buffer empty */
+#define DMA_INT_NEMPTY		0xa0	/* ring buffer not empty */
+#define DMA_INT_FULL		0xc0	/* ring buffer full */
+#define DMA_INT_NFULL		0xe0	/* ring buffer not full */
+
+#define DMA_CHANNEL_RESET	0x400	/* reset dma channel */
+#define DMA_ENABLE			0x200	/* enable DMA */
+
+/* peripheral controller intr status bits applicable to serial ports */
+#define ISA_SERIAL0_MASK 		0x03f00000	/* mask for port #1 intrs */
+#define ISA_SERIAL0_DIR			0x00100000	/* device intr request */
+#define ISA_SERIAL0_Tx_THIR		0x00200000	/* Transmit DMA threshold */
+#define ISA_SERIAL0_Tx_PREQ		0x00400000	/* Transmit DMA pair req */
+#define ISA_SERIAL0_Tx_MEMERR	0x00800000	/* Transmit DMA memory err */
+#define ISA_SERIAL0_Rx_THIR		0x01000000	/* Receive DMA threshold  */
+#define ISA_SERIAL0_Rx_OVERRUN	0x02000000	/* Receive DMA over-run  */
+
+#define ISA_SERIAL1_MASK 		0xfc000000	/* mask for port #1 intrs */
+#define ISA_SERIAL1_DIR			0x04000000	/* device intr request */
+#define ISA_SERIAL1_Tx_THIR		0x08000000	/* Transmit DMA threshold */
+#define ISA_SERIAL1_Tx_PREQ		0x10000000	/* Transmit DMA pair req */
+#define ISA_SERIAL1_Tx_MEMERR	0x20000000	/* Transmit DMA memory err */
+#define ISA_SERIAL1_Rx_THIR		0x40000000	/* Receive DMA threshold  */
+#define ISA_SERIAL1_Rx_OVERRUN	0x80000000	/* Receive DMA over-run  */
+
+#define MAX_RING_BLOCKS		128			/* 4096/32 */
+#define MAX_RING_SIZE		4096
+
+/* DMA Input Control Byte */
+#define	DMA_IC_OVRRUN	0x01	/* overrun error */
+#define	DMA_IC_PARERR	0x02	/* parity error */
+#define	DMA_IC_FRMERR	0x04	/* framing error */
+#define	DMA_IC_BRKDET 	0x08	/* a break has arrived */
+#define DMA_IC_VALID	0x80	/* pair is valid */
+
+/* DMA Output Control Byte */
+#define DMA_OC_TxINTR	0x20	/* set Tx intr after processing byte */
+#define DMA_OC_INVALID	0x00	/* invalid pair */
+#define DMA_OC_WTHR		0x40	/* Write byte to THR */
+#define DMA_OC_WMCR		0x80	/* Write byte to MCR */
+#define DMA_OC_DELAY	0xc0	/* time delay before next xmit */
+
+/* ring id's */
+#define RID_SERIAL0_TX	0x4		/* serial port 0, transmit ring buffer */
+#define RID_SERIAL0_RX	0x5		/* serial port 0, receive ring buffer */
+#define RID_SERIAL1_TX	0x6		/* serial port 1, transmit ring buffer */
+#define RID_SERIAL1_RX	0x7		/* serial port 1, receive ring buffer */
+
+#define CLOCK_XIN			22
+#define PRESCALER_DIVISOR	3
+#define CLOCK_ACE			7333333
+
+/*
+ * increment the ring offset. One way to do this would be to add b'100000.
+ * this would let the offset value roll over automatically when it reaches
+ * its maximum value (127). However when we use the offset, we must use
+ * the appropriate bits only by masking with 0xfe0.
+ * The other option is to shift the offset right by 5 bits and look at its
+ * value. Then increment if required and shift back
+ * note: 127 * 2^5 = 4064
+ */
+#define INC_RING_POINTER(x) \
+	( ((x & 0xffe0) < 4064) ? (x += 32) : 0 )
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/war.h linux/include/asm-ia64/sn/sn1/war.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn1/war.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn1/war.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,25 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN1_WAR_H
+#define _ASM_SN_SN1_WAR_H
+
+/****************************************************************************
+ * Support macros and definitions for hardware workarounds in		  *
+ * early chip versions.                                                     *
+ ****************************************************************************/
+
+/*
+ * This is the bitmap of runtime-switched workarounds.
+ */
+typedef short warbits_t;
+
+extern int warbits_override;
+
+#endif /*  _ASM_SN_SN1_WAR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn_cpuid.h linux/include/asm-ia64/sn/sn_cpuid.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn_cpuid.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn_cpuid.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,198 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Jack Steiner (steiner@sgi.com)
+ */
+
+
+#ifndef _ASM_IA64_SN_SN_CPUID_H
+#define _ASM_IA64_SN_SN_CPUID_H
+
+#include <asm/sn/mmzone_sn1.h>
+
+/*
+ * Functions for converting between cpuids, nodeids and NASIDs.
+ * 
+ * These are for SGI platforms only.
+ *
+ */
+
+
+
+
+/*
+ * The following assumes the following mappings for LID register values:
+ *
+ *         LID
+ *		31:24 - id   Contains the NASID
+ *		23:16 - eid  Contains 0-3 to identify the cpu on the node
+ *				bit 17 - synergy number
+ *				bit 16 - FSB number 
+ *
+ * 	   SAPICID
+ *		This is the same as 31:24 of LID
+ *
+ * The macros convert between cpuid & slice/fsb/synergy/nasid/cnodeid.
+ * These terms are described below:
+ *
+ *
+ *          -----   -----           -----   -----       CPU
+ *          | 0 |   | 1 |           | 2 |   | 3 |       SLICE
+ *          -----   -----           -----   -----
+ *            |       |               |       |
+ *            |       |               |       |
+ *          0 |       | 1           0 |       | 1       FSB
+ *             -------                 -------  
+ *                |                       |
+ *                |                       |
+ *             -------                 -------
+ *             |     |                 |     |
+ *             |  0  |                 |  1  |         SYNERGY
+ *             |     |                 |     |
+ *             -------                 -------
+ *                |                       |
+ *                |                       |
+ *             -------------------------------
+ *             |                             |
+ *             |         BEDROCK             |        NASID   (0..127)
+ *             |                             |        CNODEID (0..numnodes-1)
+ *             |                             |
+ *             |                             |
+ *             -------------------------------
+ *                           |
+ *
+ */
+
+
+
+#define sapicid_to_nasid(sid)		((sid) >> 8)
+#define sapicid_to_synergy(sid)		(((sid) >> 1) & 1)
+#define sapicid_to_fsb(sid)		((sid) & 1)
+#define sapicid_to_slice(sid)		((sid) & 3)
+
+/*
+ * NOTE: id & eid refer to Intels definitions of the LID register
+ *	(id = NASID, eid = slice)
+ * NOTE: on non-MP systems, only cpuid 0 exists
+ */
+#define id_eid_to_sapicid(id,eid)       (((id)<<8) | (eid))
+#define id_eid_to_cpuid(id,eid)         ((NASID_TO_CNODEID(id)<<2) | (eid))
+
+
+/*
+ * The following table/struct is for translating between sapicid and cpuids.
+ * It is also used for managing PTC coherency domains.
+ */
+typedef struct {
+	u8	domain;
+	u8	reserved;
+	u16	sapicid;
+} sn_sapicid_info_t;
+
+extern sn_sapicid_info_t	sn_sapicid_info[];	/* indexed by cpuid */
+
+
+
+/*
+ * cpuid_to_spaicid  - Convert a cpuid to a SAPIC id of the cpu. 
+ * The SAPIC id is the same as bits 31:16 of the LID register.
+ */
+static __inline__ int
+cpuid_to_spaicid(int cpuid)
+{
+#ifdef CONFIG_SMP
+	return cpu_physical_id(cpuid);
+#else
+	return ((ia64_get_lid() >> 16) & 0xffff);
+#endif
+}
+
+
+/*
+ * cpuid_to_fsb_slot  - convert a cpuid to the fsb slot number that it is in.
+ *   (there are 2 cpus per FSB. This function returns 0 or 1)
+ */
+static __inline__ int
+cpuid_to_fsb_slot(int cpuid)
+{
+	return sapicid_to_fsb(cpuid_to_spaicid(cpuid));
+}
+
+
+/*
+ * cpuid_to_synergy  - convert a cpuid to the synergy that it resides on
+ *   (there are 2 synergies per node. Function returns 0 or 1 to
+ *    specify which synergy the cpu is on)
+ */
+static __inline__ int
+cpuid_to_synergy(int cpuid)
+{
+	return sapicid_to_synergy(cpuid_to_spaicid(cpuid));
+}
+
+
+/*
+ * cpuid_to_slice  - convert a cpuid to the slice that it resides on
+ *  There are 4 cpus per node. This function returns 0 .. 3)
+ */
+static __inline__ int
+cpuid_to_slice(int cpuid)
+{
+	return sapicid_to_slice(cpuid_to_spaicid(cpuid));
+}
+
+
+/*
+ * cpuid_to_nasid  - convert a cpuid to the NASID that it resides on
+ */
+static __inline__ int
+cpuid_to_nasid(int cpuid)
+{
+	return sapicid_to_nasid(cpuid_to_spaicid(cpuid));
+}
+
+
+/*
+ * cpuid_to_cnodeid  - convert a cpuid to the cnode that it resides on
+ */
+static __inline__ int
+cpuid_to_cnodeid(int cpuid)
+{
+	return nasid_map[cpuid_to_nasid(cpuid)];
+}
+
+static __inline__ int
+cnodeid_to_nasid(int cnodeid)
+{
+	int i;
+	for (i = 0; i < MAXNASIDS; i++) {
+		if (nasid_map[i] == cnodeid) {
+			return(i);
+		}
+	}
+	return(-1);
+}
+
+static __inline__ int
+cnode_slice_to_cpuid(int cnodeid, int slice) {
+	return(id_eid_to_cpuid(cnodeid_to_nasid(cnodeid),slice));
+}
+
+static __inline__ int
+cpuid_to_subnode(int cpuid) {
+	int ret = cpuid_to_slice(cpuid);
+	if (ret < 2) return 0;
+	else return 1;
+}
+
+static __inline__ int
+cpuid_to_localslice(int cpuid) {
+	return(cpuid_to_slice(cpuid) & 1);
+}
+
+
+#endif /* _ASM_IA64_SN_SN_CPUID_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn_fru.h linux/include/asm-ia64/sn/sn_fru.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn_fru.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn_fru.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,49 @@
+/**************************************************************************
+ *									  *
+ *		 Copyright (C) 1992-1997, Silicon Graphics, Inc.	  *
+ *									  *
+ *  These coded instructions, statements, and computer programs	 contain  *
+ *  unpublished	 proprietary  information of Silicon Graphics, Inc., and  *
+ *  are protected by Federal copyright law.  They  may	not be disclosed  *
+ *  to	third  parties	or copied or duplicated in any form, in whole or  *
+ *  in part, without the prior written consent of Silicon Graphics, Inc.  *
+ *									  *
+ **************************************************************************/
+#ifndef __SYS_SN_SN0_FRU_H__
+#define __SYS_SN_SN0_FRU_H__
+
+#define MAX_DIMMS			8	 /* max # of dimm banks */
+#define MAX_PCIDEV			8	 /* max # of pci devices on a pci bus */
+
+typedef unsigned char confidence_t;
+
+typedef struct kf_mem_s {
+	confidence_t km_confidence; /* confidence level that the memory is bad
+				     * is this necessary ?
+				     */
+	confidence_t km_dimm[MAX_DIMMS];
+				    /* confidence level that dimm[i] is bad
+				     *I think this is the right number
+				     */
+
+} kf_mem_t;
+
+typedef struct kf_cpu_s {
+	confidence_t	kc_confidence; /* confidence level that cpu is bad */
+	confidence_t	kc_icache; /* confidence level that instr. cache is bad */
+	confidence_t	kc_dcache; /* confidence level that data   cache is bad */
+	confidence_t	kc_scache; /* confidence level that sec.   cache is bad */
+	confidence_t	kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
+} kf_cpu_t;
+
+
+typedef struct kf_pci_bus_s {
+	confidence_t	kpb_belief;	/* confidence level  that the  pci bus is bad */
+	confidence_t	kpb_pcidev_belief[MAX_PCIDEV];
+					/* confidence level that the pci dev is bad */
+} kf_pci_bus_t;
+
+
+
+#endif /* #ifdef __SYS_SN_SN0_FRU_H__ */
+
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/sn_private.h linux/include/asm-ia64/sn/sn_private.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/sn_private.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/sn_private.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,302 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_PRIVATE_H
+#define _ASM_SN_PRIVATE_H
+
+#include <asm/sn/nodepda.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/xtalk/xtalk_private.h>
+
+extern nasid_t master_nasid;
+
+extern hubreg_t get_region(cnodeid_t);
+extern hubreg_t	nasid_to_region(nasid_t);
+/* promif.c */
+#ifndef CONFIG_IA64_SGI_IO
+extern cpuid_t cpu_node_probe(cpumask_t *cpumask, int *numnodes);
+#endif
+extern void he_arcs_set_vectors(void);
+extern void mem_init(void);
+#ifndef CONFIG_IA64_SGI_IO
+extern int cpu_enabled(cpuid_t);
+#endif
+extern void cpu_unenable(cpuid_t);
+extern nasid_t get_lowest_nasid(void);
+extern __psunsigned_t get_master_bridge_base(void);
+extern void set_master_bridge_base(void);
+extern int check_nasid_equiv(nasid_t, nasid_t);
+extern nasid_t get_console_nasid(void);
+extern char get_console_pcislot(void);
+#ifndef CONFIG_IA64_SGI_IO
+extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
+#endif
+
+extern int is_master_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid);
+
+/* memsupport.c */
+extern void poison_state_alter_range(__psunsigned_t start, int len, int poison);
+extern int memory_present(paddr_t);
+extern int memory_read_accessible(paddr_t);
+extern int memory_write_accessible(paddr_t);
+extern void memory_set_access(paddr_t, int, int);
+extern void show_dir_state(paddr_t, void (*)(char *, ...));
+extern void check_dir_state(nasid_t, int, void (*)(char *, ...));
+extern void set_dir_owner(paddr_t, int);
+extern void set_dir_state(paddr_t, int);
+extern void set_dir_state_POISONED(paddr_t);
+extern void set_dir_state_UNOWNED(paddr_t);
+extern int is_POISONED_dir_state(paddr_t);
+extern int is_UNOWNED_dir_state(paddr_t);
+extern void get_dir_ent(paddr_t paddr, int *state,
+			uint64_t *vec_ptr, hubreg_t *elo);
+
+/* intr.c */
+#if defined(NEW_INTERRUPTS)
+extern int intr_reserve_level(cpuid_t cpu, int level, int err, devfs_handle_t owner_dev, char *name);
+extern void intr_unreserve_level(cpuid_t cpu, int level);
+extern int intr_connect_level(cpuid_t cpu, int bit, ilvl_t mask_no, 
+			intr_func_t intr_func, void *intr_arg,
+			intr_func_t intr_prefunc);
+extern int intr_disconnect_level(cpuid_t cpu, int bit);
+extern cpuid_t intr_heuristic(devfs_handle_t dev, device_desc_t dev_desc,
+			      int req_bit,int intr_resflags,devfs_handle_t owner_dev,
+			      char *intr_name,int *resp_bit);
+#endif	/* NEW_INTERRUPTS */
+extern void intr_block_bit(cpuid_t cpu, int bit);
+extern void intr_unblock_bit(cpuid_t cpu, int bit);
+extern void setrtvector(intr_func_t);
+extern void install_cpuintr(cpuid_t cpu);
+extern void install_dbgintr(cpuid_t cpu);
+extern void install_tlbintr(cpuid_t cpu);
+extern void hub_migrintr_init(cnodeid_t /*cnode*/);
+extern int cause_intr_connect(int level, intr_func_t handler, uint intr_spl_mask);
+extern int cause_intr_disconnect(int level);
+extern void intr_reserve_hardwired(cnodeid_t);
+extern void intr_clear_all(nasid_t);
+extern void intr_dumpvec(cnodeid_t cnode, void (*pf)(char *, ...));
+extern int protected_broadcast(hubreg_t intrbit);
+
+/* error_dump.c */
+extern char *hub_rrb_err_type[];
+extern char *hub_wrb_err_type[];
+
+void nmi_dump(void);
+void install_cpu_nmi_handler(int slice);
+
+/* klclock.c */
+extern void hub_rtc_init(cnodeid_t);
+
+/* bte.c */
+void bte_lateinit(void);
+void bte_wait_for_xfer_completion(void *);
+
+/* klgraph.c */
+void klhwg_add_all_nodes(devfs_handle_t);
+void klhwg_add_all_modules(devfs_handle_t);
+
+/* klidbg.c */
+void install_klidbg_functions(void);
+
+/* klnuma.c */
+extern void replicate_kernel_text(int numnodes);
+extern __psunsigned_t get_freemem_start(cnodeid_t cnode);
+extern void setup_replication_mask(int maxnodes);
+
+/* init.c */
+extern cnodeid_t get_compact_nodeid(void);	/* get compact node id */
+#ifndef CONFIG_IA64_SGI_IO
+extern void init_platform_nodepda(nodepda_t *npda, cnodeid_t node);
+extern void init_platform_pda(pda_t *ppda, cpuid_t cpu);
+#endif
+extern void per_cpu_init(void);
+extern void per_hub_init(cnodeid_t);
+#ifndef CONFIG_IA64_SGI_IO
+extern cpumask_t boot_cpumask;
+#endif
+extern int is_fine_dirmode(void);
+extern void update_node_information(cnodeid_t);
+ 
+#ifndef CONFIG_IA64_SGI_IO
+/* clksupport.c */
+extern void early_counter_intr(eframe_t *);
+#endif
+
+/* hubio.c */
+extern void hubio_init(void);
+extern void hub_merge_clean(nasid_t nasid);
+extern void hub_set_piomode(nasid_t nasid, int conveyor);
+
+/* huberror.c */
+extern void hub_error_init(cnodeid_t);
+extern void dump_error_spool(cpuid_t cpu, void (*pf)(char *, ...));
+extern void hubni_error_handler(char *, int);
+extern int check_ni_errors(void);
+
+/* Used for debugger to signal upper software a breakpoint has taken place */
+
+extern void		*debugger_update;
+extern __psunsigned_t	debugger_stopped;
+
+/* 
+ * IP27 piomap, created by hub_pio_alloc.
+ * xtalk_info MUST BE FIRST, since this structure is cast to a
+ * xtalk_piomap_s by generic xtalk routines.
+ */
+struct hub_piomap_s {
+	struct xtalk_piomap_s	hpio_xtalk_info;/* standard crosstalk pio info */
+	devfs_handle_t		hpio_hub;	/* which hub's mapping registers are set up */
+	short			hpio_holdcnt;	/* count of current users of bigwin mapping */
+	char			hpio_bigwin_num;/* if big window map, which one */
+	int 			hpio_flags;	/* defined below */
+};
+/* hub_piomap flags */
+#define HUB_PIOMAP_IS_VALID		0x1
+#define HUB_PIOMAP_IS_BIGWINDOW		0x2
+#define HUB_PIOMAP_IS_FIXED		0x4
+
+#define	hub_piomap_xt_piomap(hp)	(&hp->hpio_xtalk_info)
+#define	hub_piomap_hub_v(hp)	(hp->hpio_hub)
+#define	hub_piomap_winnum(hp)	(hp->hpio_bigwin_num)
+
+#if TBD
+ /* Ensure that hpio_xtalk_info is first */
+ #assert (&(((struct hub_piomap_s *)0)->hpio_xtalk_info) == 0)
+#endif
+
+
+/* 
+ * IP27 dmamap, created by hub_pio_alloc.
+ * xtalk_info MUST BE FIRST, since this structure is cast to a
+ * xtalk_dmamap_s by generic xtalk routines.
+ */
+struct hub_dmamap_s {
+	struct xtalk_dmamap_s	hdma_xtalk_info;/* standard crosstalk dma info */
+	devfs_handle_t		hdma_hub;	/* which hub we go through */
+	int			hdma_flags;	/* defined below */
+};
+/* hub_dmamap flags */
+#define HUB_DMAMAP_IS_VALID		0x1
+#define HUB_DMAMAP_USED			0x2
+#define	HUB_DMAMAP_IS_FIXED		0x4
+
+#if TBD
+ /* Ensure that hdma_xtalk_info is first */
+ #assert (&(((struct hub_dmamap_s *)0)->hdma_xtalk_info) == 0)
+#endif
+
+/* 
+ * IP27 interrupt handle, created by hub_intr_alloc.
+ * xtalk_info MUST BE FIRST, since this structure is cast to a
+ * xtalk_intr_s by generic xtalk routines.
+ */
+struct hub_intr_s {
+	struct xtalk_intr_s	i_xtalk_info;	/* standard crosstalk intr info */
+	ilvl_t			i_swlevel;	/* software level for blocking intr */
+	cpuid_t			i_cpuid;	/* which cpu */
+	int			i_bit;		/* which bit */
+	int			i_flags;
+};
+/* flag values */
+#define HUB_INTR_IS_ALLOCED	0x1	/* for debug: allocated */
+#define HUB_INTR_IS_CONNECTED	0x4	/* for debug: connected to a software driver */
+
+#if TBD
+ /* Ensure that i_xtalk_info is first */
+ #assert (&(((struct hub_intr_s *)0)->i_xtalk_info) == 0)
+#endif
+
+
+/* IP27 hub-specific information stored under INFO_LBL_HUB_INFO */
+/* TBD: IP27-dependent stuff currently in nodepda.h should be here */
+typedef struct hubinfo_s {
+	nodepda_t			*h_nodepda;	/* pointer to node's private data area */
+	cnodeid_t			h_cnodeid;	/* compact nodeid */
+	nasid_t				h_nasid;	/* nasid */
+
+	/* structures for PIO management */
+	xwidgetnum_t			h_widgetid;	/* my widget # (as viewed from xbow) */
+	struct hub_piomap_s		h_small_window_piomap[HUB_WIDGET_ID_MAX+1];
+	sv_t				h_bwwait;	/* wait for big window to free */
+	spinlock_t			h_bwlock;	/* guard big window piomap's */
+	spinlock_t			h_crblock;      /* guard CRB error handling */
+	int				h_num_big_window_fixed;	/* count number of FIXED maps */
+	struct hub_piomap_s		h_big_window_piomap[HUB_NUM_BIG_WINDOW];
+	hub_intr_t			hub_ii_errintr;
+} *hubinfo_t;
+
+#define hubinfo_get(vhdl, infoptr) ((void)hwgraph_info_get_LBL \
+	(vhdl, INFO_LBL_NODE_INFO, (arbitrary_info_t *)infoptr))
+
+#define hubinfo_set(vhdl, infoptr) (void)hwgraph_info_add_LBL \
+	(vhdl, INFO_LBL_NODE_INFO, (arbitrary_info_t)infoptr)
+
+#define	hubinfo_to_hubv(hinfo, hub_v)	(hinfo->h_nodepda->node_vertex)
+
+/*
+ * Hub info PIO map access functions.
+ */
+#define	hubinfo_bwin_piomap_get(hinfo, win) 	\
+			(&hinfo->h_big_window_piomap[win])
+#define	hubinfo_swin_piomap_get(hinfo, win)	\
+			(&hinfo->h_small_window_piomap[win])
+	
+/* IP27 cpu-specific information stored under INFO_LBL_CPU_INFO */
+/* TBD: IP27-dependent stuff currently in pda.h should be here */
+typedef struct cpuinfo_s {
+#ifndef CONFIG_IA64_SGI_IO
+	pda_t		*ci_cpupda;	/* pointer to CPU's private data area */
+#endif
+	cpuid_t		ci_cpuid;	/* CPU ID */
+} *cpuinfo_t;
+
+#define cpuinfo_get(vhdl, infoptr) ((void)hwgraph_info_get_LBL \
+	(vhdl, INFO_LBL_CPU_INFO, (arbitrary_info_t *)infoptr))
+
+#define cpuinfo_set(vhdl, infoptr) (void)hwgraph_info_add_LBL \
+	(vhdl, INFO_LBL_CPU_INFO, (arbitrary_info_t)infoptr)
+
+/* Special initialization function for xswitch vertices created during startup. */
+extern void xswitch_vertex_init(devfs_handle_t xswitch);
+
+extern xtalk_provider_t hub_provider;
+
+/* du.c */
+int ducons_write(char *buf, int len);
+
+/* memerror.c */
+
+extern void install_eccintr(cpuid_t cpu);
+extern void memerror_get_stats(cnodeid_t cnode,
+			       int *bank_stats, int *bank_stats_max);
+extern void probe_md_errors(nasid_t);
+/* sysctlr.c */
+extern void sysctlr_init(void);
+extern void sysctlr_power_off(int sdonly);
+extern void sysctlr_keepalive(void);
+
+#define valid_cpuid(_x)		(((_x) >= 0) && ((_x) < maxcpus))
+
+/* Useful definitions to get the memory dimm given a physical
+ * address.
+ */
+#define paddr_dimm(_pa)		((_pa & MD_BANK_MASK) >> MD_BANK_SHFT)
+#define paddr_cnode(_pa)	(NASID_TO_COMPACT_NODEID(NASID_GET(_pa)))
+extern void membank_pathname_get(paddr_t,char *);
+
+/* To redirect the output into the error buffer */
+#define errbuf_print(_s)	printf("#%s",_s)
+
+extern void crbx(nasid_t nasid, void (*pf)(char *, ...));
+void bootstrap(void);
+
+/* sndrv.c */
+extern int sndrv_attach(devfs_handle_t vertex);
+
+#endif /* _ASM_SN_PRIVATE_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/synergy.h linux/include/asm-ia64/sn/synergy.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/synergy.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/synergy.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,127 @@
+#ifndef ASM_IA64_SN_SYNERGY_H
+#define ASM_IA64_SN_SYNERGY_H
+
+#include <asm/io.h>
+#include <asm/sn/intr_public.h>
+
+
+/*
+ * Definitions for the synergy asic driver
+ * 
+ * These are for SGI platforms only.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc
+ * Copyright (C) 2000 Alan Mayer (ajm@sgi.com)
+ */
+
+
+#define SSPEC_BASE	(0xe0000000000)
+#define LB_REG_BASE	(SSPEC_BASE + 0x0)
+
+#define VEC_MASK3A_ADDR	(0x2a0 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
+#define VEC_MASK3B_ADDR	(0x2a8 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
+#define VEC_MASK3A	(0x2a0)
+#define VEC_MASK3B	(0x2a8)
+
+#define VEC_MASK2A_ADDR	(0x2b0 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
+#define VEC_MASK2B_ADDR	(0x2b8 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
+#define VEC_MASK2A	(0x2b0)
+#define VEC_MASK2B	(0x2b8)
+
+#define VEC_MASK1A_ADDR	(0x2c0 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
+#define VEC_MASK1B_ADDR	(0x2c8 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
+#define VEC_MASK1A	(0x2c0)
+#define VEC_MASK1B	(0x2c8)
+
+#define VEC_MASK0A_ADDR	(0x2d0 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
+#define VEC_MASK0B_ADDR	(0x2d8 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
+#define VEC_MASK0A	(0x2d0)
+#define VEC_MASK0B	(0x2d8)
+
+#define WRITE_LOCAL_SYNERGY_REG(addr, value)	__synergy_out(addr, value)
+
+#define HUBREG_CAST             (volatile hubreg_t *)
+#define NODE_OFFSET(_n)         (UINT64_CAST (_n) << NODE_SIZE_BITS)
+#define SYN_UNCACHED_SPACE      0xc000000000000000
+#define NODE_HSPEC_BASE(_n)     (HSPEC_BASE + NODE_OFFSET(_n))
+#define NODE_LREG_BASE(_n)      (NODE_HSPEC_BASE(_n) + 0x30000000)
+#define RREG_BASE(_n)           (NODE_LREG_BASE(_n))
+#define REMOTE_HSPEC(_n, _x)    (HUBREG_CAST (RREG_BASE(_n) + (_x)))
+#define    HSPEC_SYNERGY0_0          0x04000000    /* Synergy0 Registers     */
+#define    HSPEC_SYNERGY1_0          0x05000000    /* Synergy1 Registers     */
+#define HS_SYNERGY_STRIDE               (HSPEC_SYNERGY1_0 - HSPEC_SYNERGY0_0)
+
+
+#define HUB_L(_a)                       *(_a)
+#define HUB_S(_a, _d)                   *(_a) = (_d)
+
+
+#define REMOTE_SYNERGY_LOAD(nasid, fsb, reg)  __remote_synergy_in(nasid, fsb, reg)
+#define REMOTE_SYNERGY_STORE(nasid, fsb, reg, val) __remote_synergy_out(nasid, fsb, reg, val)
+
+extern inline void
+__remote_synergy_out(int nasid, int fsb, unsigned long reg, unsigned long val) {
+	unsigned long addr = ((RREG_BASE(nasid)) + 
+		((HSPEC_SYNERGY0_0 | (fsb)*HS_SYNERGY_STRIDE) | ((reg) << 2)));
+
+	HUB_S((unsigned long *)(addr),      (val) >> 48);
+	HUB_S((unsigned long *)(addr+0x08), (val) >> 32);
+	HUB_S((unsigned long *)(addr+0x10), (val) >> 16);
+	HUB_S((unsigned long *)(addr+0x18), (val)      );
+	__ia64_mf_a();
+}
+
+extern inline unsigned long
+__remote_synergy_in(int nasid, int fsb, unsigned long reg) {
+	volatile unsigned long *addr = (unsigned long *) ((RREG_BASE(nasid)) + 
+		((HSPEC_SYNERGY0_0 | (fsb)*HS_SYNERGY_STRIDE) | (reg)));
+	unsigned long ret;
+
+	ret = *addr;
+	__ia64_mf_a();
+	return ret;
+}
+
+extern inline void
+__synergy_out(unsigned long addr, unsigned long value)
+{
+	volatile unsigned long *adr = (unsigned long *)
+			(addr | __IA64_UNCACHED_OFFSET);
+
+	*adr = value;
+	__ia64_mf_a();
+}
+
+#define READ_LOCAL_SYNERGY_REG(addr)	__synergy_in(addr)
+
+extern inline unsigned long
+__synergy_in(unsigned long addr)
+{
+	unsigned long ret, *adr = (unsigned long *)
+			(addr | __IA64_UNCACHED_OFFSET);
+
+	ret = *adr;
+	__ia64_mf_a();
+	return ret;
+}
+
+struct sn1_intr_action {
+	void (*handler)(int, void *, struct pt_regs *);
+	void *intr_arg;
+	unsigned long flags;
+	struct sn1_intr_action * next;
+};
+
+typedef struct synergy_da_s {
+	hub_intmasks_t	s_intmasks;
+}synergy_da_t;
+
+struct sn1_cnode_action_list {
+	spinlock_t action_list_lock;
+	struct sn1_intr_action *action_list;
+};
+	
+
+/* Temporary definitions for testing: */
+
+#endif /* ASM_IA64_SN_SYNERGY_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/systeminfo.h linux/include/asm-ia64/sn/systeminfo.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/systeminfo.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/systeminfo.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,72 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SYSTEMINFO_H
+#define _ASM_SN_SYSTEMINFO_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_SERIAL_SIZE 16
+
+typedef struct module_info_s {
+	uint64_t serial_num;
+	int mod_num;
+	char serial_str[MAX_SERIAL_SIZE];
+} module_info_t;
+
+
+
+/*
+ * Commands to sysinfo()
+ */
+
+#define SI_SYSNAME		1	/* return name of operating system */
+#define SI_HOSTNAME		2	/* return name of node */
+#define SI_RELEASE 		3	/* return release of operating system */
+#define SI_VERSION		4	/* return version field of utsname */
+#define SI_MACHINE		5	/* return kind of machine */
+#define SI_ARCHITECTURE		6	/* return instruction set arch */
+#define SI_HW_SERIAL		7	/* return hardware serial number */
+#define SI_HW_PROVIDER		8	/* return hardware manufacturer */
+#define SI_SRPC_DOMAIN		9	/* return secure RPC domain */
+#define SI_INITTAB_NAME	       10	/* return name of inittab file used */
+
+#define _MIPS_SI_VENDOR		100	/* return system provider */
+#define _MIPS_SI_OS_PROVIDER	101	/* return OS manufacturer */
+#define _MIPS_SI_OS_NAME	102	/* return OS name */
+#define _MIPS_SI_HW_NAME	103	/* return system name */
+#define _MIPS_SI_NUM_PROCESSORS	104	/* return number of processors */
+#define _MIPS_SI_HOSTID		105	/* return hostid */
+#define _MIPS_SI_OSREL_MAJ	106	/* return OS major release number */
+#define _MIPS_SI_OSREL_MIN	107	/* return OS minor release number */
+#define _MIPS_SI_OSREL_PATCH	108	/* return OS release number */
+#define _MIPS_SI_PROCESSORS	109	/* return CPU revision id */
+#define _MIPS_SI_AVAIL_PROCESSORS 110	/* return number of available processors */
+#define	_MIPS_SI_SERIAL		111
+/*
+ * These commands are unpublished interfaces to sysinfo().
+ */
+#define SI_SET_HOSTNAME		258	/* set name of node */
+					/*  -unpublished option */
+#define SI_SET_SRPC_DOMAIN	265	/* set secure RPC domain */
+					/* -unpublished option */
+
+#if !defined(__KERNEL__)
+int sysinfo(int, char *, long);
+int get_num_modules(void);
+int get_module_info(int, module_info_t *, size_t);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ASM_SN_SYSTEMINFO_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/types.h linux/include/asm-ia64/sn/types.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/types.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/types.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SN_TYPES_H
+#define _ASM_SN_TYPES_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+typedef unsigned long 	cpuid_t;
+typedef unsigned long 	cpumask_t;
+/* typedef unsigned long	cnodemask_t; */
+typedef signed short	nasid_t;	/* node id in numa-as-id space */
+typedef signed short	cnodeid_t;	/* node id in compact-id space */
+typedef signed char	partid_t;	/* partition ID type */
+typedef signed short	moduleid_t;	/* user-visible module number type */
+typedef signed short	cmoduleid_t;	/* kernel compact module id type */
+typedef unsigned char	clusterid_t;	/* Clusterid of the cell */
+
+#if defined(CONFIG_IA64_SGI_IO)
+#define __psunsigned_t uint64_t
+#define lock_t uint64_t
+#define sv_t uint64_t
+
+typedef unsigned long iopaddr_t;
+typedef unsigned char uchar_t;
+typedef unsigned long paddr_t;
+typedef unsigned long pfn_t;
+#endif        /* CONFIG_IA64_SGI_IO */
+
+#endif /* _ASM_SN_TYPES_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/vector.h linux/include/asm-ia64/sn/vector.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/vector.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/vector.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,117 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_VECTOR_H
+#define _ASM_SN_VECTOR_H
+
+#define NET_VEC_NULL            ((net_vec_t)  0)
+#define NET_VEC_BAD             ((net_vec_t) -1)
+
+#ifdef RTL
+
+#define VEC_POLLS_W		16	/* Polls before write times out */
+#define VEC_POLLS_R		16	/* Polls before read times out */
+#define VEC_POLLS_X		16	/* Polls before exch times out */
+
+#define VEC_RETRIES_W		1	/* Retries before write fails */
+#define VEC_RETRIES_R		1	/* Retries before read fails */
+#define VEC_RETRIES_X		1	/* Retries before exch fails */
+
+#else /* RTL */
+
+#define VEC_POLLS_W		128	/* Polls before write times out */
+#define VEC_POLLS_R		128	/* Polls before read times out */
+#define VEC_POLLS_X		128	/* Polls before exch times out */
+
+#define VEC_RETRIES_W		8	/* Retries before write fails */
+#define VEC_RETRIES_R           8	/* Retries before read fails */
+#define VEC_RETRIES_X		4	/* Retries before exch fails */
+
+#endif /* RTL */
+
+#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#define VECTOR_PARMS		LB_VECTOR_PARMS
+#define VECTOR_ROUTE		LB_VECTOR_ROUTE
+#define VECTOR_DATA		LB_VECTOR_DATA
+#define VECTOR_STATUS		LB_VECTOR_STATUS
+#define VECTOR_RETURN		LB_VECTOR_RETURN
+#define VECTOR_READ_DATA	LB_VECTOR_READ_DATA
+#define VECTOR_STATUS_CLEAR	LB_VECTOR_STATUS_CLEAR
+#define VP_PIOID_SHFT		LVP_PIOID_SHFT
+#define VP_PIOID_MASK		LVP_PIOID_MASK
+#define VP_WRITEID_SHFT		LVP_WRITEID_SHFT
+#define VP_WRITEID_MASK		LVP_WRITEID_MASK
+#define VP_ADDRESS_MASK		LVP_ADDRESS_MASK
+#define VP_TYPE_SHFT		LVP_TYPE_SHFT
+#define VP_TYPE_MASK		LVP_TYPE_MASK
+#define VS_VALID		LVS_VALID
+#define VS_OVERRUN		LVS_OVERRUN
+#define VS_TARGET_SHFT		LVS_TARGET_SHFT
+#define VS_TARGET_MASK		LVS_TARGET_MASK
+#define VS_PIOID_SHFT		LVS_PIOID_SHFT
+#define VS_PIOID_MASK		LVS_PIOID_MASK
+#define VS_WRITEID_SHFT		LVS_WRITEID_SHFT
+#define VS_WRITEID_MASK		LVS_WRITEID_MASK
+#define VS_ADDRESS_MASK		LVS_ADDRESS_MASK
+#define VS_TYPE_SHFT		LVS_TYPE_SHFT
+#define VS_TYPE_MASK		LVS_TYPE_MASK
+#define VS_ERROR_MASK		LVS_ERROR_MASK
+#endif
+
+#define NET_ERROR_NONE          0       /* No error             */
+#define NET_ERROR_HARDWARE     -1       /* Hardware error       */
+#define NET_ERROR_OVERRUN      -2       /* Extra response(s)    */
+#define NET_ERROR_REPLY        -3       /* Reply parms mismatch */
+#define NET_ERROR_ADDRESS      -4       /* Addr error response  */
+#define NET_ERROR_COMMAND      -5       /* Cmd error response   */
+#define NET_ERROR_PROT         -6       /* Prot error response  */
+#define NET_ERROR_TIMEOUT      -7       /* Too many retries     */
+#define NET_ERROR_VECTOR       -8       /* Invalid vector/path  */
+#define NET_ERROR_ROUTERLOCK   -9       /* Timeout locking rtr  */
+#define NET_ERROR_INVAL	       -10	/* Invalid vector request */
+
+#if defined(_LANGUAGE_C) || defined(_LANGUAGE_C_PLUS_PLUS)
+typedef uint64_t              net_reg_t;
+typedef uint64_t              net_vec_t;
+
+int             vector_write(net_vec_t dest,
+                              int write_id, int address,
+                              uint64_t value);
+
+int             vector_read(net_vec_t dest,
+                             int write_id, int address,
+                             uint64_t *value);
+
+int             vector_write_node(net_vec_t dest, nasid_t nasid,
+                              int write_id, int address,
+                              uint64_t value);
+
+int             vector_read_node(net_vec_t dest, nasid_t nasid,
+                             int write_id, int address,
+                             uint64_t *value);
+
+int             vector_length(net_vec_t vec);
+net_vec_t       vector_get(net_vec_t vec, int n);
+net_vec_t       vector_prefix(net_vec_t vec, int n);
+net_vec_t       vector_modify(net_vec_t entry, int n, int route);
+net_vec_t       vector_reverse(net_vec_t vec);
+net_vec_t       vector_concat(net_vec_t vec1, net_vec_t vec2);
+
+char		*net_errmsg(int);
+
+#ifndef _STANDALONE
+int hub_vector_write(cnodeid_t cnode, net_vec_t vector, int writeid,
+	int addr, net_reg_t value);
+int hub_vector_read(cnodeid_t cnode, net_vec_t vector, int writeid,
+	int addr, net_reg_t *value);
+#endif
+
+#endif /* _LANGUAGE_C || _LANGUAGE_C_PLUS_PLUS */
+
+#endif /* _ASM_SN_VECTOR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/war.h linux/include/asm-ia64/sn/war.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/war.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/war.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,17 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_WAR_H
+#define _ASM_SN_WAR_H
+
+#if defined (CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
+#include <asm/sn/sn1/war.h>
+#endif
+
+#endif /* _ASM_SN_WAR_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xbow.h linux/include/asm-ia64/sn/xtalk/xbow.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xbow.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/xtalk/xbow.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,895 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_SN_XTALK_XBOW_H
+#define _ASM_SN_SN_XTALK_XBOW_H
+
+/*
+ * xbow.h - header file for crossbow chip and xbow section of xbridge
+ */
+
+#include <asm/sn/xtalk/xtalk.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/xtalk/xswitch.h>
+#ifdef LANGUAGE_C
+#include <asm/sn/xtalk/xbow_info.h>
+#endif
+
+
+#define	XBOW_DRV_PREFIX	"xbow_"
+
+/* The crossbow chip supports 8 8/16 bits I/O ports, numbered 0x8 through 0xf.
+ * It also implements the widget 0 address space and register set.
+ */
+#define XBOW_PORT_0	0x0
+#define XBOW_PORT_8	0x8
+#define XBOW_PORT_9	0x9
+#define XBOW_PORT_A	0xa
+#define XBOW_PORT_B	0xb
+#define XBOW_PORT_C	0xc
+#define XBOW_PORT_D	0xd
+#define XBOW_PORT_E	0xe
+#define XBOW_PORT_F	0xf
+
+#define MAX_XBOW_PORTS	8	/* number of ports on xbow chip */
+#define BASE_XBOW_PORT	XBOW_PORT_8	/* Lowest external port */
+#define MAX_PORT_NUM	0x10	/* maximum port number + 1 */
+#define XBOW_WIDGET_ID	0	/* xbow is itself widget 0 */
+
+#define	XBOW_CREDIT	4
+
+#define MAX_XBOW_NAME 	16
+
+#if LANGUAGE_C
+typedef uint32_t      xbowreg_t;
+
+#define XBOWCONST	(xbowreg_t)
+
+/* Generic xbow register, given base and offset */
+#define XBOW_REG_PTR(base, offset) ((volatile xbowreg_t*) \
+	((__psunsigned_t)(base) + (__psunsigned_t)(offset)))
+
+/* Register set for each xbow link */
+typedef volatile struct xb_linkregs_s {
+#ifdef LITTLE_ENDIAN
+/* 
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
+ * That's why we put the register first and filler second.
+ */
+    xbowreg_t               link_ibf;
+    xbowreg_t               filler0;	/* filler for proper alignment */
+    xbowreg_t               link_control;
+    xbowreg_t               filler1;
+    xbowreg_t               link_status;
+    xbowreg_t               filler2;
+    xbowreg_t               link_arb_upper;
+    xbowreg_t               filler3;
+    xbowreg_t               link_arb_lower;
+    xbowreg_t               filler4;
+    xbowreg_t               link_status_clr;
+    xbowreg_t               filler5;
+    xbowreg_t               link_reset;
+    xbowreg_t               filler6;
+    xbowreg_t               link_aux_status;
+    xbowreg_t               filler7;
+#else
+    xbowreg_t               filler0;	/* filler for proper alignment */
+    xbowreg_t               link_ibf;
+    xbowreg_t               filler1;
+    xbowreg_t               link_control;
+    xbowreg_t               filler2;
+    xbowreg_t               link_status;
+    xbowreg_t               filler3;
+    xbowreg_t               link_arb_upper;
+    xbowreg_t               filler4;
+    xbowreg_t               link_arb_lower;
+    xbowreg_t               filler5;
+    xbowreg_t               link_status_clr;
+    xbowreg_t               filler6;
+    xbowreg_t               link_reset;
+    xbowreg_t               filler7;
+    xbowreg_t               link_aux_status;
+#endif /* LITTLE_ENDIAN */
+} xb_linkregs_t;
+
+typedef volatile struct xbow_s {
+    /* standard widget configuration                       0x000000-0x000057 */
+    widget_cfg_t            xb_widget;  /* 0x000000 */
+
+    /* helper fieldnames for accessing bridge widget */
+
+#define xb_wid_id                       xb_widget.w_id
+#define xb_wid_stat                     xb_widget.w_status
+#define xb_wid_err_upper                xb_widget.w_err_upper_addr
+#define xb_wid_err_lower                xb_widget.w_err_lower_addr
+#define xb_wid_control                  xb_widget.w_control
+#define xb_wid_req_timeout              xb_widget.w_req_timeout
+#define xb_wid_int_upper                xb_widget.w_intdest_upper_addr
+#define xb_wid_int_lower                xb_widget.w_intdest_lower_addr
+#define xb_wid_err_cmdword              xb_widget.w_err_cmd_word
+#define xb_wid_llp                      xb_widget.w_llp_cfg
+#define xb_wid_stat_clr                 xb_widget.w_tflush
+
+#ifdef LITTLE_ENDIAN
+/* 
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
+ * That's why we put the register first and filler second.
+ */
+    /* xbow-specific widget configuration                  0x000058-0x0000FF */
+    xbowreg_t               xb_wid_arb_reload;  /* 0x00005C */
+    xbowreg_t               _pad_000058;
+    xbowreg_t               xb_perf_ctr_a;      /* 0x000064 */
+    xbowreg_t               _pad_000060;
+    xbowreg_t               xb_perf_ctr_b;      /* 0x00006c */
+    xbowreg_t               _pad_000068;
+    xbowreg_t               xb_nic;     /* 0x000074 */
+    xbowreg_t               _pad_000070;
+
+    /* Xbridge only */
+    xbowreg_t               xb_w0_rst_fnc;      /* 0x00007C */
+    xbowreg_t               _pad_000078;
+    xbowreg_t               xb_l8_rst_fnc;      /* 0x000084 */
+    xbowreg_t               _pad_000080;
+    xbowreg_t               xb_l9_rst_fnc;      /* 0x00008c */
+    xbowreg_t               _pad_000088;
+    xbowreg_t               xb_la_rst_fnc;      /* 0x000094 */
+    xbowreg_t               _pad_000090;
+    xbowreg_t               xb_lb_rst_fnc;      /* 0x00009c */
+    xbowreg_t               _pad_000098;
+    xbowreg_t               xb_lc_rst_fnc;      /* 0x0000a4 */
+    xbowreg_t               _pad_0000a0;
+    xbowreg_t               xb_ld_rst_fnc;      /* 0x0000ac */
+    xbowreg_t               _pad_0000a8;
+    xbowreg_t               xb_le_rst_fnc;      /* 0x0000b4 */
+    xbowreg_t               _pad_0000b0;
+    xbowreg_t               xb_lf_rst_fnc;      /* 0x0000bc */
+    xbowreg_t               _pad_0000b8;
+    xbowreg_t               xb_lock;            /* 0x0000c4 */
+    xbowreg_t               _pad_0000c0;
+    xbowreg_t               xb_lock_clr;        /* 0x0000cc */
+    xbowreg_t               _pad_0000c8;
+    /* end of Xbridge only */
+    xbowreg_t               _pad_0000d0[12];
+#else
+    /* xbow-specific widget configuration                  0x000058-0x0000FF */
+    xbowreg_t               _pad_000058;
+    xbowreg_t               xb_wid_arb_reload;  /* 0x00005C */
+    xbowreg_t               _pad_000060;
+    xbowreg_t               xb_perf_ctr_a;      /* 0x000064 */
+    xbowreg_t               _pad_000068;
+    xbowreg_t               xb_perf_ctr_b;      /* 0x00006c */
+    xbowreg_t               _pad_000070;
+    xbowreg_t               xb_nic;     /* 0x000074 */
+
+    /* Xbridge only */
+    xbowreg_t               _pad_000078;
+    xbowreg_t               xb_w0_rst_fnc;      /* 0x00007C */
+    xbowreg_t               _pad_000080;
+    xbowreg_t               xb_l8_rst_fnc;      /* 0x000084 */
+    xbowreg_t               _pad_000088;
+    xbowreg_t               xb_l9_rst_fnc;      /* 0x00008c */
+    xbowreg_t               _pad_000090;
+    xbowreg_t               xb_la_rst_fnc;      /* 0x000094 */
+    xbowreg_t               _pad_000098;
+    xbowreg_t               xb_lb_rst_fnc;      /* 0x00009c */
+    xbowreg_t               _pad_0000a0;
+    xbowreg_t               xb_lc_rst_fnc;      /* 0x0000a4 */
+    xbowreg_t               _pad_0000a8;
+    xbowreg_t               xb_ld_rst_fnc;      /* 0x0000ac */
+    xbowreg_t               _pad_0000b0;
+    xbowreg_t               xb_le_rst_fnc;      /* 0x0000b4 */
+    xbowreg_t               _pad_0000b8;
+    xbowreg_t               xb_lf_rst_fnc;      /* 0x0000bc */
+    xbowreg_t               _pad_0000c0;
+    xbowreg_t               xb_lock;            /* 0x0000c4 */
+    xbowreg_t               _pad_0000c8;
+    xbowreg_t               xb_lock_clr;        /* 0x0000cc */
+    /* end of Xbridge only */
+    xbowreg_t               _pad_0000d0[12];
+#endif /* LITTLE_ENDIAN */
+
+    /* Link Specific Registers, port 8..15                 0x000100-0x000300 */
+    xb_linkregs_t           xb_link_raw[MAX_XBOW_PORTS];
+#define xb_link(p)      xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
+
+} xbow_t;
+
+/* Configuration structure which describes each xbow link */
+typedef struct xbow_cfg_s {
+    int			    xb_port;	/* port number (0-15) */
+    int			    xb_flags;	/* port software flags */
+    short		    xb_shift;	/* shift for arb reg (mask is 0xff) */
+    short		    xb_ul;	/* upper or lower arb reg */
+    int			    xb_pad;	/* use this later (pad to ptr align) */
+    xb_linkregs_t	   *xb_linkregs;	/* pointer to link registers */
+    widget_cfg_t	   *xb_widget;	/* pointer to widget registers */
+    char		    xb_name[MAX_XBOW_NAME];	/* port name */
+    xbowreg_t		    xb_sh_arb_upper;	/* shadow upper arb register */
+    xbowreg_t		    xb_sh_arb_lower;	/* shadow lower arb register */
+} xbow_cfg_t;
+
+#define XB_FLAGS_EXISTS		0x1	/* device exists */
+#define XB_FLAGS_MASTER		0x2
+#define XB_FLAGS_SLAVE		0x0
+#define XB_FLAGS_GBR		0x4
+#define XB_FLAGS_16BIT		0x8
+#define XB_FLAGS_8BIT		0x0
+
+/* get xbow config information for port p */
+#define XB_CONFIG(p)	xbow_cfg[xb_ports[p]]
+
+/* is widget port number valid?  (based on version 7.0 of xbow spec) */
+#define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F)
+
+/* whether to use upper or lower arbitration register, given source widget id */
+#define XBOW_ARB_IS_UPPER(wid) 	((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B)
+#define XBOW_ARB_IS_LOWER(wid) 	((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F)
+
+/* offset of arbitration register, given source widget id */
+#define XBOW_ARB_OFF(wid) 	(XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24)
+
+#endif				/* LANGUAGE_C */
+
+#define	XBOW_WID_ID		WIDGET_ID
+#define	XBOW_WID_STAT		WIDGET_STATUS
+#define	XBOW_WID_ERR_UPPER	WIDGET_ERR_UPPER_ADDR
+#define	XBOW_WID_ERR_LOWER	WIDGET_ERR_LOWER_ADDR
+#define	XBOW_WID_CONTROL	WIDGET_CONTROL
+#define	XBOW_WID_REQ_TO		WIDGET_REQ_TIMEOUT
+#define	XBOW_WID_INT_UPPER	WIDGET_INTDEST_UPPER_ADDR
+#define	XBOW_WID_INT_LOWER	WIDGET_INTDEST_LOWER_ADDR
+#define	XBOW_WID_ERR_CMDWORD	WIDGET_ERR_CMD_WORD
+#define	XBOW_WID_LLP		WIDGET_LLP_CFG
+#define	XBOW_WID_STAT_CLR	WIDGET_TFLUSH
+#define XBOW_WID_ARB_RELOAD 	0x5c
+#define XBOW_WID_PERF_CTR_A 	0x64
+#define XBOW_WID_PERF_CTR_B 	0x6c
+#define XBOW_WID_NIC 		0x74
+
+/* Xbridge only */
+#define XBOW_W0_RST_FNC		0x00007C
+#define	XBOW_L8_RST_FNC		0x000084
+#define	XBOW_L9_RST_FNC		0x00008c
+#define	XBOW_LA_RST_FNC		0x000094
+#define	XBOW_LB_RST_FNC		0x00009c
+#define	XBOW_LC_RST_FNC		0x0000a4
+#define	XBOW_LD_RST_FNC		0x0000ac
+#define	XBOW_LE_RST_FNC		0x0000b4
+#define	XBOW_LF_RST_FNC		0x0000bc
+#define XBOW_RESET_FENCE(x) ((x) > 7 && (x) < 16) ? \
+				(XBOW_W0_RST_FNC + ((x) - 7) * 8) : \
+				((x) == 0) ? XBOW_W0_RST_FNC : 0
+#define XBOW_LOCK		0x0000c4
+#define XBOW_LOCK_CLR		0x0000cc
+/* End of Xbridge only */
+
+/* used only in ide, but defined here within the reserved portion */
+/*              of the widget0 address space (before 0xf4) */
+#define	XBOW_WID_UNDEF		0xe4
+
+/* pointer to link arbitration register, given xbow base, dst and src widget id */
+#define XBOW_PRIO_ARBREG_PTR(base, dst_wid, src_wid) \
+	XBOW_REG_PTR(XBOW_PRIO_LINKREGS_PTR(base, dst_wid), XBOW_ARB_OFF(src_wid))
+
+/* pointer to link registers base, given xbow base and destination widget id */
+#define XBOW_PRIO_LINKREGS_PTR(base, dst_wid) (xb_linkregs_t*) \
+	XBOW_REG_PTR(base, XB_LINK_REG_BASE(dst_wid))
+
+/* xbow link register set base, legal value for x is 0x8..0xf */
+#define	XB_LINK_BASE		0x100
+#define	XB_LINK_OFFSET		0x40
+#define	XB_LINK_REG_BASE(x)	(XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
+
+#define	XB_LINK_IBUF_FLUSH(x)	(XB_LINK_REG_BASE(x) + 0x4)
+#define	XB_LINK_CTRL(x)		(XB_LINK_REG_BASE(x) + 0xc)
+#define	XB_LINK_STATUS(x)	(XB_LINK_REG_BASE(x) + 0x14)
+#define	XB_LINK_ARB_UPPER(x)	(XB_LINK_REG_BASE(x) + 0x1c)
+#define	XB_LINK_ARB_LOWER(x)	(XB_LINK_REG_BASE(x) + 0x24)
+#define	XB_LINK_STATUS_CLR(x)	(XB_LINK_REG_BASE(x) + 0x2c)
+#define	XB_LINK_RESET(x)	(XB_LINK_REG_BASE(x) + 0x34)
+#define	XB_LINK_AUX_STATUS(x)	(XB_LINK_REG_BASE(x) + 0x3c)
+
+/* link_control(x) */
+#define	XB_CTRL_LINKALIVE_IE		0x80000000	/* link comes alive */
+     /* reserved:			0x40000000 */
+#define	XB_CTRL_PERF_CTR_MODE_MSK	0x30000000	/* perf counter mode */
+#define	XB_CTRL_IBUF_LEVEL_MSK		0x0e000000	/* input packet buffer level */
+#define	XB_CTRL_8BIT_MODE		0x01000000	/* force link into 8 bit mode */
+#define XB_CTRL_BAD_LLP_PKT		0x00800000	/* force bad LLP packet */
+#define XB_CTRL_WIDGET_CR_MSK		0x007c0000	/* LLP widget credit mask */
+#define XB_CTRL_WIDGET_CR_SHFT	18			/* LLP widget credit shift */
+#define XB_CTRL_ILLEGAL_DST_IE		0x00020000	/* illegal destination */
+#define XB_CTRL_OALLOC_IBUF_IE		0x00010000	/* overallocated input buffer */
+     /* reserved:			0x0000fe00 */
+#define XB_CTRL_BNDWDTH_ALLOC_IE	0x00000100	/* bandwidth alloc */
+#define XB_CTRL_RCV_CNT_OFLOW_IE	0x00000080	/* rcv retry overflow */
+#define XB_CTRL_XMT_CNT_OFLOW_IE	0x00000040	/* xmt retry overflow */
+#define XB_CTRL_XMT_MAX_RTRY_IE		0x00000020	/* max transmit retry */
+#define XB_CTRL_RCV_IE			0x00000010	/* receive */
+#define XB_CTRL_XMT_RTRY_IE		0x00000008	/* transmit retry */
+     /* reserved:			0x00000004 */
+#define	XB_CTRL_MAXREQ_TOUT_IE		0x00000002	/* maximum request timeout */
+#define	XB_CTRL_SRC_TOUT_IE		0x00000001	/* source timeout */
+
+/* link_status(x) */
+#define	XB_STAT_LINKALIVE		XB_CTRL_LINKALIVE_IE
+     /* reserved:			0x7ff80000 */
+#define	XB_STAT_MULTI_ERR		0x00040000	/* multi error */
+#define	XB_STAT_ILLEGAL_DST_ERR		XB_CTRL_ILLEGAL_DST_IE
+#define	XB_STAT_OALLOC_IBUF_ERR		XB_CTRL_OALLOC_IBUF_IE
+#define	XB_STAT_BNDWDTH_ALLOC_ID_MSK	0x0000ff00	/* port bitmask */
+#define	XB_STAT_RCV_CNT_OFLOW_ERR	XB_CTRL_RCV_CNT_OFLOW_IE
+#define	XB_STAT_XMT_CNT_OFLOW_ERR	XB_CTRL_XMT_CNT_OFLOW_IE
+#define	XB_STAT_XMT_MAX_RTRY_ERR	XB_CTRL_XMT_MAX_RTRY_IE
+#define	XB_STAT_RCV_ERR			XB_CTRL_RCV_IE
+#define	XB_STAT_XMT_RTRY_ERR		XB_CTRL_XMT_RTRY_IE
+     /* reserved:			0x00000004 */
+#define	XB_STAT_MAXREQ_TOUT_ERR		XB_CTRL_MAXREQ_TOUT_IE
+#define	XB_STAT_SRC_TOUT_ERR		XB_CTRL_SRC_TOUT_IE
+
+/* link_aux_status(x) */
+#define	XB_AUX_STAT_RCV_CNT	0xff000000
+#define	XB_AUX_STAT_XMT_CNT	0x00ff0000
+#define	XB_AUX_STAT_TOUT_DST	0x0000ff00
+#define	XB_AUX_LINKFAIL_RST_BAD	0x00000040
+#define	XB_AUX_STAT_PRESENT	0x00000020
+#define	XB_AUX_STAT_PORT_WIDTH	0x00000010
+     /*	reserved:		0x0000000f */
+
+/*
+ * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
+ * register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf
+ */
+#define	XB_ARB_GBR_MSK		0x1f
+#define	XB_ARB_RR_MSK		0x7
+#define	XB_ARB_GBR_SHFT(x)	(((x) & 0x3) * 8)
+#define	XB_ARB_RR_SHFT(x)	(((x) & 0x3) * 8 + 5)
+#define	XB_ARB_GBR_CNT(reg,x)	((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
+#define	XB_ARB_RR_CNT(reg,x)	((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)
+
+/* XBOW_WID_STAT */
+#define	XB_WID_STAT_LINK_INTR_SHFT	(24)
+#define	XB_WID_STAT_LINK_INTR_MASK	(0xFF << XB_WID_STAT_LINK_INTR_SHFT)
+#define	XB_WID_STAT_LINK_INTR(x)	(0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
+#define	XB_WID_STAT_WIDGET0_INTR	0x00800000
+#define XB_WID_STAT_SRCID_MASK		0x000003c0	/* Xbridge only */
+#define	XB_WID_STAT_REG_ACC_ERR		0x00000020
+#define XB_WID_STAT_RECV_TOUT		0x00000010	/* Xbridge only */
+#define XB_WID_STAT_ARB_TOUT		0x00000008	/* Xbridge only */
+#define	XB_WID_STAT_XTALK_ERR		0x00000004
+#define XB_WID_STAT_DST_TOUT		0x00000002	/* Xbridge only */
+#define	XB_WID_STAT_MULTI_ERR		0x00000001
+
+#define XB_WID_STAT_SRCID_SHFT		6
+
+/* XBOW_WID_CONTROL */
+#define XB_WID_CTRL_REG_ACC_IE		XB_WID_STAT_REG_ACC_ERR
+#define XB_WID_CTRL_RECV_TOUT		XB_WID_STAT_RECV_TOUT
+#define XB_WID_CTRL_ARB_TOUT		XB_WID_STAT_ARB_TOUT
+#define XB_WID_CTRL_XTALK_IE		XB_WID_STAT_XTALK_ERR
+
+/* XBOW_WID_INT_UPPER */
+/* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */
+
+/* XBOW WIDGET part number, in the ID register */
+#define XBOW_WIDGET_PART_NUM	0x0		/* crossbow */
+#define XXBOW_WIDGET_PART_NUM	0xd000		/* Xbridge */
+#define	XBOW_WIDGET_MFGR_NUM	0x0
+#define	XXBOW_WIDGET_MFGR_NUM	0x0
+
+#define	XBOW_REV_1_0		0x1	/* xbow rev 1.0 is "1" */
+#define	XBOW_REV_1_1		0x2	/* xbow rev 1.1 is "2" */
+#define XBOW_REV_1_2		0x3	/* xbow rev 1.2 is "3" */
+#define XBOW_REV_1_3		0x4	/* xbow rev 1.3 is "4" */
+#define XBOW_REV_2_0		0x5	/* xbow rev 2.0 is "5" */
+
+#define XXBOW_PART_REV_1_0		(XXBOW_WIDGET_PART_NUM << 4 | 0x1 )
+#define XXBOW_PART_REV_2_0		(XXBOW_WIDGET_PART_NUM << 4 | 0x2 )
+
+/* XBOW_WID_ARB_RELOAD */
+#define	XBOW_WID_ARB_RELOAD_INT	0x3f	/* GBR reload interval */
+
+
+#define nasid_has_xbridge(nasid)	\
+	(XWIDGET_PART_NUM(XWIDGET_ID_READ(nasid, 0)) == XXBOW_WIDGET_PART_NUM)
+
+
+#ifdef _LANGUAGE_C
+/*
+ * XBOW Widget 0 Register formats.
+ * Format for many of these registers are similar to the standard
+ * widget register format described as part of xtalk specification
+ * Standard widget register field format description is available in
+ * xwidget.h
+ * Following structures define the format for xbow widget 0 registers
+ */
+/*
+ * Xbow Widget 0 Command error word
+ */
+#ifdef LITTLE_ENDIAN
+
+typedef union xbw0_cmdword_u {
+    xbowreg_t               cmdword;
+    struct {
+	uint32_t              rsvd:8,		/* Reserved */
+                                barr:1,         /* Barrier operation */
+                                error:1,        /* Error Occurred */
+                                vbpm:1,         /* Virtual Backplane message */
+                                gbr:1,  /* GBR enable ?                 */
+                                ds:2,   /* Data size                    */
+                                ct:1,   /* Is it a coherent transaction */
+                                tnum:5,         /* Transaction Number */
+                                pactyp:4,       /* Packet type: */
+                                srcid:4,        /* Source ID number */
+                                destid:4;       /* Destination ID number */
+
+    } xbw0_cmdfield;
+} xbw0_cmdword_t;
+
+#else
+
+typedef union xbw0_cmdword_u {
+    xbowreg_t		    cmdword;
+    struct {
+	uint32_t		destid:4,	/* Destination ID number */
+				srcid:4,	/* Source ID number */
+				pactyp:4,	/* Packet type: */
+				tnum:5,		/* Transaction Number */
+				ct:1,	/* Is it a coherent transaction */
+				ds:2,	/* Data size			*/
+				gbr:1,	/* GBR enable ?			*/
+				vbpm:1,		/* Virtual Backplane message */
+				error:1,	/* Error Occurred */
+				barr:1,		/* Barrier operation */
+				rsvd:8;		/* Reserved */
+    } xbw0_cmdfield;
+} xbw0_cmdword_t;
+
+#endif
+
+#define	xbcmd_destid	xbw0_cmdfield.destid
+#define	xbcmd_srcid	xbw0_cmdfield.srcid
+#define	xbcmd_pactyp	xbw0_cmdfield.pactyp
+#define	xbcmd_tnum	xbw0_cmdfield.tnum
+#define	xbcmd_ct	xbw0_cmdfield.ct
+#define	xbcmd_ds	xbw0_cmdfield.ds
+#define	xbcmd_gbr	xbw0_cmdfield.gbr
+#define	xbcmd_vbpm	xbw0_cmdfield.vbpm
+#define	xbcmd_error	xbw0_cmdfield.error
+#define	xbcmd_barr	xbw0_cmdfield.barr
+
+/*
+ * Values for field PACTYP in xbow error command word
+ */
+#define	XBCMDTYP_READREQ	0	/* Read Request   packet  */
+#define	XBCMDTYP_READRESP	1	/* Read Response packet   */
+#define	XBCMDTYP_WRREQ_RESP	2	/* Write Request with response    */
+#define	XBCMDTYP_WRRESP		3	/* Write Response */
+#define	XBCMDTYP_WRREQ_NORESP	4	/* Write request with  No Response */
+#define	XBCMDTYP_FETCHOP	6	/* Fetch & Op packet      */
+#define	XBCMDTYP_STOREOP	8	/* Store & Op packet      */
+#define	XBCMDTYP_SPLPKT_REQ	0xE	/* Special packet request */
+#define	XBCMDTYP_SPLPKT_RESP	0xF	/* Special packet response        */
+
+/*
+ * Values for field ds (datasize) in xbow error command word
+ */
+#define	XBCMDSZ_DOUBLEWORD	0
+#define	XBCMDSZ_QUARTRCACHE	1
+#define	XBCMDSZ_FULLCACHE	2
+
+/*
+ * Xbow widget 0 Status register format.
+ */
+#ifdef LITTLE_ENDIAN
+
+typedef union xbw0_status_u {
+    xbowreg_t               statusword;
+    struct {
+       uint32_t		mult_err:1,	/* Multiple error occurred */
+                                connect_tout:1, /* Connection timeout   */
+                                xtalk_err:1,    /* Xtalk pkt with error bit */
+                                /* End of Xbridge only */
+                                w0_arb_tout:1,  /* arbiter timeout err */
+                                w0_recv_tout:1, /* receive timeout err */
+                                /* Xbridge only */
+                                regacc_err:1,   /* Reg Access error     */
+                                src_id:4,       /* source id. Xbridge only */
+                                resvd1:13,      /* reserved */
+                                wid0intr:1, linkXintr:8; /* wid 0 err intr; link(x) err intr */
+    } xbw0_stfield;
+} xbw0_status_t;
+
+#else
+
+typedef union xbw0_status_u {
+    xbowreg_t		    statusword;
+    struct {
+	uint32_t		linkXintr:8,	/* link(x) error intr */
+				wid0intr:1,	/* Widget 0 err intr */
+				resvd1:13,	/* reserved */
+				src_id:4,	/* source id. Xbridge only */
+				regacc_err:1,	/* Reg Access error	*/
+				/* Xbridge only */
+				w0_recv_tout:1,	/* receive timeout err */
+				w0_arb_tout:1,	/* arbiter timeout err */
+				/* End of Xbridge only */
+				xtalk_err:1,	/* Xtalk pkt with error bit */
+				connect_tout:1, /* Connection timeout	*/
+				mult_err:1;	/* Multiple error occurred */
+    } xbw0_stfield;
+} xbw0_status_t;
+
+#endif
+
+#define	xbst_linkXintr		xbw0_stfield.linkXintr
+#define	xbst_w0intr		xbw0_stfield.wid0intr
+#define	xbst_regacc_err		xbw0_stfield.regacc_err
+#define	xbst_xtalk_err		xbw0_stfield.xtalk_err
+#define	xbst_connect_tout	xbw0_stfield.connect_tout
+#define	xbst_mult_err		xbw0_stfield.mult_err
+#define xbst_src_id		xbw0_stfield.src_id	    /* Xbridge only */
+#define xbst_w0_recv_tout	xbw0_stfield.w0_recv_tout   /* Xbridge only */
+#define xbst_w0_arb_tout	xbw0_stfield.w0_arb_tout    /* Xbridge only */
+
+/*
+ * Xbow widget 0 Control register format
+ */
+#ifdef LITTLE_ENDIAN
+
+typedef union xbw0_ctrl_u {
+    xbowreg_t               ctrlword;
+    struct {
+	uint32_t              
+				resvd3:1,
+                                conntout_intr:1,
+                                xtalkerr_intr:1,
+                                w0_arg_tout_intr:1,     /* Xbridge only */
+                                w0_recv_tout_intr:1,    /* Xbridge only */
+                                accerr_intr:1,
+                                enable_w0_tout_cntr:1,  /* Xbridge only */
+                                enable_watchdog:1,      /* Xbridge only */
+                                resvd1:24;
+    } xbw0_ctrlfield;
+} xbw0_ctrl_t;
+
+#else
+
+typedef union xbw0_ctrl_u {
+    xbowreg_t		    ctrlword;
+    struct {
+	uint32_t
+				resvd1:24,
+				enable_watchdog:1,	/* Xbridge only */
+				enable_w0_tout_cntr:1,	/* Xbridge only */
+				accerr_intr:1,
+				w0_recv_tout_intr:1,	/* Xbridge only */
+				w0_arg_tout_intr:1,	/* Xbridge only */
+				xtalkerr_intr:1,
+				conntout_intr:1,
+				resvd3:1;
+    } xbw0_ctrlfield;
+} xbw0_ctrl_t;
+
+#endif
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xbow_linkctrl_u {
+    xbowreg_t               xbl_ctrlword;
+    struct {
+	uint32_t 		srcto_intr:1,
+                                maxto_intr:1, 
+                                rsvd3:1,
+                                trx_retry_intr:1, 
+                                rcv_err_intr:1, 
+                                trx_max_retry_intr:1,
+                                trxov_intr:1, 
+                                rcvov_intr:1,
+                                bwalloc_intr:1, 
+                                rsvd2:7, 
+                                obuf_intr:1,
+                                idest_intr:1, 
+                                llp_credit:5, 
+                                force_badllp:1,
+                                send_bm8:1, 
+                                inbuf_level:3, 
+                                perf_mode:2,
+                                rsvd1:1, 
+       		                alive_intr:1;
+
+    } xb_linkcontrol;
+} xbow_linkctrl_t;
+
+#else
+
+typedef union xbow_linkctrl_u {
+    xbowreg_t		    xbl_ctrlword;
+    struct {
+	uint32_t		alive_intr:1, 
+				rsvd1:1, 
+				perf_mode:2,
+				inbuf_level:3, 
+				send_bm8:1, 
+				force_badllp:1,
+				llp_credit:5, 
+				idest_intr:1, 
+				obuf_intr:1,
+				rsvd2:7, 
+				bwalloc_intr:1, 
+				rcvov_intr:1,
+				trxov_intr:1, 
+				trx_max_retry_intr:1,
+				rcv_err_intr:1, 
+				trx_retry_intr:1, 
+				rsvd3:1,
+				maxto_intr:1, 
+				srcto_intr:1;
+    } xb_linkcontrol;
+} xbow_linkctrl_t;
+
+#endif
+
+
+#define	xbctl_accerr_intr	(xbw0_ctrlfield.accerr_intr)
+#define	xbctl_xtalkerr_intr	(xbw0_ctrlfield.xtalkerr_intr)
+#define	xbctl_cnntout_intr	(xbw0_ctrlfield.conntout_intr)
+
+#define	XBW0_CTRL_ACCERR_INTR	(1 << 5)
+#define	XBW0_CTRL_XTERR_INTR	(1 << 2)
+#define	XBW0_CTRL_CONNTOUT_INTR	(1 << 1)
+
+/*
+ * Xbow Link specific Registers structure definitions.
+ */
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xbow_linkX_status_u {
+    xbowreg_t               linkstatus;
+    struct {
+	uint32_t               pkt_toutsrc:1,
+                                pkt_toutconn:1, /* max_req_tout in Xbridge */
+                                pkt_toutdest:1, /* reserved in Xbridge */
+                                llp_xmitretry:1,
+                                llp_rcverror:1,
+                                llp_maxtxretry:1,
+                                llp_txovflow:1,
+                                llp_rxovflow:1,
+                                bw_errport:8,   /* BW allocation error port   */
+                                ioe:1,          /* Input overallocation error */
+                                illdest:1,
+                                merror:1,
+                                resvd1:12,
+				alive:1;
+    } xb_linkstatus;
+} xbwX_stat_t;
+
+#else
+
+typedef union xbow_linkX_status_u {
+    xbowreg_t		    linkstatus;
+    struct {
+	uint32_t		alive:1,
+				resvd1:12,
+				merror:1,
+				illdest:1,
+				ioe:1,		/* Input overallocation error */
+				bw_errport:8,	/* BW allocation error port   */
+				llp_rxovflow:1,
+				llp_txovflow:1,
+				llp_maxtxretry:1,
+				llp_rcverror:1,
+				llp_xmitretry:1,
+				pkt_toutdest:1, /* reserved in Xbridge */
+				pkt_toutconn:1, /* max_req_tout in Xbridge */
+				pkt_toutsrc:1;
+    } xb_linkstatus;
+} xbwX_stat_t;
+
+#endif
+
+#define	link_alive		xb_linkstatus.alive
+#define	link_multierror		xb_linkstatus.merror
+#define	link_illegal_dest	xb_linkstatus.illdest
+#define	link_ioe		xb_linkstatus.ioe
+#define link_max_req_tout	xb_linkstatus.pkt_toutconn  /* Xbridge */
+#define link_pkt_toutconn	xb_linkstatus.pkt_toutconn  /* Xbow */
+#define link_pkt_toutdest	xb_linkstatus.pkt_toutdest
+#define	link_pkt_toutsrc	xb_linkstatus.pkt_toutsrc
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xbow_aux_linkX_status_u {
+    xbowreg_t               aux_linkstatus;
+    struct {
+	uint32_t 		rsvd2:4,
+                                bit_mode_8:1,
+                                wid_present:1,
+                                fail_mode:1,
+                                rsvd1:1,
+                                to_src_loc:8,
+                                tx_retry_cnt:8,
+				rx_err_cnt:8;
+    } xb_aux_linkstatus;
+} xbow_aux_link_status_t;
+
+#else
+
+typedef union xbow_aux_linkX_status_u {
+    xbowreg_t		    aux_linkstatus;
+    struct {
+	uint32_t		rx_err_cnt:8,
+				tx_retry_cnt:8,
+				to_src_loc:8,
+				rsvd1:1,
+				fail_mode:1,
+				wid_present:1,
+				bit_mode_8:1,
+				rsvd2:4;
+    } xb_aux_linkstatus;
+} xbow_aux_link_status_t;
+
+#endif
+
+
+#ifdef LITTLE_ENDIAN
+
+typedef union xbow_perf_count_u {
+    xbowreg_t               xb_counter_val;
+    struct {
+        uint32_t 		count:20,
+                                link_select:3,
+				rsvd:9;
+    } xb_perf;
+} xbow_perfcount_t;
+
+#else
+
+typedef union xbow_perf_count_u {
+    xbowreg_t               xb_counter_val;
+    struct {
+	uint32_t              rsvd:9, 
+				link_select:3, 
+				count:20;
+    } xb_perf;
+} xbow_perfcount_t;
+
+#endif
+
+#define XBOW_COUNTER_MASK	0xFFFFF
+
+extern int              xbow_widget_present(xbow_t * xbow, int port);
+
+extern xwidget_intr_preset_f xbow_intr_preset;
+extern xswitch_reset_link_f xbow_reset_link;
+void                    xbow_mlreset(xbow_t *);
+
+/* ========================================================================
+ */
+
+#ifdef	MACROFIELD_LINE
+/*
+ * This table forms a relation between the byte offset macros normally
+ * used for ASM coding and the calculated byte offsets of the fields
+ * in the C structure.
+ *
+ * See xbow_check.c xbow_html.c for further details.
+ */
+#ifndef MACROFIELD_LINE_BITFIELD
+#define MACROFIELD_LINE_BITFIELD(m)	/* ignored */
+#endif
+
+struct macrofield_s     xbow_macrofield[] =
+{
+
+    MACROFIELD_LINE(XBOW_WID_ID, xb_wid_id)
+    MACROFIELD_LINE(XBOW_WID_STAT, xb_wid_stat)
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_LINK_INTR(0xF))
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_LINK_INTR(0xE))
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_LINK_INTR(0xD))
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_LINK_INTR(0xC))
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_LINK_INTR(0xB))
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_LINK_INTR(0xA))
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_LINK_INTR(0x9))
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_LINK_INTR(0x8))
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_WIDGET0_INTR)
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_REG_ACC_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_XTALK_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_WID_STAT_MULTI_ERR)
+    MACROFIELD_LINE(XBOW_WID_ERR_UPPER, xb_wid_err_upper)
+    MACROFIELD_LINE(XBOW_WID_ERR_LOWER, xb_wid_err_lower)
+    MACROFIELD_LINE(XBOW_WID_CONTROL, xb_wid_control)
+    MACROFIELD_LINE_BITFIELD(XB_WID_CTRL_REG_ACC_IE)
+    MACROFIELD_LINE_BITFIELD(XB_WID_CTRL_XTALK_IE)
+    MACROFIELD_LINE(XBOW_WID_REQ_TO, xb_wid_req_timeout)
+    MACROFIELD_LINE(XBOW_WID_INT_UPPER, xb_wid_int_upper)
+    MACROFIELD_LINE(XBOW_WID_INT_LOWER, xb_wid_int_lower)
+    MACROFIELD_LINE(XBOW_WID_ERR_CMDWORD, xb_wid_err_cmdword)
+    MACROFIELD_LINE(XBOW_WID_LLP, xb_wid_llp)
+    MACROFIELD_LINE(XBOW_WID_STAT_CLR, xb_wid_stat_clr)
+    MACROFIELD_LINE(XBOW_WID_ARB_RELOAD, xb_wid_arb_reload)
+    MACROFIELD_LINE(XBOW_WID_PERF_CTR_A, xb_perf_ctr_a)
+    MACROFIELD_LINE(XBOW_WID_PERF_CTR_B, xb_perf_ctr_b)
+    MACROFIELD_LINE(XBOW_WID_NIC, xb_nic)
+    MACROFIELD_LINE(XB_LINK_REG_BASE(8), xb_link(8))
+    MACROFIELD_LINE(XB_LINK_IBUF_FLUSH(8), xb_link(8).link_ibf)
+    MACROFIELD_LINE(XB_LINK_CTRL(8), xb_link(8).link_control)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_LINKALIVE_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_PERF_CTR_MODE_MSK)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_IBUF_LEVEL_MSK)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_8BIT_MODE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_BAD_LLP_PKT)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_WIDGET_CR_MSK)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_ILLEGAL_DST_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_OALLOC_IBUF_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_BNDWDTH_ALLOC_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_RCV_CNT_OFLOW_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_XMT_CNT_OFLOW_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_XMT_MAX_RTRY_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_RCV_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_XMT_RTRY_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_MAXREQ_TOUT_IE)
+    MACROFIELD_LINE_BITFIELD(XB_CTRL_SRC_TOUT_IE)
+    MACROFIELD_LINE(XB_LINK_STATUS(8), xb_link(8).link_status)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_LINKALIVE)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_MULTI_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_ILLEGAL_DST_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_OALLOC_IBUF_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_BNDWDTH_ALLOC_ID_MSK)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_RCV_CNT_OFLOW_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_XMT_CNT_OFLOW_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_XMT_MAX_RTRY_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_RCV_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_XMT_RTRY_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_MAXREQ_TOUT_ERR)
+    MACROFIELD_LINE_BITFIELD(XB_STAT_SRC_TOUT_ERR)
+    MACROFIELD_LINE(XB_LINK_ARB_UPPER(8), xb_link(8).link_arb_upper)
+    MACROFIELD_LINE_BITFIELD(XB_ARB_RR_MSK << XB_ARB_RR_SHFT(0xb))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(0xb))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_RR_MSK << XB_ARB_RR_SHFT(0xa))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(0xa))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_RR_MSK << XB_ARB_RR_SHFT(0x9))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(0x9))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_RR_MSK << XB_ARB_RR_SHFT(0x8))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(0x8))
+    MACROFIELD_LINE(XB_LINK_ARB_LOWER(8), xb_link(8).link_arb_lower)
+    MACROFIELD_LINE_BITFIELD(XB_ARB_RR_MSK << XB_ARB_RR_SHFT(0xf))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(0xf))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_RR_MSK << XB_ARB_RR_SHFT(0xe))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(0xe))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_RR_MSK << XB_ARB_RR_SHFT(0xd))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(0xd))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_RR_MSK << XB_ARB_RR_SHFT(0xc))
+    MACROFIELD_LINE_BITFIELD(XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(0xc))
+    MACROFIELD_LINE(XB_LINK_STATUS_CLR(8), xb_link(8).link_status_clr)
+    MACROFIELD_LINE(XB_LINK_RESET(8), xb_link(8).link_reset)
+    MACROFIELD_LINE(XB_LINK_AUX_STATUS(8), xb_link(8).link_aux_status)
+    MACROFIELD_LINE_BITFIELD(XB_AUX_STAT_RCV_CNT)
+    MACROFIELD_LINE_BITFIELD(XB_AUX_STAT_XMT_CNT)
+    MACROFIELD_LINE_BITFIELD(XB_AUX_LINKFAIL_RST_BAD)
+    MACROFIELD_LINE_BITFIELD(XB_AUX_STAT_PRESENT)
+    MACROFIELD_LINE_BITFIELD(XB_AUX_STAT_PORT_WIDTH)
+    MACROFIELD_LINE_BITFIELD(XB_AUX_STAT_TOUT_DST)
+    MACROFIELD_LINE(XB_LINK_REG_BASE(0x8), xb_link(0x8))
+    MACROFIELD_LINE(XB_LINK_REG_BASE(0x9), xb_link(0x9))
+    MACROFIELD_LINE(XB_LINK_REG_BASE(0xA), xb_link(0xA))
+    MACROFIELD_LINE(XB_LINK_REG_BASE(0xB), xb_link(0xB))
+    MACROFIELD_LINE(XB_LINK_REG_BASE(0xC), xb_link(0xC))
+    MACROFIELD_LINE(XB_LINK_REG_BASE(0xD), xb_link(0xD))
+    MACROFIELD_LINE(XB_LINK_REG_BASE(0xE), xb_link(0xE))
+    MACROFIELD_LINE(XB_LINK_REG_BASE(0xF), xb_link(0xF))
+};				/* xbow_macrofield[] */
+
+#endif				/* MACROFIELD_LINE */
+
+#endif				/* _LANGUAGE_C */
+#endif                          /* _ASM_SN_SN_XTALK_XBOW_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xbow_info.h linux/include/asm-ia64/sn/xtalk/xbow_info.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xbow_info.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/xtalk/xbow_info.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,67 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_XTALK_XBOW_INFO_H
+#define _ASM_SN_XTALK_XBOW_INFO_H
+
+#define XBOW_PERF_MODES	       0x03
+#define XBOW_PERF_COUNTERS     0x02
+
+#define XBOW_MONITOR_NONE      0x00
+#define XBOW_MONITOR_SRC_LINK  0x01
+#define XBOW_MONITOR_DEST_LINK 0x02
+#define XBOW_MONITOR_INP_PKT   0x03
+#define XBOW_MONITOR_MULTIPLEX 0x04
+
+#define XBOW_LINK_MULTIPLEX    0x20
+
+#define XBOW_PERF_TIMEOUT	4
+#define XBOW_STATS_TIMEOUT	HZ
+
+typedef struct xbow_perf_link {
+    uint64_t              xlp_cumulative[XBOW_PERF_MODES];
+    unsigned char           xlp_link_alive;
+} xbow_perf_link_t;
+
+
+typedef struct xbow_link_status {
+    uint64_t              rx_err_count;
+    uint64_t              tx_retry_count;
+} xbow_link_status_t;
+
+
+
+typedef struct xbow_perf {
+    uint32_t              xp_current;
+    unsigned char           xp_link;
+    unsigned char           xp_mode;
+    unsigned char           xp_curlink;
+    unsigned char           xp_curmode;
+    volatile uint32_t    *xp_perf_reg;
+} xbow_perf_t;
+
+extern void             xbow_update_perf_counters(devfs_handle_t);
+extern xbow_perf_link_t *xbow_get_perf_counters(devfs_handle_t);
+extern int              xbow_enable_perf_counter(devfs_handle_t, int, int, int);
+
+#define XBOWIOC_PERF_ENABLE	  	1
+#define XBOWIOC_PERF_DISABLE	 	2
+#define XBOWIOC_PERF_GET	 	3
+#define XBOWIOC_LLP_ERROR_ENABLE 	4
+#define XBOWIOC_LLP_ERROR_DISABLE	5
+#define XBOWIOC_LLP_ERROR_GET	 	6
+
+
+struct xbow_perfarg_t {
+    int                     link;
+    int                     mode;
+    int                     counter;
+};
+
+#endif				/* _ASM_SN_XTALK_XBOW_INFO_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xswitch.h linux/include/asm-ia64/sn/xtalk/xswitch.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xswitch.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/xtalk/xswitch.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,59 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_XTALK_XSWITCH_H
+#define _ASM_SN_XTALK_XSWITCH_H
+
+/*
+ * xswitch.h - controls the format of the data
+ * provided by xswitch vertices back to the
+ * xtalk bus providers.
+ */
+
+#if LANGUAGE_C
+
+typedef struct xswitch_info_s *xswitch_info_t;
+
+typedef int
+                        xswitch_reset_link_f(devfs_handle_t xconn);
+
+typedef struct xswitch_provider_s {
+    xswitch_reset_link_f   *reset_link;
+} xswitch_provider_t;
+
+extern void             xswitch_provider_register(devfs_handle_t sw_vhdl, xswitch_provider_t * xsw_fns);
+
+xswitch_reset_link_f    xswitch_reset_link;
+
+extern xswitch_info_t   xswitch_info_new(devfs_handle_t vhdl);
+
+extern void             xswitch_info_link_is_ok(xswitch_info_t xswitch_info,
+						xwidgetnum_t port);
+extern void             xswitch_info_vhdl_set(xswitch_info_t xswitch_info,
+					      xwidgetnum_t port,
+					      devfs_handle_t xwidget);
+extern void             xswitch_info_master_assignment_set(xswitch_info_t xswitch_info,
+						       xwidgetnum_t port,
+					       devfs_handle_t master_vhdl);
+
+extern xswitch_info_t   xswitch_info_get(devfs_handle_t vhdl);
+
+extern int              xswitch_info_link_ok(xswitch_info_t xswitch_info,
+					     xwidgetnum_t port);
+extern devfs_handle_t     xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
+					      xwidgetnum_t port);
+extern devfs_handle_t     xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
+						      xwidgetnum_t port);
+
+extern int		xswitch_id_get(devfs_handle_t vhdl);
+extern void		xswitch_id_set(devfs_handle_t vhdl,int xbow_num);
+
+#endif				/* LANGUAGE_C */
+
+#endif				/* _ASM_SN_XTALK_XSWITCH_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xtalk.h linux/include/asm-ia64/sn/xtalk/xtalk.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xtalk.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/xtalk/xtalk.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,408 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_XTALK_XTALK_H
+#define _ASM_SN_XTALK_XTALK_H
+
+/*
+ * xtalk.h -- platform-independent crosstalk interface
+ */
+/*
+ * User-level device driver visible types
+ */
+typedef char            xwidgetnum_t;	/* xtalk widget number  (0..15) */
+
+#define XWIDGET_NONE		-1
+
+typedef int xwidget_part_num_t;	/* xtalk widget part number */
+
+#define XWIDGET_PART_NUM_NONE	-1
+
+typedef int             xwidget_rev_num_t;	/* xtalk widget revision number */
+
+#define XWIDGET_REV_NUM_NONE	-1
+
+typedef int xwidget_mfg_num_t;	/* xtalk widget manufacturing ID */
+
+#define XWIDGET_MFG_NUM_NONE	-1
+
+typedef struct xtalk_piomap_s *xtalk_piomap_t;
+
+/* It is often convenient to fold the XIO target port
+ * number into the XIO address.
+ */
+#define	XIO_NOWHERE	(0xFFFFFFFFFFFFFFFFull)
+#define	XIO_ADDR_BITS	(0x0000FFFFFFFFFFFFull)
+#define	XIO_PORT_BITS	(0xF000000000000000ull)
+#define	XIO_PORT_SHIFT	(60)
+
+#define	XIO_PACKED(x)	(((x)&XIO_PORT_BITS) != 0)
+#define	XIO_ADDR(x)	((x)&XIO_ADDR_BITS)
+#define	XIO_PORT(x)	((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
+#define	XIO_PACK(p,o)	((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
+
+
+/*
+ * Kernel/driver only definitions
+ */
+#if __KERNEL__
+
+#include <asm/types.h>
+#include <asm/sn/types.h>
+#include <asm/sn/alenlist.h>
+#include <asm/sn/ioerror.h>
+#include <asm/sn/iobus.h>
+#include <asm/sn/dmamap.h>
+
+struct xwidget_hwid_s;
+
+/*
+ *    Acceptable flag bits for xtalk service calls
+ *
+ * XTALK_FIXED: require that mappings be established
+ *	using fixed sharable resources; address
+ *	translation results will be permanently
+ *	available. (PIOMAP_FIXED and DMAMAP_FIXED are
+ *	the same numeric value and are acceptable).
+ * XTALK_NOSLEEP: if any part of the operation would
+ *	sleep waiting for resoruces, return an error
+ *	instead. (PIOMAP_NOSLEEP and DMAMAP_NOSLEEP are
+ *	the same numeric value and are acceptable).
+ * XTALK_INPLACE: when operating on alenlist structures,
+ *	reuse the source alenlist rather than creating a
+ *	new one. (PIOMAP_INPLACE and DMAMAP_INPLACE are
+ *	the same numeric value and are acceptable).
+ */
+#define	XTALK_FIXED		DMAMAP_FIXED
+#define	XTALK_NOSLEEP		DMAMAP_NOSLEEP
+#define	XTALK_INPLACE		DMAMAP_INPLACE
+
+/* PIO MANAGEMENT */
+typedef xtalk_piomap_t
+xtalk_piomap_alloc_f    (devfs_handle_t dev,	/* set up mapping for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
+			 size_t byte_count,
+			 size_t byte_count_max,		/* maximum size of a mapping */
+			 unsigned flags);	/* defined in sys/pio.h */
+typedef void
+xtalk_piomap_free_f     (xtalk_piomap_t xtalk_piomap);
+
+typedef caddr_t
+xtalk_piomap_addr_f     (xtalk_piomap_t xtalk_piomap,	/* mapping resources */
+			 iopaddr_t xtalk_addr,	/* map for this xtalk address */
+			 size_t byte_count);	/* map this many bytes */
+
+typedef void
+xtalk_piomap_done_f     (xtalk_piomap_t xtalk_piomap);
+
+typedef caddr_t
+xtalk_piotrans_addr_f   (devfs_handle_t dev,	/* translate for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 iopaddr_t xtalk_addr,	/* Crosstalk address */
+			 size_t byte_count,	/* map this many bytes */
+			 unsigned flags);	/* (currently unused) */
+
+extern caddr_t
+xtalk_pio_addr		(devfs_handle_t dev,	/* translate for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 iopaddr_t xtalk_addr,	/* Crosstalk address */
+			 size_t byte_count,	/* map this many bytes */
+			 xtalk_piomap_t *xtalk_piomapp,	/* RETURNS mapping resources */
+			 unsigned flags);	/* (currently unused) */
+
+/* DMA MANAGEMENT */
+
+typedef struct xtalk_dmamap_s *xtalk_dmamap_t;
+
+typedef xtalk_dmamap_t
+xtalk_dmamap_alloc_f    (devfs_handle_t dev,	/* set up mappings for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 size_t byte_count_max,		/* max size of a mapping */
+			 unsigned flags);	/* defined in dma.h */
+
+typedef void
+xtalk_dmamap_free_f     (xtalk_dmamap_t dmamap);
+
+typedef iopaddr_t
+xtalk_dmamap_addr_f     (xtalk_dmamap_t dmamap,		/* use these mapping resources */
+			 paddr_t paddr,		/* map for this address */
+			 size_t byte_count);	/* map this many bytes */
+
+typedef alenlist_t
+xtalk_dmamap_list_f     (xtalk_dmamap_t dmamap,		/* use these mapping resources */
+			 alenlist_t alenlist,	/* map this address/length list */
+			 unsigned flags);
+
+typedef void
+xtalk_dmamap_done_f     (xtalk_dmamap_t dmamap);
+
+typedef iopaddr_t
+xtalk_dmatrans_addr_f   (devfs_handle_t dev,	/* translate for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 paddr_t paddr,		/* system physical address */
+			 size_t byte_count,	/* length */
+			 unsigned flags);
+
+typedef alenlist_t
+xtalk_dmatrans_list_f   (devfs_handle_t dev,	/* translate for this device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 alenlist_t palenlist,	/* system address/length list */
+			 unsigned flags);
+
+typedef void
+xtalk_dmamap_drain_f	(xtalk_dmamap_t map);	/* drain this map's channel */
+
+typedef void
+xtalk_dmaaddr_drain_f	(devfs_handle_t vhdl,	/* drain channel from this device */
+			 paddr_t addr,		/* to this physical address */
+			 size_t bytes);		/* for this many bytes */
+
+typedef void
+xtalk_dmalist_drain_f	(devfs_handle_t vhdl,	/* drain channel from this device */
+			 alenlist_t list);	/* for this set of physical blocks */
+
+
+/* INTERRUPT MANAGEMENT */
+
+/*
+ * A xtalk interrupt resource handle.  When resources are allocated
+ * in order to satisfy a xtalk_intr_alloc request, a xtalk_intr handle
+ * is returned.  xtalk_intr_connect associates a software handler with
+ *
+ * these system resources.
+ */
+typedef struct xtalk_intr_s *xtalk_intr_t;
+
+
+/*
+ * When a crosstalk device connects an interrupt, it passes in a function
+ * that knows how to set its xtalk interrupt register appropriately.  The
+ * low-level interrupt code may invoke this function later in order to
+ * migrate an interrupt transparently to the device driver(s) that use this
+ * interrupt.
+ *
+ * The argument passed to this function contains enough information for a
+ * crosstalk device to (re-)target an interrupt.  A function of this type
+ * must be supplied by every crosstalk driver.
+ */
+typedef int
+xtalk_intr_setfunc_f    (xtalk_intr_t intr_hdl);	/* interrupt handle */
+
+typedef xtalk_intr_t
+xtalk_intr_alloc_f      (devfs_handle_t dev,	/* which crosstalk device */
+			 device_desc_t dev_desc,	/* device descriptor */
+			 devfs_handle_t owner_dev);	/* owner of this intr */
+
+typedef void
+xtalk_intr_free_f       (xtalk_intr_t intr_hdl);
+
+typedef int
+xtalk_intr_connect_f    (xtalk_intr_t intr_hdl,		/* xtalk intr resource handle */
+			 intr_func_t intr_func,		/* xtalk intr handler */
+			 void *intr_arg,	/* arg to intr handler */
+			 xtalk_intr_setfunc_f *setfunc,		/* func to set intr hw */
+			 void *setfunc_arg,	/* arg to setfunc. This must be */
+							/* sufficient to determine which */
+							/* interrupt on which board needs */
+							/* to be set. */
+			 void *thread);		/* which intr thread to use */
+
+typedef void
+xtalk_intr_disconnect_f (xtalk_intr_t intr_hdl);
+
+typedef devfs_handle_t
+xtalk_intr_cpu_get_f    (xtalk_intr_t intr_hdl);	/* xtalk intr resource handle */
+
+/* CONFIGURATION MANAGEMENT */
+
+typedef void
+xtalk_provider_startup_f (devfs_handle_t xtalk_provider);
+
+typedef void
+xtalk_provider_shutdown_f (devfs_handle_t xtalk_provider);
+
+typedef void
+xtalk_widgetdev_enable_f (devfs_handle_t, int);
+
+typedef void
+xtalk_widgetdev_shutdown_f (devfs_handle_t, int);
+
+typedef int
+xtalk_dma_enabled_f (devfs_handle_t);
+
+/* Error Management */
+
+typedef int
+xtalk_error_devenable_f (devfs_handle_t xconn_vhdl,
+			 int devnum,
+			 int error_code);
+
+/* Early Action Support */
+typedef caddr_t
+xtalk_early_piotrans_addr_f (xwidget_part_num_t part_num,
+			     xwidget_mfg_num_t mfg_num,
+			     int which,
+			     iopaddr_t xtalk_addr,
+			     size_t byte_count,
+			     unsigned flags);
+
+/*
+ * Adapters that provide a crosstalk interface adhere to this software interface.
+ */
+typedef struct xtalk_provider_s {
+    /* PIO MANAGEMENT */
+    xtalk_piomap_alloc_f   *piomap_alloc;
+    xtalk_piomap_free_f    *piomap_free;
+    xtalk_piomap_addr_f    *piomap_addr;
+    xtalk_piomap_done_f    *piomap_done;
+    xtalk_piotrans_addr_f  *piotrans_addr;
+
+    /* DMA MANAGEMENT */
+    xtalk_dmamap_alloc_f   *dmamap_alloc;
+    xtalk_dmamap_free_f    *dmamap_free;
+    xtalk_dmamap_addr_f    *dmamap_addr;
+    xtalk_dmamap_list_f    *dmamap_list;
+    xtalk_dmamap_done_f    *dmamap_done;
+    xtalk_dmatrans_addr_f  *dmatrans_addr;
+    xtalk_dmatrans_list_f  *dmatrans_list;
+    xtalk_dmamap_drain_f   *dmamap_drain;
+    xtalk_dmaaddr_drain_f  *dmaaddr_drain;
+    xtalk_dmalist_drain_f  *dmalist_drain;
+
+    /* INTERRUPT MANAGEMENT */
+    xtalk_intr_alloc_f     *intr_alloc;
+    xtalk_intr_free_f      *intr_free;
+    xtalk_intr_connect_f   *intr_connect;
+    xtalk_intr_disconnect_f *intr_disconnect;
+    xtalk_intr_cpu_get_f   *intr_cpu_get;
+
+    /* CONFIGURATION MANAGEMENT */
+    xtalk_provider_startup_f *provider_startup;
+    xtalk_provider_shutdown_f *provider_shutdown;
+
+    /* Error Management     */
+    xtalk_error_devenable_f *error_devenable;
+} xtalk_provider_t;
+
+/* Crosstalk devices use these standard Crosstalk provider interfaces */
+extern xtalk_piomap_alloc_f xtalk_piomap_alloc;
+extern xtalk_piomap_free_f xtalk_piomap_free;
+extern xtalk_piomap_addr_f xtalk_piomap_addr;
+extern xtalk_piomap_done_f xtalk_piomap_done;
+extern xtalk_piotrans_addr_f xtalk_piotrans_addr;
+extern xtalk_dmamap_alloc_f xtalk_dmamap_alloc;
+extern xtalk_dmamap_free_f xtalk_dmamap_free;
+extern xtalk_dmamap_addr_f xtalk_dmamap_addr;
+extern xtalk_dmamap_list_f xtalk_dmamap_list;
+extern xtalk_dmamap_done_f xtalk_dmamap_done;
+extern xtalk_dmatrans_addr_f xtalk_dmatrans_addr;
+extern xtalk_dmatrans_list_f xtalk_dmatrans_list;
+extern xtalk_dmamap_drain_f xtalk_dmamap_drain;
+extern xtalk_dmaaddr_drain_f xtalk_dmaaddr_drain;
+extern xtalk_dmalist_drain_f xtalk_dmalist_drain;
+extern xtalk_intr_alloc_f xtalk_intr_alloc;
+extern xtalk_intr_free_f xtalk_intr_free;
+extern xtalk_intr_connect_f xtalk_intr_connect;
+extern xtalk_intr_disconnect_f xtalk_intr_disconnect;
+extern xtalk_intr_cpu_get_f xtalk_intr_cpu_get;
+extern xtalk_provider_startup_f xtalk_provider_startup;
+extern xtalk_provider_shutdown_f xtalk_provider_shutdown;
+extern xtalk_widgetdev_enable_f xtalk_widgetdev_enable;
+extern xtalk_widgetdev_shutdown_f xtalk_widgetdev_shutdown;
+extern xtalk_dma_enabled_f xtalk_dma_enabled;
+extern xtalk_error_devenable_f xtalk_error_devenable;
+extern xtalk_early_piotrans_addr_f xtalk_early_piotrans_addr;
+
+/* error management */
+
+extern int              xtalk_error_handler(devfs_handle_t,
+					    int,
+					    ioerror_mode_t,
+					    ioerror_t *);
+
+/*
+ * Generic crosstalk interface, for use with all crosstalk providers
+ * and all crosstalk devices.
+ */
+typedef unchar xtalk_intr_vector_t;	/* crosstalk interrupt vector (0..255) */
+
+#define XTALK_INTR_VECTOR_NONE	(xtalk_intr_vector_t)0
+
+/* Generic crosstalk interrupt interfaces */
+extern devfs_handle_t     xtalk_intr_dev_get(xtalk_intr_t xtalk_intr);
+extern xwidgetnum_t     xtalk_intr_target_get(xtalk_intr_t xtalk_intr);
+extern xtalk_intr_vector_t xtalk_intr_vector_get(xtalk_intr_t xtalk_intr);
+extern iopaddr_t        xtalk_intr_addr_get(xtalk_intr_t xtalk_intr);
+extern devfs_handle_t     xtalk_intr_cpu_get(xtalk_intr_t xtalk_intr);
+extern void            *xtalk_intr_sfarg_get(xtalk_intr_t xtalk_intr);
+
+extern int		xtalk_intr_flags_get(xtalk_intr_t xtalk_intr);
+/* XTALK_INTR flags */
+#define XTALK_INTR_NOTHREAD	1	/* interrupt handler wants to be called at interrupt level */
+
+/* Generic crosstalk pio interfaces */
+extern devfs_handle_t     xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap);
+extern xwidgetnum_t     xtalk_pio_target_get(xtalk_piomap_t xtalk_piomap);
+extern iopaddr_t        xtalk_pio_xtalk_addr_get(xtalk_piomap_t xtalk_piomap);
+extern size_t           xtalk_pio_mapsz_get(xtalk_piomap_t xtalk_piomap);
+extern caddr_t          xtalk_pio_kvaddr_get(xtalk_piomap_t xtalk_piomap);
+
+/* Generic crosstalk dma interfaces */
+extern devfs_handle_t     xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap);
+extern xwidgetnum_t     xtalk_dma_target_get(xtalk_dmamap_t xtalk_dmamap);
+
+/* Register/unregister Crosstalk providers and get implementation handle */
+extern void             xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *);
+extern void             xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns);
+extern void             xtalk_provider_unregister(devfs_handle_t provider);
+extern xtalk_provider_t *xtalk_provider_fns_get(devfs_handle_t provider);
+
+/* Crosstalk Switch generic layer, for use by initialization code */
+extern void             xswitch_census(devfs_handle_t xswitchv);
+extern void             xswitch_init_widgets(devfs_handle_t xswitchv);
+
+/* early init interrupt management */
+
+typedef void
+xwidget_intr_preset_f   (void *which_widget,
+			 int which_widget_intr,
+			 xwidgetnum_t targ,
+			 iopaddr_t addr,
+			 xtalk_intr_vector_t vect);
+
+typedef void
+xtalk_intr_prealloc_f   (void *which_xtalk,
+			 xtalk_intr_vector_t xtalk_vector,
+			 xwidget_intr_preset_f *preset_func,
+			 void *which_widget,
+			 int which_widget_intr);
+
+typedef void
+xtalk_intr_preconn_f    (void *which_xtalk,
+			 xtalk_intr_vector_t xtalk_vector,
+			 intr_func_t intr_func,
+			 intr_arg_t intr_arg);
+
+
+#define XTALK_ADDR_TO_UPPER(xtalk_addr) (((iopaddr_t)(xtalk_addr) >> 32) & 0xffff)
+#define XTALK_ADDR_TO_LOWER(xtalk_addr) ((iopaddr_t)(xtalk_addr) & 0xffffffff)
+
+typedef xtalk_intr_setfunc_f *xtalk_intr_setfunc_t;
+
+typedef void		xtalk_iter_f(devfs_handle_t vhdl);
+
+extern void		xtalk_iterate(char *prefix, xtalk_iter_f *func);
+
+extern int		xtalk_device_powerup(devfs_handle_t, xwidgetnum_t);
+extern int		xtalk_device_shutdown(devfs_handle_t, xwidgetnum_t);
+extern int		xtalk_device_inquiry(devfs_handle_t, xwidgetnum_t);
+
+#endif				/* __KERNEL__ */
+#endif				/* _ASM_SN_XTALK_XTALK_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xtalk_private.h linux/include/asm-ia64/sn/xtalk/xtalk_private.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xtalk_private.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/xtalk/xtalk_private.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,90 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_XTALK_XTALK_PRIVATE_H
+#define _ASM_SN_XTALK_XTALK_PRIVATE_H
+
+#ifdef IRIX
+#include <sys/ioerror.h>	/* for error function and arg types */
+#else
+#include <asm/sn/ioerror.h>        /* for error function and arg types */
+#endif
+
+/*
+ * xtalk_private.h -- private definitions for xtalk
+ * crosstalk drivers should NOT include this file.
+ */
+
+/*
+ * All Crosstalk providers set up PIO using this information.
+ */
+struct xtalk_piomap_s {
+    devfs_handle_t            xp_dev;	/* a requestor of this mapping */
+    xwidgetnum_t            xp_target;	/* target (node's widget number) */
+    iopaddr_t               xp_xtalk_addr;	/* which crosstalk addr is mapped */
+    size_t                  xp_mapsz;	/* size of this mapping */
+    caddr_t                 xp_kvaddr;	/* kernel virtual address to use */
+};
+
+/*
+ * All Crosstalk providers set up DMA using this information.
+ */
+struct xtalk_dmamap_s {
+    devfs_handle_t            xd_dev;	/* a requestor of this mapping */
+    xwidgetnum_t            xd_target;	/* target (node's widget number) */
+};
+
+/*
+ * All Crosstalk providers set up interrupts using this information.
+ */
+struct xtalk_intr_s {
+    int                     xi_flags;	/* XTALK_INTR flags */
+    devfs_handle_t            xi_dev;	/* requestor of this intr */
+    xwidgetnum_t            xi_target;	/* master's widget number */
+    xtalk_intr_vector_t     xi_vector;	/* 8-bit interrupt vector */
+    iopaddr_t               xi_addr;	/* xtalk address to generate intr */
+    void                   *xi_sfarg;	/* argument for setfunc */
+    xtalk_intr_setfunc_t    xi_setfunc;		/* device's setfunc routine */
+};
+
+/*
+ * Xtalk interrupt handler structure access functions
+ */
+#define	xtalk_intr_arg(xt)	((xt)->xi_sfarg)
+
+#define	xwidget_hwid_is_sn0_xswitch(_hwid)	\
+		(((_hwid)->part_num == XBOW_WIDGET_PART_NUM ) &&  	\
+		 ((_hwid)->mfg_num == XBOW_WIDGET_MFGR_NUM ))
+
+#define	xwidget_hwid_is_sn1_xswitch(_hwid)	\
+		(((_hwid)->part_num == XXBOW_WIDGET_PART_NUM ) &&  	\
+		 ((_hwid)->mfg_num == XXBOW_WIDGET_MFGR_NUM ))
+
+#define	xwidget_hwid_is_xswitch(_hwid)	\
+		(xwidget_hwid_is_sn0_xswitch(_hwid) ||			\
+			xwidget_hwid_is_sn1_xswitch(_hwid))
+
+/* common iograph info for all widgets,
+ * stashed in FASTINFO of widget connection points.
+ */
+struct xwidget_info_s {
+    char                   *w_fingerprint;
+    devfs_handle_t            w_vertex;	/* back pointer to vertex */
+    xwidgetnum_t            w_id;	/* widget id */
+    struct xwidget_hwid_s   w_hwid;	/* hardware identification (part/rev/mfg) */
+    devfs_handle_t            w_master;	/* CACHED widget's master */
+    xwidgetnum_t            w_masterid;		/* CACHED widget's master's widgetnum */
+    error_handler_f        *w_efunc;	/* error handling function */
+    error_handler_arg_t     w_einfo;	/* first parameter for efunc */
+    char		   *w_name;	/* canonical hwgraph name */	
+};
+
+extern char             widget_info_fingerprint[];
+
+#endif				/* _ASM_SN_XTALK_XTALK_PRIVATE_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xtalkaddrs.h linux/include/asm-ia64/sn/xtalk/xtalkaddrs.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xtalkaddrs.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/xtalk/xtalkaddrs.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,111 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_XTALK_XTALKADDRS_H
+#define _ASM_SN_XTALK_XTALKADDRS_H
+
+/*
+ * CrossTalk to SN0 Hub addressing support
+ *
+ * This file defines the mapping conventions used by the Hub's
+ * I/O interface when it receives a read or write request from 
+ * a CrossTalk widget.  
+ *
+ * Format for Memory accesses:
+ *
+ *  +--------------+------------------------------------------------+
+ *  | 0  | XXXXX   |        SN0Addr                                |
+ *  +----+---------+------------------------------------------------+
+ *    47  46     40 39                                             0
+ *	bit 47 indicates Memory (0)
+ *	bits 46..40 are unused
+ *	bits 39..0 hold the memory address
+ *			(bits 39..31 hold the nodeID in N mode
+ *			 bits 39..32 hold the nodeID in M mode)
+ * By design, this looks exactly like a 0-extended SN0 Address, so
+ * we don't need to do any conversions.
+ *
+ *
+ *
+ * Format for non-Memory accesses:
+ *
+ *  +--------------+------+---------+------+--+---------------------+
+ *  | 1  | DstNode | XXXX | BigW=0  | SW=1 | 1|   Addr              |
+ *  +----+---------+------+---------+------+--+---------------------+
+ *    47  46     38 37  31 30     28 27  24 23 22                  0
+ *
+ *	bit 47 indicates IO (1)
+ *      bits 46..38 hold the destination node ID
+ *      bits 37..31 are unused
+ *      bits 30..28 hold the big window being addressed
+ *      bits 27..24 hold the small window being addressed
+ *                  0 always refers to the xbow
+ *                  1 always refers to the hub itself
+ *      bit 23 indicates local (0) or remote (1)
+ *             no accessing checks are done if this bit is 0
+ *      bits 22..0 hold the register address
+ *                 bits 22..21 determine which section of the hub
+ *                              00 -> PI
+ *                              01 -> MD
+ *                              10 -> IO
+ *                              11 -> NI
+ * This looks very much like a REMOTE_HUB access, except the nodeID
+ * is in a different place, and the highest xtalk bit is set.
+ */
+
+/* Hub-specific xtalk definitions */
+
+#define HX_MEM_BIT		0L	/* Hub's idea of xtalk memory access */
+#define HX_IO_BIT		1L	/* Hub's idea of xtalk register access */
+#define HX_ACCTYPE_SHIFT	47
+
+#if CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 || CONFIG_IA64_GENERIC
+#define HX_NODE_SHIFT		39
+#endif
+
+#define HX_BIGWIN_SHIFT		28
+
+#define HX_SWIN_SHIFT		23
+
+#define HX_LOCACC		0L	/* local access */
+#define HX_REMACC		1L	/* remote access */
+#define HX_ACCESS_SHIFT		23
+
+/*
+ * Pre-calculate the fixed portion of a crosstalk address that maps
+ * to local register space on a hub.
+ */
+#define HX_REG_BASE		((HX_IO_BIT<<HX_ACCTYPE_SHIFT) + \
+				(0L<<HX_BIGWIN_SHIFT) + \
+				(1L<<HX_SWIN_SHIFT) + IALIAS_SIZE + \
+				(HX_REMACC<<HX_ACCESS_SHIFT))
+
+/* 
+ * Return a crosstalk address which a widget can use to access a
+ * designated register on a designated node.
+ */
+#define HUBREG_AS_XTALKADDR(nasid, regaddr) \
+	((iopaddr_t)(HX_REG_BASE + (((long)nasid)<<HX_NODE_SHIFT) + ((long)regaddr)))
+
+#if TBD
+#assert sizeof(iopaddr_t) == 8
+#endif /* TBD */
+
+/*
+ * Get widget part number, given node id and widget id. 
+ * Always do a 32-bit read, because some widgets, e.g., Bridge, require so.
+ * Widget ID is at offset 0 for 64-bit access.  Add 4 to get lower 32 bits
+ * in big endian mode. 
+ * XXX Double check this with Hub, Xbow, Bridge and other hardware folks.
+ */
+#define XWIDGET_ID_READ(nasid, widget) \
+        (widgetreg_t)(*(volatile uint32_t *)(NODE_SWIN_BASE(nasid, widget) + WIDGET_ID))
+
+
+#endif /* _ASM_SN_XTALK_XTALKADDRS_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xwidget.h linux/include/asm-ia64/sn/xtalk/xwidget.h
--- v2.4.0-prerelease/linux/include/asm-ia64/sn/xtalk/xwidget.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ia64/sn/xtalk/xwidget.h	Thu Jan  4 13:00:15 2001
@@ -0,0 +1,308 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef __ASM_SN_XTALK_XWIDGET_H__
+#define __ASM_SN_XTALK_XWIDGET_H__
+
+/*
+ * xwidget.h - generic crosstalk widget header file
+ */
+
+#include <asm/sn/xtalk/xtalk.h>
+#if LANGUAGE_C
+#include <asm/sn/cdl.h>
+#endif /* LANGUAGE_C */
+
+#ifdef LITTLE_ENDIAN
+#define WIDGET_ID			0x00
+#define WIDGET_STATUS			0x08
+#define WIDGET_ERR_UPPER_ADDR		0x10
+#define WIDGET_ERR_LOWER_ADDR		0x18
+#define WIDGET_CONTROL			0x20
+#define WIDGET_REQ_TIMEOUT		0x28
+#define WIDGET_INTDEST_UPPER_ADDR	0x30
+#define WIDGET_INTDEST_LOWER_ADDR	0x38
+#define WIDGET_ERR_CMD_WORD		0x40
+#define WIDGET_LLP_CFG			0x48
+#define WIDGET_TFLUSH			0x50
+#else	/* !LITTLE_ENDIAN */
+#define WIDGET_ID                       0x04
+#define WIDGET_STATUS                   0x0c
+#define WIDGET_ERR_UPPER_ADDR           0x14
+#define WIDGET_ERR_LOWER_ADDR           0x1c
+#define WIDGET_CONTROL                  0x24
+#define WIDGET_REQ_TIMEOUT              0x2c
+#define WIDGET_INTDEST_UPPER_ADDR       0x34
+#define WIDGET_INTDEST_LOWER_ADDR       0x3c
+#define WIDGET_ERR_CMD_WORD             0x44
+#define WIDGET_LLP_CFG                  0x4c
+#define WIDGET_TFLUSH                   0x54
+#endif
+
+/* WIDGET_ID */
+#define WIDGET_REV_NUM			0xf0000000
+#define WIDGET_PART_NUM			0x0ffff000
+#define WIDGET_MFG_NUM			0x00000ffe
+#define WIDGET_REV_NUM_SHFT		28
+#define WIDGET_PART_NUM_SHFT		12
+#define WIDGET_MFG_NUM_SHFT		1
+
+#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
+#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
+#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
+#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
+					XWIDGET_REV_NUM(widgetid))
+
+/* WIDGET_STATUS */
+#define WIDGET_LLP_REC_CNT		0xff000000
+#define WIDGET_LLP_TX_CNT		0x00ff0000
+#define WIDGET_PENDING			0x0000001f
+
+/* WIDGET_ERR_UPPER_ADDR */
+#define	WIDGET_ERR_UPPER_ADDR_ONLY	0x0000ffff
+
+/* WIDGET_CONTROL */
+#define WIDGET_F_BAD_PKT		0x00010000
+#define WIDGET_LLP_XBAR_CRD		0x0000f000
+#define	WIDGET_LLP_XBAR_CRD_SHFT	12
+#define WIDGET_CLR_RLLP_CNT		0x00000800
+#define WIDGET_CLR_TLLP_CNT		0x00000400
+#define WIDGET_SYS_END			0x00000200
+#define WIDGET_MAX_TRANS		0x000001f0
+#define WIDGET_PCI_SPEED		0x00000030
+#define WIDGET_PCI_SPEED_SHFT		4
+#define WIDGET_PCI_SPEED_33MHZ 0
+#define WIDGET_PCI_SPEED_66MHZ 1
+#define WIDGET_WIDGET_ID		0x0000000f
+
+/* WIDGET_INTDEST_UPPER_ADDR */
+#define WIDGET_INT_VECTOR		0xff000000
+#define WIDGET_INT_VECTOR_SHFT		24
+#define WIDGET_TARGET_ID		0x000f0000
+#define WIDGET_TARGET_ID_SHFT		16
+#define WIDGET_UPP_ADDR			0x0000ffff
+
+/* WIDGET_ERR_CMD_WORD */
+#define WIDGET_DIDN			0xf0000000
+#define WIDGET_SIDN			0x0f000000
+#define WIDGET_PACTYP			0x00f00000
+#define WIDGET_TNUM			0x000f8000
+#define WIDGET_COHERENT			0x00004000
+#define WIDGET_DS			0x00003000
+#define WIDGET_GBR			0x00000800
+#define WIDGET_VBPM			0x00000400
+#define WIDGET_ERROR			0x00000200
+#define WIDGET_BARRIER			0x00000100
+
+/* WIDGET_LLP_CFG */
+#define WIDGET_LLP_MAXRETRY		0x03ff0000
+#define WIDGET_LLP_MAXRETRY_SHFT	16
+#define WIDGET_LLP_NULLTIMEOUT		0x0000fc00
+#define WIDGET_LLP_NULLTIMEOUT_SHFT	10
+#define WIDGET_LLP_MAXBURST		0x000003ff
+#define WIDGET_LLP_MAXBURST_SHFT	0
+
+/*
+ * according to the crosstalk spec, only 32-bits access to the widget
+ * configuration registers is allowed.  some widgets may allow 64-bits
+ * access but software should not depend on it.  registers beyond the
+ * widget target flush register are widget dependent thus will not be
+ * defined here
+ */
+#if _LANGUAGE_C
+typedef uint32_t      widgetreg_t;
+
+/* widget configuration registers */
+typedef volatile struct widget_cfg {
+#ifdef LITTLE_ENDIAN
+/*
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
+ * That's why we put the register first and filler second.
+ */
+    widgetreg_t		    w_id;	/* 0x04 */
+    widgetreg_t		    w_pad_0;	/* 0x00 */
+    widgetreg_t		    w_status;	/* 0x0c */
+    widgetreg_t		    w_pad_1;	/* 0x08 */
+    widgetreg_t		    w_err_upper_addr;	/* 0x14 */
+    widgetreg_t		    w_pad_2;	/* 0x10 */
+    widgetreg_t		    w_err_lower_addr;	/* 0x1c */
+    widgetreg_t		    w_pad_3;	/* 0x18 */
+    widgetreg_t		    w_control;	/* 0x24 */
+    widgetreg_t		    w_pad_4;	/* 0x20 */
+    widgetreg_t		    w_req_timeout;	/* 0x2c */
+    widgetreg_t		    w_pad_5;	/* 0x28 */
+    widgetreg_t		    w_intdest_upper_addr;	/* 0x34 */
+    widgetreg_t		    w_pad_6;	/* 0x30 */
+    widgetreg_t		    w_intdest_lower_addr;	/* 0x3c */
+    widgetreg_t		    w_pad_7;	/* 0x38 */
+    widgetreg_t		    w_err_cmd_word;	/* 0x44 */
+    widgetreg_t		    w_pad_8;	/* 0x40 */
+    widgetreg_t		    w_llp_cfg;	/* 0x4c */
+    widgetreg_t		    w_pad_9;	/* 0x48 */
+    widgetreg_t		    w_tflush;	/* 0x54 */
+    widgetreg_t		    w_pad_10;	/* 0x50 */
+#else
+    widgetreg_t		    w_pad_0;	/* 0x00 */
+    widgetreg_t		    w_id;	/* 0x04 */
+    widgetreg_t		    w_pad_1;	/* 0x08 */
+    widgetreg_t		    w_status;	/* 0x0c */
+    widgetreg_t		    w_pad_2;	/* 0x10 */
+    widgetreg_t		    w_err_upper_addr;	/* 0x14 */
+    widgetreg_t		    w_pad_3;	/* 0x18 */
+    widgetreg_t		    w_err_lower_addr;	/* 0x1c */
+    widgetreg_t		    w_pad_4;	/* 0x20 */
+    widgetreg_t		    w_control;	/* 0x24 */
+    widgetreg_t		    w_pad_5;	/* 0x28 */
+    widgetreg_t		    w_req_timeout;	/* 0x2c */
+    widgetreg_t		    w_pad_6;	/* 0x30 */
+    widgetreg_t		    w_intdest_upper_addr;	/* 0x34 */
+    widgetreg_t		    w_pad_7;	/* 0x38 */
+    widgetreg_t		    w_intdest_lower_addr;	/* 0x3c */
+    widgetreg_t		    w_pad_8;	/* 0x40 */
+    widgetreg_t		    w_err_cmd_word;	/* 0x44 */
+    widgetreg_t		    w_pad_9;	/* 0x48 */
+    widgetreg_t		    w_llp_cfg;	/* 0x4c */
+    widgetreg_t		    w_pad_10;	/* 0x50 */
+    widgetreg_t		    w_tflush;	/* 0x54 */
+#endif /* LITTLE_ENDIAN */
+} widget_cfg_t;
+
+#ifdef LITTLE_ENDIAN
+typedef struct {
+    unsigned                other:8;
+    unsigned                bo:1;
+    unsigned                error:1;
+    unsigned                vbpm:1;
+    unsigned                gbr:1;
+    unsigned                ds:2;
+    unsigned                ct:1;
+    unsigned                tnum:5;
+    unsigned                pactyp:4;
+    unsigned                sidn:4;
+    unsigned                didn:4;
+} w_err_cmd_word_f;
+#else
+typedef struct {
+    unsigned                didn:4;
+    unsigned                sidn:4;
+    unsigned                pactyp:4;
+    unsigned                tnum:5;
+    unsigned                ct:1;
+    unsigned                ds:2;
+    unsigned                gbr:1;
+    unsigned                vbpm:1;
+    unsigned                error:1;
+    unsigned                bo:1;
+    unsigned                other:8;
+} w_err_cmd_word_f;
+#endif
+
+#ifdef LITTLE_ENDIAN
+typedef union {
+    w_err_cmd_word_f        f;
+    widgetreg_t             r;
+} w_err_cmd_word_u;
+#else
+typedef union {
+    widgetreg_t             r;
+    w_err_cmd_word_f        f;
+} w_err_cmd_word_u;
+#endif
+
+/* IO widget initialization function */
+typedef struct xwidget_info_s *xwidget_info_t;
+
+/*
+ * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
+ */
+#ifdef LITTLE_ENDIAN
+typedef struct xwidget_hwid_s {
+    xwidget_mfg_num_t       mfg_num;
+    xwidget_rev_num_t       rev_num;
+    xwidget_part_num_t      part_num;
+}                      *xwidget_hwid_t;
+#else
+typedef struct xwidget_hwid_s {
+    xwidget_part_num_t      part_num;
+    xwidget_rev_num_t       rev_num;
+    xwidget_mfg_num_t       mfg_num;
+}                      *xwidget_hwid_t;
+#endif
+
+
+/*
+ * Returns 1 if a driver that handles devices described by hwid1 is able
+ * to manage a device with hardwareid hwid2.  NOTE: We don't check rev
+ * numbers at all.
+ */
+#define XWIDGET_HARDWARE_ID_MATCH(hwid1, hwid2) \
+	(((hwid1)->part_num == (hwid2)->part_num) && \
+	(((hwid1)->mfg_num == XWIDGET_MFG_NUM_NONE) || \
+	((hwid2)->mfg_num == XWIDGET_MFG_NUM_NONE) || \
+	((hwid1)->mfg_num == (hwid2)->mfg_num)))
+
+
+/* Generic crosstalk widget initialization interface */
+#if __KERNEL__
+
+extern int              xwidget_driver_register(xwidget_part_num_t part_num,
+						xwidget_mfg_num_t mfg_num,
+						char *driver_prefix,
+						unsigned flags);
+
+extern void             xwidget_driver_unregister(char *driver_prefix);
+
+extern int              xwidget_register(struct xwidget_hwid_s *hwid,
+					 devfs_handle_t dev,
+					 xwidgetnum_t id,
+					 devfs_handle_t master,
+					 xwidgetnum_t targetid,
+					 async_attach_t aa);
+
+extern int		xwidget_unregister(devfs_handle_t);
+extern void	        xwidget_error_register(devfs_handle_t xwidget,
+						error_handler_f * efunc,
+						error_handler_arg_t einfo);
+
+extern void             xwidget_reset(devfs_handle_t xwidget);
+extern void             xwidget_gfx_reset(devfs_handle_t xwidget);
+extern char		*xwidget_name_get(devfs_handle_t xwidget);	
+
+/* Generic crosstalk widget information access interface */
+extern xwidget_info_t   xwidget_info_chk(devfs_handle_t widget);
+extern xwidget_info_t   xwidget_info_get(devfs_handle_t widget);
+extern void             xwidget_info_set(devfs_handle_t widget, xwidget_info_t widget_info);
+extern devfs_handle_t     xwidget_info_dev_get(xwidget_info_t xwidget_info);
+extern xwidgetnum_t     xwidget_info_id_get(xwidget_info_t xwidget_info);
+extern int              xwidget_info_type_get(xwidget_info_t xwidget_info);
+extern int              xwidget_info_state_get(xwidget_info_t xwidget_info);
+extern devfs_handle_t     xwidget_info_master_get(xwidget_info_t xwidget_info);
+extern xwidgetnum_t     xwidget_info_masterid_get(xwidget_info_t xwidget_info);
+extern xwidget_part_num_t xwidget_info_part_num_get(xwidget_info_t xwidget_info);
+extern xwidget_rev_num_t xwidget_info_rev_num_get(xwidget_info_t xwidget_info);
+extern xwidget_mfg_num_t xwidget_info_mfg_num_get(xwidget_info_t xwidget_info);
+
+
+/*
+ * TBD: DELETE THIS ENTIRE STRUCTURE!  Equivalent is now in
+ * xtalk_private.h: xwidget_info_s
+ * This is just here for now because we still have a lot of
+ * junk referencing it.
+ * However, since nobody looks inside ...
+ */
+typedef struct v_widget_s {
+    unsigned                v_widget_s_is_really_empty;
+#define	v_widget_s_is_really_empty	and using this would be a syntax error.
+} v_widget_t;
+#endif				/* __KERNEL__ */
+
+#endif				/* _LANGUAGE_C */
+
+#endif				/* __ASM_SN_XTALK_XWIDGET_H__ */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/spinlock.h linux/include/asm-ia64/spinlock.h
--- v2.4.0-prerelease/linux/include/asm-ia64/spinlock.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/spinlock.h	Thu Jan  4 12:50:18 2001
@@ -18,8 +18,9 @@
 #undef NEW_LOCK
 
 #ifdef NEW_LOCK
+
 typedef struct { 
-	volatile unsigned char lock;
+	volatile unsigned int lock;
 } spinlock_t;
 
 #define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
@@ -38,7 +39,7 @@
 		"mov r30=1\n"								\
 		"mov ar.ccv=r0\n"							\
 		";;\n"									\
-		IA64_SEMFIX"cmpxchg1.acq r30=[%0],r30,ar.ccv\n"				\
+		IA64_SEMFIX"cmpxchg4.acq r30=[%0],r30,ar.ccv\n"				\
 		";;\n"									\
 		"cmp.ne p15,p0=r30,r0\n"						\
 		"(p15) br.call.spnt.few b7=ia64_spinlock_contention\n"			\
@@ -48,18 +49,16 @@
 		: "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory");	\
 }
 
-#define spin_trylock(x)								\
-({										\
-	register char *addr __asm__ ("r31") = (char *) &(x)->lock;		\
-	register long result;							\
-										\
-	__asm__ __volatile__ (							\
-		"mov r30=1\n"							\
-		"mov ar.ccv=r0\n"						\
-		";;\n"								\
-		IA64_SEMFIX"cmpxchg1.acq %0=[%1],r30,ar.ccv\n"			\
-		: "=r"(result) : "r"(addr) : "ar.ccv", "r30", "memory");	\
-	(result == 0);								\
+#define spin_trylock(x)									\
+({											\
+	register long result;								\
+											\
+	__asm__ __volatile__ (								\
+		"mov ar.ccv=r0\n"							\
+		";;\n"									\
+		IA64_SEMFIX"cmpxchg4.acq %0=[%2],%1,ar.ccv\n"				\
+		: "=r"(result) : "r"(1), "r"(&(x)->lock) : "ar.ccv", "memory");		\
+	(result == 0);									\
 })
 
 #define spin_is_locked(x)	((x)->lock != 0)
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/system.h linux/include/asm-ia64/system.h
--- v2.4.0-prerelease/linux/include/asm-ia64/system.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/system.h	Thu Jan  4 12:50:18 2001
@@ -27,7 +27,8 @@
 
 #define GATE_ADDR		(0xa000000000000000 + PAGE_SIZE)
 
-#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC)
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) \
+    || defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC)
   /* Workaround for Errata 97.  */
 # define IA64_SEMFIX_INSN	mf;
 # define IA64_SEMFIX	"mf;"
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/uaccess.h linux/include/asm-ia64/uaccess.h
--- v2.4.0-prerelease/linux/include/asm-ia64/uaccess.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/uaccess.h	Thu Jan  4 12:50:18 2001
@@ -125,46 +125,28 @@
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) (*(struct __large_struct *)(x))
 
-#define __get_user_64(addr)								\
+/* We need to declare the __ex_table section before we can use it in .xdata.  */
+__asm__ (".section \"__ex_table\", \"a\"\n\t.previous");
+
+#define __get_user_64(addr)									\
 	__asm__ ("\n1:\tld8 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
-		 "2:\n"									\
-		 "\t.section __ex_table,\"a\"\n"					\
-		 "\t\tdata4 @gprel(1b)\n"						\
-		 "\t\tdata4 (2b-1b)|1\n"						\
-		 "\t.previous"								\
-		: "=r"(__gu_val), "=r"(__gu_err)					\
-		: "m"(__m(addr)), "1"(__gu_err));
+		 "2:\n\t.xdata4 \"__ex_table\", @gprel(1b), (2b-1b)|1\n"			\
+		: "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));
 
-#define __get_user_32(addr)								\
+#define __get_user_32(addr)									\
 	__asm__ ("\n1:\tld4 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
-		 "2:\n"									\
-		 "\t.section __ex_table,\"a\"\n"					\
-		 "\t\tdata4 @gprel(1b)\n"						\
-		 "\t\tdata4 (2b-1b)|1\n"						\
-		 "\t.previous"								\
-		: "=r"(__gu_val), "=r"(__gu_err)					\
-		: "m"(__m(addr)), "1"(__gu_err));
+		 "2:\n\t.xdata4 \"__ex_table\", @gprel(1b), (2b-1b)|1\n"			\
+		: "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));
 
-#define __get_user_16(addr)								\
+#define __get_user_16(addr)									\
 	__asm__ ("\n1:\tld2 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
-		 "2:\n"									\
-		 "\t.section __ex_table,\"a\"\n"					\
-		 "\t\tdata4 @gprel(1b)\n"						\
-		 "\t\tdata4 (2b-1b)|1\n"						\
-		 "\t.previous"								\
-		: "=r"(__gu_val), "=r"(__gu_err)					\
-		: "m"(__m(addr)), "1"(__gu_err));
+		 "2:\n\t.xdata4 \"__ex_table\", @gprel(1b), (2b-1b)|1\n"			\
+		: "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));
 
-#define __get_user_8(addr)								\
+#define __get_user_8(addr)									\
 	__asm__ ("\n1:\tld1 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
-		 "2:\n"									\
-		 "\t.section __ex_table,\"a\"\n"					\
-		 "\t\tdata4 @gprel(1b)\n"						\
-		 "\t\tdata4 (2b-1b)|1\n"						\
-		 "\t.previous"								\
-		: "=r"(__gu_val), "=r"(__gu_err)					\
-		: "m"(__m(addr)), "1"(__gu_err));
-
+		 "2:\n\t.xdata4 \"__ex_table\", @gprel(1b), (2b-1b)|1\n"			\
+		: "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));
 
 extern void __put_user_unknown (void);
 
@@ -206,46 +188,26 @@
 #define __put_user_64(x,addr)								\
 	__asm__ __volatile__ (								\
 		 "\n1:\tst8 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
-		 "2:\n"									\
-		 "\t.section __ex_table,\"a\"\n"					\
-		 "\t\tdata4 @gprel(1b)\n"						\
-		 "\t\tdata4 2b-1b\n"							\
-		 "\t.previous"								\
-		: "=r"(__pu_err)							\
-		: "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+		 "2:\n\t.xdata4 \"__ex_table\", @gprel(1b), (2b-1b)\n"			\
+		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
 
 #define __put_user_32(x,addr)								\
 	__asm__ __volatile__ (								\
 		 "\n1:\tst4 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
-		 "2:\n"									\
-		 "\t.section __ex_table,\"a\"\n"					\
-		 "\t\tdata4 @gprel(1b)\n"						\
-		 "\t\tdata4 2b-1b\n"							\
-		 "\t.previous"								\
-		: "=r"(__pu_err)							\
-		: "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+		 "2:\n\t.xdata4 \"__ex_table\", @gprel(1b), (2b-1b)\n"			\
+		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
 
 #define __put_user_16(x,addr)								\
 	__asm__ __volatile__ (								\
 		 "\n1:\tst2 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
-		 "2:\n"									\
-		 "\t.section __ex_table,\"a\"\n"					\
-		 "\t\tdata4 @gprel(1b)\n"						\
-		 "\t\tdata4 2b-1b\n"							\
-		 "\t.previous"								\
-		: "=r"(__pu_err)							\
-		: "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+		 "2:\n\t.xdata4 \"__ex_table\", @gprel(1b), (2b-1b)\n"			\
+		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
 
 #define __put_user_8(x,addr)								\
 	__asm__ __volatile__ (								\
 		 "\n1:\tst1 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
-		 "2:\n"									\
-		 "\t.section __ex_table,\"a\"\n"					\
-		 "\t\tdata4 @gprel(1b)\n"						\
-		 "\t\tdata4 2b-1b\n"							\
-		 "\t.previous"								\
-		: "=r"(__pu_err)							\
-		: "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+		 "2:\n\t.xdata4 \"__ex_table\", @gprel(1b), (2b-1b)\n"			\
+		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
 
 /*
  * Complex access routines
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-ia64/unistd.h linux/include/asm-ia64/unistd.h
--- v2.4.0-prerelease/linux/include/asm-ia64/unistd.h	Tue Oct 31 12:42:27 2000
+++ linux/include/asm-ia64/unistd.h	Thu Jan  4 12:50:18 2001
@@ -160,7 +160,7 @@
 #define __NR_nanosleep			1168
 #define __NR_nfsservctl			1169
 #define __NR_prctl			1170
-#define __NR_getpagesize		1171
+/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */
 #define __NR_mmap2			1172
 #define __NR_pciconfig_read		1173
 #define __NR_pciconfig_write		1174
@@ -196,7 +196,7 @@
 #define __NR_getsockopt			1204
 #define __NR_sendmsg			1205
 #define __NR_recvmsg			1206
-#define __NR_sys_pivot_root		1207
+#define __NR_pivot_root			1207
 #define __NR_mincore			1208
 #define __NR_madvise			1209
 #define __NR_stat			1210
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-m68k/delay.h linux/include/asm-m68k/delay.h
--- v2.4.0-prerelease/linux/include/asm-m68k/delay.h	Wed Sep 25 00:47:41 1996
+++ linux/include/asm-m68k/delay.h	Thu Jan  4 13:00:55 2001
@@ -1,10 +1,12 @@
 #ifndef _M68K_DELAY_H
 #define _M68K_DELAY_H
 
+#include <asm/param.h>
+
 /*
  * Copyright (C) 1994 Hamish Macdonald
  *
- * Delay routines, using a pre-computed "loops_per_second" value.
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
  */
 
 extern __inline__ void __delay(unsigned long loops)
@@ -13,6 +15,8 @@
 		: "=d" (loops) : "0" (loops));
 }
 
+extern void __bad_udelay(void);
+
 /*
  * Use only for very small delays ( < 1 msec).  Should probably use a
  * lookup table, really, as the multiplications take much too long with
@@ -20,16 +24,24 @@
  * first constant multiplications gets optimized away if the delay is
  * a constant)  
  */
-extern __inline__ void udelay(unsigned long usecs)
+static inline void __const_udelay(unsigned long xloops)
 {
 	unsigned long tmp;
 
-	usecs *= 4295;		/* 2**32 / 1000000 */
 	__asm__ ("mulul %2,%0:%1"
-		: "=d" (usecs), "=d" (tmp)
-		: "d" (usecs), "1" (loops_per_sec));
-	__delay(usecs);
+		: "=d" (xloops), "=d" (tmp)
+		: "d" (xloops), "1" (loops_per_jiffy));
+	__delay(xloops * HZ);
+}
+
+static inline void __udelay(unsigned long usecs)
+{
+	__const_udelay(usecs * 4295);	/* 2**32 / 1000000 */
 }
+
+#define udelay(n) (__builtin_constant_p(n) ? \
+	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
+	__udelay(n))
 
 extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c)
 {
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-m68k/floppy.h linux/include/asm-m68k/floppy.h
--- v2.4.0-prerelease/linux/include/asm-m68k/floppy.h	Tue May 11 09:57:14 1999
+++ linux/include/asm-m68k/floppy.h	Thu Jan  4 13:00:55 2001
@@ -15,6 +15,7 @@
 
 asmlinkage void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs);
 
+#undef MAX_DMA_ADDRESS
 #define MAX_DMA_ADDRESS   0x00  /* nothing like that */
 
 extern spinlock_t  dma_spin_lock;
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-m68k/io.h linux/include/asm-m68k/io.h
--- v2.4.0-prerelease/linux/include/asm-m68k/io.h	Sat Nov 27 15:27:49 1999
+++ linux/include/asm-m68k/io.h	Thu Jan  4 13:00:55 2001
@@ -14,7 +14,7 @@
 #include <asm/virtconvert.h>
 
 /*
- * These are for ISA/PCI shared memory _only_ and should never be used
+ * These are for PCI shared memory _only_ and should never be used
  * on any other type of memory, including Zorro memory. They are meant to
  * access the bus in the bus byte order which is little-endian!.
  *
@@ -47,8 +47,11 @@
 #define outb(x,addr) ((void) writeb(x,addr))
 #define outb_p(x,addr) outb(x,addr)
 
+#ifndef CONFIG_SUN3
 #define IO_SPACE_LIMIT 0xffff
-
+#else
+#define IO_SPACE_LIMIT 0x0fffffff
+#endif
 
 /* Values for nocacheflag and cmode */
 #define IOMAP_FULL_CACHING		0
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-m68k/movs.h linux/include/asm-m68k/movs.h
--- v2.4.0-prerelease/linux/include/asm-m68k/movs.h	Wed Jan 26 12:44:21 2000
+++ linux/include/asm-m68k/movs.h	Thu Jan  4 13:00:55 2001
@@ -10,46 +10,46 @@
 /* Set DFC register value */
 
 #define SET_DFC(x) \
-        __asm__ __volatile__ ("movec %0,%%dfc" : : "r" (x))
+        __asm__ __volatile__ (" movec %0,%/dfc" : : "d" (x));
 
 /* Get DFC register value */
 
 #define GET_DFC(x) \
-        __asm__ __volatile__ ("movec %%dfc,%0" : "=r" (x))
+        __asm__ __volatile__ (" movec %/dfc, %0" : "=d" (x) : );
 
 /* Set SFC register value */
 
 #define SET_SFC(x) \
-        __asm__ __volatile__ ("movec %0,%%sfc" : : "r" (x))
+        __asm__ __volatile__ (" movec %0,%/sfc" : : "d" (x));
 
 /* Get SFC register value */
 
 #define GET_SFC(x) \
-        __asm__ __volatile__ ("movec %%sfc,%0" : "=r" (x))
+        __asm__ __volatile__ (" movec %/sfc, %0" : "=d" (x) : );
 
 #define SET_VBR(x) \
-        __asm__ __volatile__ ("movec %0,%%vbr" : : "r" (x))
+        __asm__ __volatile__ (" movec %0,%/vbr" : : "r" (x));
 
 #define GET_VBR(x) \
-        __asm__ __volatile__ ("movec %%vbr,%0" : "=r" (x))
+        __asm__ __volatile__ (" movec %/vbr, %0" : "=g" (x) : );
 
-/* Set a byte using the "moves" instruction */
+/* Set a byte using the "movs" instruction */
 
 #define SET_CONTROL_BYTE(addr,value) \
-        __asm__ __volatile__ ("movesb %1,%0" : "=m" (addr) : "d" (value))
+        __asm__ __volatile__ (" movsb %0, %1@" : : "d" (value), "a" (addr));
 
-/* Get a byte using the "moves" instruction */
+/* Get a byte using the "movs" instruction */
 
 #define GET_CONTROL_BYTE(addr,value) \
-        __asm__ __volatile__ ("movesb %1,%0" : "=d" (value) : "m" (addr))
+        __asm__ __volatile__ (" movsb %1@, %0" : "=d" (value) : "a" (addr));
 
-/* Set a (long)word using the "moves" instruction */
+/* Set a (long)word using the "movs" instruction */
 
 #define SET_CONTROL_WORD(addr,value) \
-        __asm__ __volatile__ ("movesl %1,%0" : "=m" (addr) : "r" (value))
+        __asm__ __volatile__ (" movsl %0, %1@" : : "d" (value), "a" (addr));
 
-/* Get a (long)word using the "moves" instruction */
+/* Get a (long)word using the "movs" instruction */
 
 #define GET_CONTROL_WORD(addr,value) \
-        __asm__ __volatile__ ("movesl %1,%0" : "=d" (value) : "m" (addr))
+        __asm__ __volatile__ (" movsl %1@, %0" : "=d" (value) : "a" (addr));
 #endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-m68k/param.h linux/include/asm-m68k/param.h
--- v2.4.0-prerelease/linux/include/asm-m68k/param.h	Mon Dec 11 17:59:45 2000
+++ linux/include/asm-m68k/param.h	Thu Jan  4 13:00:55 2001
@@ -1,17 +1,11 @@
 #ifndef _M68K_PARAM_H
 #define _M68K_PARAM_H
 
-#include <linux/config.h>
-
 #ifndef HZ
 #define HZ 100
 #endif
 
-#ifndef CONFIG_SUN3
-#define EXEC_PAGESIZE	4096
-#else
 #define EXEC_PAGESIZE	8192
-#endif
 
 #ifndef NGROUPS
 #define NGROUPS		32
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-m68k/serial.h linux/include/asm-m68k/serial.h
--- v2.4.0-prerelease/linux/include/asm-m68k/serial.h	Tue May 11 09:57:14 1999
+++ linux/include/asm-m68k/serial.h	Thu Jan  4 13:00:55 2001
@@ -35,6 +35,9 @@
 #define FOURPORT_FLAGS ASYNC_FOURPORT
 #define ACCENT_FLAGS 0
 #define BOCA_FLAGS 0
+#define RS_TABLE_SIZE  64
+#else
+#define RS_TABLE_SIZE  4
 #endif
 	
 #define STD_SERIAL_PORT_DEFNS			\
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-m68k/traps.h linux/include/asm-m68k/traps.h
--- v2.4.0-prerelease/linux/include/asm-m68k/traps.h	Tue Jan 19 10:58:34 1999
+++ linux/include/asm-m68k/traps.h	Thu Jan  4 13:00:55 2001
@@ -151,7 +151,7 @@
 # define MMU060_RW_W	(0x00800000)	/* write */
 # define MMU060_RW_R	(0x01000000)	/* read */
 # define MMU060_RW_RMW	(0x01800000)	/* read/modify/write */
-# define MMU060_W		(0x00800000)	/* general write, includes rmw */
+# define MMU060_W	(0x00800000)	/* general write, includes rmw */
 #define	MMU060_SIZ	(0x00600000)	/* transfer size */
 #define	MMU060_TT	(0x00180000)	/* transfer type (TT) bits */
 #define	MMU060_TM	(0x00070000)	/* transfer modifier (TM) bits */
@@ -172,12 +172,11 @@
 #define	MMU060_SEE	(0x00000001)	/* software emulated error */
 
 /* cases of missing or invalid descriptors */
-#define MMU060_DESC_ERR	(MMU060_TWE | MMU060_PTA | MMU060_PTB | \
-						 MMU060_IL  | MMU060_PF)
+#define MMU060_DESC_ERR (MMU060_PTA | MMU060_PTB | \
+			 MMU060_IL  | MMU060_PF)
 /* bits that indicate real errors */
-#define MMU060_ERR_BITS	(MMU060_PBE | MMU060_SBE | MMU060_DESC_ERR | \
-						 MMU060_SP  | MMU060_WP  | MMU060_RE | \
-						 MMU060_WE)
+#define MMU060_ERR_BITS (MMU060_PBE | MMU060_SBE | MMU060_DESC_ERR | MMU060_SP | \
+			 MMU060_WP  | MMU060_TWE | MMU060_RE       | MMU060_WE)
 
 /* structure for stack frames */
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/bugs.h linux/include/asm-sh/bugs.h
--- v2.4.0-prerelease/linux/include/asm-sh/bugs.h	Wed Aug  9 13:59:04 2000
+++ linux/include/asm-sh/bugs.h	Thu Jan  4 13:19:13 2001
@@ -16,10 +16,10 @@
 
 static void __init check_bugs(void)
 {
-	extern unsigned long loops_per_sec;
+	extern unsigned long loops_per_jiffy;
 	char *p= &system_utsname.machine[2]; /* "sh" */
 
-	cpu_data->loops_per_sec = loops_per_sec;
+	cpu_data->loops_per_jiffy = loops_per_jiffy;
 	
 	switch (cpu_data->type) {
 	case CPU_SH7708:
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/delay.h linux/include/asm-sh/delay.h
--- v2.4.0-prerelease/linux/include/asm-sh/delay.h	Sun Oct  8 10:50:36 2000
+++ linux/include/asm-sh/delay.h	Thu Jan  4 13:19:13 2001
@@ -2,41 +2,19 @@
 #define __ASM_SH_DELAY_H
 
 /*
- * Copyright (C) 1999  Kaz Kojima
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/sh/lib/delay.c
  */
+ 
+extern void __bad_udelay(void);
 
-#include <linux/config.h>
-
-extern __inline__ void __delay(unsigned long loops)
-{
-	__asm__ __volatile__(
-		"tst	%0, %0\n\t"
-		"1:\t"
-		"bf/s	1b\n\t"
-		" dt	%0"
-		: "=r" (loops)
-		: "0" (loops)
-		: "t");
-}
-
-extern __inline__ void __udelay(unsigned long usecs, unsigned long lps)
-{
-	usecs *= 0x000010c6;		/* 2**32 / 1000000 */
-	__asm__("dmulu.l	%0, %2\n\t"
-		"sts	$mach, %0"
-		: "=r" (usecs)
-		: "0" (usecs), "r" (lps)
-		: "macl", "mach");
-        __delay(usecs);
-}
-
-
-#ifdef CONFIG_SMP
-#define __udelay_val cpu_data[smp_processor_id()].udelay_val
-#else
-#define __udelay_val (current_cpu_data.loops_per_sec)
-#endif
-
-#define udelay(usecs) __udelay((usecs),__udelay_val)
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
+#define udelay(n) (__builtin_constant_p(n) ? \
+	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
+	__udelay(n))
 
 #endif /* __ASM_SH_DELAY_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/mmu_context.h linux/include/asm-sh/mmu_context.h
--- v2.4.0-prerelease/linux/include/asm-sh/mmu_context.h	Thu Aug 10 13:30:05 2000
+++ linux/include/asm-sh/mmu_context.h	Thu Jan  4 13:19:13 2001
@@ -104,6 +104,7 @@
 #define MMU_PTEL	0xFF000004	/* Page table entry register LOW */
 #define MMU_TTB		0xFF000008	/* Translation table base register */
 #define MMU_TEA		0xFF00000C	/* TLB Exception Address */
+#define MMU_PTEA	0xFF000034	/* Page table entry assistance register */
 
 #define MMUCR		0xFF000010	/* MMU Control Register */
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/param.h linux/include/asm-sh/param.h
--- v2.4.0-prerelease/linux/include/asm-sh/param.h	Mon Aug 30 18:12:59 1999
+++ linux/include/asm-sh/param.h	Thu Jan  4 13:19:13 2001
@@ -17,4 +17,8 @@
 
 #define MAXHOSTNAMELEN	64	/* max length of hostname */
 
+#ifdef __KERNEL__
+#define CLOCKS_PER_SEC	HZ	/* frequency at which times() counts */
+#endif
+
 #endif /* __ASM_SH_PARAM_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/pgtable-2level.h linux/include/asm-sh/pgtable-2level.h
--- v2.4.0-prerelease/linux/include/asm-sh/pgtable-2level.h	Sun Mar  5 09:33:55 2000
+++ linux/include/asm-sh/pgtable-2level.h	Thu Jan  4 13:19:13 2001
@@ -29,9 +29,9 @@
  * setup: the pgd is never bad, and a pmd always exists (as it's folded
  * into the pgd entry)
  */
-extern inline int pgd_none(pgd_t pgd)		{ return 0; }
-extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
-extern inline int pgd_present(pgd_t pgd)	{ return 1; }
+static inline int pgd_none(pgd_t pgd)		{ return 0; }
+static inline int pgd_bad(pgd_t pgd)		{ return 0; }
+static inline int pgd_present(pgd_t pgd)	{ return 1; }
 #define pgd_clear(xp)	do { } while (0)
 
 /*
@@ -50,7 +50,7 @@
 #define pgd_page(pgd) \
 ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
 
-extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 {
 	return (pmd_t *) dir;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/pgtable.h linux/include/asm-sh/pgtable.h
--- v2.4.0-prerelease/linux/include/asm-sh/pgtable.h	Sun Nov 19 18:44:21 2000
+++ linux/include/asm-sh/pgtable.h	Thu Jan  4 13:19:13 2001
@@ -105,8 +105,23 @@
 #define _PAGE_ACCESSED 	0x400  /* software: page referenced */
 #define _PAGE_U0_SHARED 0x800  /* software: page is shared in user space */
 
+
+/* software: moves to PTEA.TC (Timing Control) */
+#define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
+#define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */
+
+/* software: moves to PTEA.SA[2:0] (Space Attributes) */
+#define _PAGE_PCC_IODYN 0x00000001	/* IO space, dynamically sized bus */
+#define _PAGE_PCC_IO8	0x20000000	/* IO space, 8 bit bus */
+#define _PAGE_PCC_IO16	0x20000001	/* IO space, 16 bit bus */
+#define _PAGE_PCC_COM8	0x40000000	/* Common Memory space, 8 bit bus */
+#define _PAGE_PCC_COM16	0x40000001	/* Common Memory space, 16 bit bus */
+#define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
+#define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 6 bit bus */
+
+
 /* Mask which drop software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK	0x1ffff1ff
+#define _PAGE_FLAGS_HARDWARE_MASK	0x1ffff1fe
 /* Hardware flags: SZ=1 (4k-byte) */
 #define _PAGE_FLAGS_HARD		0x00000010
 
@@ -126,6 +141,8 @@
 #define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
 #define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+#define PAGE_KERNEL_PCC(slot, type) \
+			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | (type))
 
 /*
  * As i386 and MIPS, SuperH can't do page protection for execute, and
@@ -178,23 +195,23 @@
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
-extern inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
-extern inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
-extern inline int pte_shared(pte_t pte){ return pte_val(pte) & _PAGE_SHARED; }
-
-extern inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-extern inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-extern inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-extern inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-extern inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-extern inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
+static inline int pte_shared(pte_t pte){ return pte_val(pte) & _PAGE_SHARED; }
+
+static inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+static inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
+static inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+static inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -215,7 +232,7 @@
 #define mk_pte_phys(physpage, pgprot) \
 ({ pte_t __pte; set_pte(&__pte, __pte(physpage + pgprot_val(pgprot))); __pte; })
 
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 { set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
 
 #define page_pte(page) page_pte_prot(page, __pgprot(0))
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/processor.h linux/include/asm-sh/processor.h
--- v2.4.0-prerelease/linux/include/asm-sh/processor.h	Mon Jan  1 09:38:36 2001
+++ linux/include/asm-sh/processor.h	Thu Jan  4 13:19:13 2001
@@ -29,7 +29,7 @@
 
 struct sh_cpuinfo {
 	enum cpu_type type;
-	unsigned long loops_per_sec;
+	unsigned long loops_per_jiffy;
 
 	char	hard_math;
 
@@ -164,9 +164,9 @@
 	unsigned long __dummy;
 
 	/* Set FD flag in SR */
-	__asm__ __volatile__("stc	$sr, %0\n\t"
+	__asm__ __volatile__("stc	sr, %0\n\t"
 			     "or	%1, %0\n\t"
-			     "ldc	%0, $sr"
+			     "ldc	%0, sr"
 			     : "=&r" (__dummy)
 			     : "r" (SR_FD));
 }
@@ -176,9 +176,9 @@
 	unsigned long __dummy;
 
 	/* Clear out FD flag in SR */
-	__asm__ __volatile__("stc	$sr, %0\n\t"
+	__asm__ __volatile__("stc	sr, %0\n\t"
 			     "and	%1, %0\n\t"
-			     "ldc	%0, $sr"
+			     "ldc	%0, sr"
 			     : "=&r" (__dummy)
 			     : "r" (~SR_FD));
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/ptrace.h linux/include/asm-sh/ptrace.h
--- v2.4.0-prerelease/linux/include/asm-sh/ptrace.h	Mon Jun 19 17:59:38 2000
+++ linux/include/asm-sh/ptrace.h	Thu Jan  4 13:19:13 2001
@@ -42,6 +42,11 @@
 #define REG_XDREG14	47
 #define REG_FPSCR	48
 
+#define PTRACE_SETOPTIONS         21
+
+/* options set using PTRACE_SETOPTIONS */
+#define PTRACE_O_TRACESYSGOOD     0x00000001
+
 /*
  * This struct defines the way the registers are stored on the
  * kernel stack during a system call or other kernel entry.
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/rtc.h linux/include/asm-sh/rtc.h
--- v2.4.0-prerelease/linux/include/asm-sh/rtc.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-sh/rtc.h	Thu Jan  4 13:19:13 2001
@@ -0,0 +1,12 @@
+#ifndef _ASM_RTC_H
+#define _ASM_RTC_H
+
+#include <asm/machvec.h>
+
+#define rtc_gettimeofday sh_mv.mv_rtc_gettimeofday
+#define rtc_settimeofday sh_mv.mv_rtc_settimeofday
+
+extern void sh_rtc_gettimeofday(struct timeval *tv);
+extern int sh_rtc_settimeofday(const struct timeval *tv);
+
+#endif /* _ASM_RTC_H */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/siginfo.h linux/include/asm-sh/siginfo.h
--- v2.4.0-prerelease/linux/include/asm-sh/siginfo.h	Wed May 24 18:38:26 2000
+++ linux/include/asm-sh/siginfo.h	Thu Jan  4 13:19:13 2001
@@ -216,7 +216,7 @@
 #ifdef __KERNEL__
 #include <linux/string.h>
 
-extern inline void copy_siginfo(siginfo_t *to, siginfo_t *from)
+static inline void copy_siginfo(siginfo_t *to, siginfo_t *from)
 {
 	if (from->si_code < 0)
 		memcpy(to, from, sizeof(siginfo_t));
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/timex.h linux/include/asm-sh/timex.h
--- v2.4.0-prerelease/linux/include/asm-sh/timex.h	Fri Jul 21 14:21:06 2000
+++ linux/include/asm-sh/timex.h	Thu Jan  4 13:19:13 2001
@@ -6,7 +6,7 @@
 #ifndef __ASM_SH_TIMEX_H
 #define __ASM_SH_TIMEX_H
 
-#define CLOCK_TICK_RATE	1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE	(current_cpu_data.module_clock/4) /* Underlying HZ */
 #define CLOCK_TICK_FACTOR	20	/* Factor of both 1000000 and CLOCK_TICK_RATE */
 #define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
 	(1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sh/uaccess.h linux/include/asm-sh/uaccess.h
--- v2.4.0-prerelease/linux/include/asm-sh/uaccess.h	Sun Oct  8 10:50:36 2000
+++ linux/include/asm-sh/uaccess.h	Thu Jan  4 13:19:13 2001
@@ -56,7 +56,7 @@
 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
 #define __access_ok(addr,size) (__range_ok(addr,size) == 0)
 
-extern inline int verify_area(int type, const void * addr, unsigned long size)
+static inline int verify_area(int type, const void * addr, unsigned long size)
 {
 	return access_ok(type,addr,size) ? 0 : -EFAULT;
 }
@@ -66,7 +66,7 @@
  * They automatically use the right size if we just have the right
  * pointer type ...
  *
- * As MIPS uses the same address space for kernel and user data, we
+ * As SuperH uses the same address space for kernel and user data, we
  * can just do these as direct assignments.
  *
  * Careful to not
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sparc/atomic.h linux/include/asm-sparc/atomic.h
--- v2.4.0-prerelease/linux/include/asm-sparc/atomic.h	Sun Oct  8 10:50:36 2000
+++ linux/include/asm-sparc/atomic.h	Mon Jan  1 10:37:41 2001
@@ -133,6 +133,8 @@
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
+#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
+
 #endif /* !(__KERNEL__) */
 
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sparc/contregs.h linux/include/asm-sparc/contregs.h
--- v2.4.0-prerelease/linux/include/asm-sparc/contregs.h	Mon Jan  1 09:38:36 2001
+++ linux/include/asm-sparc/contregs.h	Mon Jan  1 10:37:41 2001
@@ -1,4 +1,4 @@
-/* $Id: contregs.h,v 1.7 1998/01/30 10:59:48 jj Exp $ */
+/* $Id: contregs.h,v 1.8 2000/12/28 22:49:11 davem Exp $ */
 #ifndef _SPARC_CONTREGS_H
 #define _SPARC_CONTREGS_H
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sparc/delay.h linux/include/asm-sparc/delay.h
--- v2.4.0-prerelease/linux/include/asm-sparc/delay.h	Mon Jan 12 15:15:45 1998
+++ linux/include/asm-sparc/delay.h	Mon Jan  1 10:37:41 2001
@@ -1,4 +1,4 @@
-/* $Id: delay.h,v 1.10 1997/11/07 18:24:30 mj Exp $
+/* $Id: delay.h,v 1.11 2001/01/01 01:46:15 davem Exp $
  * delay.h: Linux delay routines on the Sparc.
  *
  * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
@@ -7,7 +7,7 @@
 #ifndef __SPARC_DELAY_H
 #define __SPARC_DELAY_H
 
-extern unsigned long loops_per_sec;
+extern unsigned long loops_per_jiffy;
 
 extern __inline__ void __delay(unsigned long loops)
 {
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sparc/processor.h linux/include/asm-sparc/processor.h
--- v2.4.0-prerelease/linux/include/asm-sparc/processor.h	Mon Jan  1 09:38:36 2001
+++ linux/include/asm-sparc/processor.h	Mon Jan  1 10:37:41 2001
@@ -1,4 +1,4 @@
-/* $Id: processor.h,v 1.78 2000/11/30 08:37:31 anton Exp $
+/* $Id: processor.h,v 1.80 2000/12/31 10:05:43 davem Exp $
  * include/asm-sparc/processor.h
  *
  * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sparc/semaphore-helper.h linux/include/asm-sparc/semaphore-helper.h
--- v2.4.0-prerelease/linux/include/asm-sparc/semaphore-helper.h	Mon May  8 22:00:01 2000
+++ linux/include/asm-sparc/semaphore-helper.h	Wed Dec 31 16:00:00 1969
@@ -1,169 +0,0 @@
-#ifndef _SPARC_SEMAPHORE_HELPER_H
-#define _SPARC_SEMAPHORE_HELPER_H
-
-#include <linux/config.h>
-
-/*
- * (barely) SMP- and interrupt-safe semaphore helper functions, sparc version.
- *
- * (C) Copyright 1999 David S. Miller (davem@redhat.com)
- * (C) Copyright 1999 Jakub Jelinek (jj@ultra.linux.cz)
- */
-#define wake_one_more(sem)	atomic_inc(&(sem)->waking)
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-
-#ifdef CONFIG_SMP
-	int tmp;
-
-	__asm__ __volatile__("
-		rd	%%psr, %%g1
-		or	%%g1, %3, %0
-		wr	%0, 0x0, %%psr
-		nop; nop; nop;
-1:		ldstub	[%2 + 3], %0
-		tst	%0
-		bne	1b
-		ld	[%2], %0
-		andn	%0, 0xff, %1
-		subcc	%0, 0x1ff, %0
-		bl,a	1f
-		 mov	0, %0
-		mov	%0, %1
-		mov	1, %0
-1:		st	%1, [%2]
-		wr	%%g1, 0x0, %%psr
-		nop; nop; nop\n" 
-	: "=&r" (ret), "=&r" (tmp)
-	: "r" (&sem->waking), "i" (PSR_PIL)
-	: "g1", "memory", "cc");
-#else
-	__asm__ __volatile__("
-		rd	%%psr, %%g1
-		or	%%g1, %2, %0
-		wr	%0, 0x0, %%psr
-		nop; nop; nop;
-		ld	[%1], %0
-		subcc	%0, 1, %0
-		bl,a	1f
-		 mov	0, %0
-		st	%0, [%1]
-		mov	1, %0
-1:		wr	%%g1, 0x0, %%psr
-		nop; nop; nop\n" 
-	: "=&r" (ret)
-	: "r" (&sem->waking), "i" (PSR_PIL)
-	: "g1", "memory", "cc");
-#endif
-	return ret;
-}
-
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
-						    struct task_struct *tsk)
-{
-	int ret;
-
-#ifdef CONFIG_SMP
-	int tmp;
-
-	__asm__ __volatile__("
-		rd	%%psr, %%g1
-		or	%%g1, %3, %0
-		wr	%0, 0x0, %%psr
-		nop; nop; nop;
-1:		ldstub	[%2 + 3], %0
-		tst	%0
-		bne	1b
-		ld	[%2], %0
-		andn	%0, 0xff, %1
-		subcc	%0, 0x1ff, %0
-		bl,a	1f
-		 mov	0, %0
-		mov	%0, %1
-		mov	1, %0
-1:		st	%1, [%2]
-		wr	%%g1, 0x0, %%psr
-		nop; nop; nop\n" 
-	: "=&r" (ret), "=&r" (tmp)
-	: "r" (&sem->waking), "i" (PSR_PIL)
-	: "g1", "memory", "cc");
-#else
-	__asm__ __volatile__("
-		rd	%%psr, %%g1
-		or	%%g1, %2, %0
-		wr	%0, 0x0, %%psr
-		nop; nop; nop;
-		ld	[%1], %0
-		subcc	%0, 1, %0
-		bl,a	1f
-		 mov	0, %0
-		st	%0, [%1]
-		mov	1, %0
-1:		wr	%%g1, 0x0, %%psr
-		nop; nop; nop\n" 
-	: "=&r" (ret)
-	: "r" (&sem->waking), "i" (PSR_PIL)
-	: "g1", "memory", "cc");
-#endif
-	if(ret == 0 && signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	return ret;
-}
-
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret;
-
-#ifdef CONFIG_SMP
-	int tmp;
-
-	__asm__ __volatile__("
-		rd	%%psr, %%g1
-		or	%%g1, %3, %0
-		wr	%0, 0x0, %%psr
-		nop; nop; nop;
-1:		ldstub	[%2 + 3], %0
-		tst	%0
-		bne	1b
-		ld	[%2], %0
-		andn	%0, 0xff, %1
-		subcc	%0, 0x1ff, %0
-		bl,a	1f
-		 mov	0, %0
-		mov	%0, %1
-		mov	1, %0
-1:		st	%1, [%2]
-		wr	%%g1, 0x0, %%psr
-		nop; nop; nop\n" 
-	: "=&r" (ret), "=&r" (tmp)
-	: "r" (&sem->waking), "i" (PSR_PIL)
-	: "g1", "memory", "cc");
-#else
-	__asm__ __volatile__("
-		rd	%%psr, %%g1
-		or	%%g1, %2, %0
-		wr	%0, 0x0, %%psr
-		nop; nop; nop;
-		ld	[%1], %0
-		subcc	%0, 1, %0
-		bl,a	1f
-		 mov	0, %0
-		st	%0, [%1]
-		mov	1, %0
-1:		wr	%%g1, 0x0, %%psr
-		nop; nop; nop\n" 
-	: "=&r" (ret)
-	: "r" (&sem->waking), "i" (PSR_PIL)
-	: "g1", "memory", "cc");
-#endif
-	ret = !ret;
-	if(ret == 1)
-		atomic_inc(&sem->count);
-	return ret;
-}
-
-#endif /* !(_SPARC_SEMAPHORE_HELPER_H) */
-
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sparc/semaphore.h linux/include/asm-sparc/semaphore.h
--- v2.4.0-prerelease/linux/include/asm-sparc/semaphore.h	Thu Jan 27 06:32:14 2000
+++ linux/include/asm-sparc/semaphore.h	Mon Jan  1 10:37:41 2001
@@ -10,7 +10,7 @@
 
 struct semaphore {
 	atomic_t count;
-	atomic_t waking;
+	int sleepers;
 	wait_queue_head_t wait;
 #if WAITQUEUE_DEBUG
 	long __magic;
@@ -25,7 +25,7 @@
 #endif
 
 #define __SEMAPHORE_INITIALIZER(name,count) \
-{ ATOMIC_INIT(count), ATOMIC_INIT(0), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
 	__SEM_DEBUG_INIT(name) }
 
 #define __MUTEX_INITIALIZER(name) \
@@ -37,10 +37,10 @@
 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
 
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
 {
 	atomic_set(&sem->count, val);
-	atomic_set(&sem->waking, 0);
+	sem->sleepers = 0;
 	init_waitqueue_head(&sem->wait);
 #if WAITQUEUE_DEBUG
 	sem->__magic = (long)&sem->__magic;
@@ -62,7 +62,7 @@
 extern int __down_trylock(struct semaphore * sem);
 extern void __up(struct semaphore * sem);
 
-extern inline void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
 {
 	register atomic_t *ptr asm("g1");
 	register int increment asm("g2");
@@ -97,7 +97,7 @@
 	: "g3", "g4", "g7", "memory", "cc");
 }
 
-extern inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
 {
 	register atomic_t *ptr asm("g1");
 	register int increment asm("g2");
@@ -135,7 +135,7 @@
 	return increment;
 }
 
-extern inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
 {
 	register atomic_t *ptr asm("g1");
 	register int increment asm("g2");
@@ -173,7 +173,7 @@
 	return increment;
 }
 
-extern inline void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
 {
 	register atomic_t *ptr asm("g1");
 	register int increment asm("g2");
@@ -262,7 +262,7 @@
 #define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
 #define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
 
-extern inline void init_rwsem(struct rw_semaphore *sem)
+static inline void init_rwsem(struct rw_semaphore *sem)
 {
 	sem->count = RW_LOCK_BIAS;
 	sem->lock = 0;
@@ -282,7 +282,7 @@
 extern void ___up_read(/* Special calling convention */ void);
 extern void ___up_write(/* Special calling convention */ void);
 
-extern inline void down_read(struct rw_semaphore *sem)
+static inline void down_read(struct rw_semaphore *sem)
 {
 	register atomic_t *ptr asm("g1");
 
@@ -308,7 +308,7 @@
 #endif
 }
 
-extern inline void down_write(struct rw_semaphore *sem)
+static inline void down_write(struct rw_semaphore *sem)
 {
 	register atomic_t *ptr asm("g1");
 
@@ -342,7 +342,7 @@
  * case is when there was a writer waiting, and we've
  * bumped the count to 0: we must wake the writer up.
  */
-extern inline void __up_read(struct rw_semaphore *sem)
+static inline void __up_read(struct rw_semaphore *sem)
 {
 	register atomic_t *ptr asm("g1");
 
@@ -360,7 +360,7 @@
 /* releasing the writer is easy -- just release it and
  * wake up any sleepers.
  */
-extern inline void __up_write(struct rw_semaphore *sem)
+static inline void __up_write(struct rw_semaphore *sem)
 {
 	register atomic_t *ptr asm("g1");
 
@@ -375,7 +375,7 @@
 	: "g2", "g3", "g4", "g7", "memory", "cc");
 }
 
-extern inline void up_read(struct rw_semaphore *sem)
+static inline void up_read(struct rw_semaphore *sem)
 {
 #if WAITQUEUE_DEBUG
 	if (!sem->write_not_granted)
@@ -387,7 +387,7 @@
 	__up_read(sem);
 }
 
-extern inline void up_write(struct rw_semaphore *sem)
+static inline void up_write(struct rw_semaphore *sem)
 {
 #if WAITQUEUE_DEBUG
 	if (!sem->read_not_granted)
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sparc64/delay.h linux/include/asm-sparc64/delay.h
--- v2.4.0-prerelease/linux/include/asm-sparc64/delay.h	Mon May 22 09:50:54 2000
+++ linux/include/asm-sparc64/delay.h	Thu Jan  4 12:50:17 2001
@@ -1,4 +1,4 @@
-/* $Id: delay.h,v 1.9 2000/05/09 17:40:15 davem Exp $
+/* $Id: delay.h,v 1.11 2001/01/02 08:15:32 davem Exp $
  * delay.h: Linux delay routines on the V9.
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu).
@@ -8,6 +8,7 @@
 #define __SPARC64_DELAY_H
 
 #include <linux/config.h>
+#include <linux/param.h>
 #ifdef CONFIG_SMP
 #include <linux/sched.h>
 #include <asm/smp.h>
@@ -37,13 +38,13 @@
 "	: "=r" (usecs)
 	: "r" (usecs), "r" (lps));
 
-	__delay(usecs);
+	__delay(usecs * HZ);
 }
 
 #ifdef CONFIG_SMP
 #define __udelay_val cpu_data[smp_processor_id()].udelay_val
 #else
-#define __udelay_val loops_per_sec
+#define __udelay_val loops_per_jiffy
 #endif
 
 #define udelay(usecs) __udelay((usecs),__udelay_val)
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/asm-sparc64/processor.h linux/include/asm-sparc64/processor.h
--- v2.4.0-prerelease/linux/include/asm-sparc64/processor.h	Mon Jan  1 09:38:36 2001
+++ linux/include/asm-sparc64/processor.h	Mon Jan  1 10:37:41 2001
@@ -1,4 +1,4 @@
-/* $Id: processor.h,v 1.66 2000/11/29 05:56:12 anton Exp $
+/* $Id: processor.h,v 1.68 2000/12/31 10:05:43 davem Exp $
  * include/asm-sparc64/processor.h
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/dcache.h linux/include/linux/dcache.h
--- v2.4.0-prerelease/linux/include/linux/dcache.h	Sun Oct  8 10:50:37 2000
+++ linux/include/linux/dcache.h	Thu Jan  4 13:55:18 2001
@@ -115,6 +115,7 @@
 					 * If this dentry points to a directory, then
 					 * s_nfsd_free_path semaphore will be down
 					 */
+#define DCACHE_REFERENCED	0x0008  /* Recently used, don't discard. */
 
 extern spinlock_t dcache_lock;
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/delay.h linux/include/linux/delay.h
--- v2.4.0-prerelease/linux/include/linux/delay.h	Mon Jan  1 09:38:36 2001
+++ linux/include/linux/delay.h	Thu Jan  4 12:50:17 2001
@@ -13,7 +13,7 @@
 
 /*
  * Using udelay() for intervals greater than a few milliseconds can
- * risk overflow for high loops_per_sec (high bogomips) machines. The
+ * risk overflow for high loops_per_jiffy (high bogomips) machines. The
  * mdelay() provides a wrapper to prevent this.  For delays greater
  * than MAX_UDELAY_MS milliseconds, the wrapper is used.  Architecture
  * specific values can be defined in asm-???/delay.h as an override.
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/mc146818rtc.h linux/include/linux/mc146818rtc.h
--- v2.4.0-prerelease/linux/include/linux/mc146818rtc.h	Fri Sep  8 12:52:42 2000
+++ linux/include/linux/mc146818rtc.h	Thu Jan  4 13:56:51 2001
@@ -15,6 +15,8 @@
 #include <linux/rtc.h>			/* get the user-level API */
 #include <asm/mc146818rtc.h>		/* register access macros */
 
+extern spinlock_t rtc_lock;		/* serialize CMOS RAM access */
+
 /**********************************************************************
  * register summary
  **********************************************************************/
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/netfilter_ipv4/ip_conntrack.h linux/include/linux/netfilter_ipv4/ip_conntrack.h
--- v2.4.0-prerelease/linux/include/linux/netfilter_ipv4/ip_conntrack.h	Wed Aug  9 19:00:49 2000
+++ linux/include/linux/netfilter_ipv4/ip_conntrack.h	Mon Jan  1 10:37:41 2001
@@ -101,7 +101,7 @@
 	struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
 
 	/* Have we seen traffic both ways yet? (bitset) */
-	volatile unsigned int status;
+	volatile unsigned long status;
 
 	/* Timer function; drops refcnt when it goes off. */
 	struct timer_list timeout;
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/netfilter_ipv4/lockhelp.h linux/include/linux/netfilter_ipv4/lockhelp.h
--- v2.4.0-prerelease/linux/include/linux/netfilter_ipv4/lockhelp.h	Wed Aug  9 19:00:49 2000
+++ linux/include/linux/netfilter_ipv4/lockhelp.h	Mon Jan  1 10:37:41 2001
@@ -19,8 +19,8 @@
 struct rwlock_debug
 {
 	rwlock_t l;
-	int read_locked_map;
-	int write_locked_map;
+	long read_locked_map;
+	long write_locked_map;
 };
 
 #define DECLARE_LOCK(l) 						\
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/netfilter_ipv6.h linux/include/linux/netfilter_ipv6.h
--- v2.4.0-prerelease/linux/include/linux/netfilter_ipv6.h	Mon May 22 09:50:55 2000
+++ linux/include/linux/netfilter_ipv6.h	Mon Jan  1 16:17:54 2001
@@ -54,7 +54,7 @@
 #define NF_IP6_NUMHOOKS		5
 
 
-enum nf_ip_hook_priorities {
+enum nf_ip6_hook_priorities {
 	NF_IP6_PRI_FIRST = INT_MIN,
 	NF_IP6_PRI_CONNTRACK = -200,
 	NF_IP6_PRI_MANGLE = -150,
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/pci_ids.h linux/include/linux/pci_ids.h
--- v2.4.0-prerelease/linux/include/linux/pci_ids.h	Mon Jan  1 09:38:36 2001
+++ linux/include/linux/pci_ids.h	Tue Jan  2 16:58:45 2001
@@ -357,6 +357,7 @@
 #define PCI_DEVICE_ID_SI_601		0x0601
 #define PCI_DEVICE_ID_SI_620		0x0620
 #define PCI_DEVICE_ID_SI_630		0x0630
+#define PCI_DEVICE_ID_SI_730		0x0730         
 #define PCI_DEVICE_ID_SI_630_VGA	0x6300
 #define PCI_DEVICE_ID_SI_730_VGA	0x7300
 #define PCI_DEVICE_ID_SI_5107		0x5107
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/rtc.h linux/include/linux/rtc.h
--- v2.4.0-prerelease/linux/include/linux/rtc.h	Tue Jul 11 11:18:53 2000
+++ linux/include/linux/rtc.h	Thu Jan  4 12:50:17 2001
@@ -2,6 +2,8 @@
  * Generic RTC interface.
  * This version contains the part of the user interface to the Real Time Clock
  * service. It is used with both the legacy mc146818 and also  EFI
+ * Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out
+ * from <linux/mc146818rtc.h> to this file for 2.4 kernels.
  * 
  * Copyright (C) 1999 Hewlett-Packard Co.
  * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/scc.h linux/include/linux/scc.h
--- v2.4.0-prerelease/linux/include/linux/scc.h	Wed Aug 18 11:38:46 1999
+++ linux/include/linux/scc.h	Thu Jan  4 12:50:12 2001
@@ -8,7 +8,7 @@
 /* selection of hardware types */
 
 #define PA0HZP		0x00	/* hardware type for PA0HZP SCC card and compatible */
-#define EAGLE         	0x01    /* hardware type for EAGLE card */
+#define EAGLE		0x01    /* hardware type for EAGLE card */
 #define PC100		0x02	/* hardware type for PC100 card */
 #define PRIMUS		0x04	/* hardware type for PRIMUS-PC (DG9BL) card */
 #define DRSI		0x08	/* hardware type for DRSI PC*Packet card */
@@ -28,10 +28,6 @@
 	SIOCSCCCAL
 };
 
-/* magic number */
-
-#define SCC_MAGIC	0x8530		/* ;-) */
-
 /* Device parameter control (from WAMPES) */
 
 enum L1_params {
@@ -218,7 +214,7 @@
 
 struct scc_channel {
 	int magic;			/* magic word */
-	
+
 	int init;			/* channel exists? */
 
 	struct net_device *dev;		/* link to device control structure */
@@ -226,12 +222,12 @@
 
 	char brand;			/* manufacturer of the board */
 	long clock;			/* used clock */
-	
+
 	io_port ctrl;			/* I/O address of CONTROL register */
 	io_port	data;			/* I/O address of DATA register */
 	io_port special;		/* I/O address of special function port */
 	int irq;			/* Number of Interrupt */
-	
+
 	char option;
 	char enhanced;			/* Enhanced SCC support */
 
@@ -242,17 +238,15 @@
         struct scc_kiss kiss;		/* control structure for KISS params */
         struct scc_stat stat;		/* statistical information */
         struct scc_modem modem; 	/* modem information */
-        
+
         struct sk_buff_head tx_queue;	/* next tx buffer */
         struct sk_buff *rx_buff;	/* pointer to frame currently received */
         struct sk_buff *tx_buff;	/* pointer to frame currently transmitted */
 
 	/* Timer */
-
 	struct timer_list tx_t;		/* tx timer for this channel */
 	struct timer_list tx_wdog;	/* tx watchdogs */
 };
 
-int scc_init(void);
 #endif /* defined(__KERNEL__) */
 #endif /* defined(_SCC_H) */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/sched.h linux/include/linux/sched.h
--- v2.4.0-prerelease/linux/include/linux/sched.h	Mon Jan  1 09:38:36 2001
+++ linux/include/linux/sched.h	Thu Jan  4 13:55:19 2001
@@ -136,6 +136,7 @@
  */
 extern rwlock_t tasklist_lock;
 extern spinlock_t runqueue_lock;
+extern spinlock_t mmlist_lock;
 
 extern void sched_init(void);
 extern void init_idle(void);
@@ -209,6 +210,9 @@
 	int map_count;				/* number of VMAs */
 	struct semaphore mmap_sem;
 	spinlock_t page_table_lock;
+
+	struct list_head mmlist;		/* List of all active mm's */
+
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
@@ -233,6 +237,7 @@
 	map_count:	1, 				\
 	mmap_sem:	__MUTEX_INITIALIZER(name.mmap_sem), \
 	page_table_lock: SPIN_LOCK_UNLOCKED, 		\
+	mmlist:		LIST_HEAD_INIT(name.mmlist),	\
 }
 
 struct signal_struct {
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/udf_fs.h linux/include/linux/udf_fs.h
--- v2.4.0-prerelease/linux/include/linux/udf_fs.h	Mon Apr 10 22:50:40 2000
+++ linux/include/linux/udf_fs.h	Mon Jan  1 09:57:08 2001
@@ -45,15 +45,15 @@
 #ifdef UDFFS_DEBUG
 #define udf_debug(f, a...) \
 	{ \
-		printk (KERN_DEBUG "UDF-fs DEBUG (%s, %d): %s: ", \
+		printk (KERN_DEBUG "UDF-fs DEBUG %s:%d:%s: ", \
 			__FILE__, __LINE__, __FUNCTION__); \
-		printk (## f, ## a); \
+		printk (f, ##a); \
 	}
 #else
 #define udf_debug(f, a...) /**/
 #endif
 
 #define udf_info(f, a...) \
-		printk (KERN_INFO "UDF-fs INFO " ## f, ## a);
+		printk (KERN_INFO "UDF-fs INFO " f, ##a);
 
 #endif /* !defined(_LINUX_UDF_FS_H) */
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/usb.h linux/include/linux/usb.h
--- v2.4.0-prerelease/linux/include/linux/usb.h	Mon Dec 11 17:59:45 2000
+++ linux/include/linux/usb.h	Thu Jan  4 13:56:48 2001
@@ -315,7 +315,41 @@
  * Terminate the driver's table with an all-zeroes entry.
  * Init the fields you care about; zeroes are not used in comparisons.
  */
+#define USB_DEVICE_ID_MATCH_VENDOR		0x0001
+#define USB_DEVICE_ID_MATCH_PRODUCT		0x0002
+#define USB_DEVICE_ID_MATCH_DEV_LO		0x0004
+#define USB_DEVICE_ID_MATCH_DEV_HI		0x0008
+#define USB_DEVICE_ID_MATCH_DEV_CLASS		0x0010
+#define USB_DEVICE_ID_MATCH_DEV_SUBCLASS	0x0020
+#define USB_DEVICE_ID_MATCH_DEV_PROTOCOL	0x0040
+#define USB_DEVICE_ID_MATCH_INT_CLASS		0x0080
+#define USB_DEVICE_ID_MATCH_INT_SUBCLASS	0x0100
+#define USB_DEVICE_ID_MATCH_INT_PROTOCOL	0x0200
+
+#define USB_DEVICE_ID_MATCH_DEVICE		(USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
+#define USB_DEVICE_ID_MATCH_DEV_RANGE		(USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI)
+#define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION	(USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE)
+#define USB_DEVICE_ID_MATCH_DEV_INFO \
+	(USB_DEVICE_ID_MATCH_DEV_CLASS | USB_DEVICE_ID_MATCH_DEV_SUBCLASS | USB_DEVICE_ID_MATCH_DEV_PROTOCOL)
+#define USB_DEVICE_ID_MATCH_INT_INFO \
+	(USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS | USB_DEVICE_ID_MATCH_INT_PROTOCOL)
+
+/* Some useful macros */
+#define USB_DEVICE(vend,prod) \
+	match_flags: USB_DEVICE_ID_MATCH_DEVICE, idVendor: (vend), idProduct: (prod)
+#define USB_DEVICE_VER(vend,prod,lo,hi) \
+	match_flags: USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, idVendor: (vend), idProduct: (prod), bcdDevice_lo: (lo), bcdDevice_hi: (hi)
+#define USB_DEVICE_INFO(cl,sc,pr) \
+	match_flags: USB_DEVICE_ID_MATCH_DEV_INFO, bDeviceClass: (cl), bDeviceSubClass: (sc), bDeviceProtocol: (pr)
+#define USB_INTERFACE_INFO(cl,sc,pr) \
+	match_flags: USB_DEVICE_ID_MATCH_INT_INFO, bInterfaceClass: (cl), bInterfaceSubClass: (sc), bInterfaceProtocol: (pr)
+
 struct usb_device_id {
+	/* This bitmask is used to determine which of the following fields
+	 * are to be used for matching.
+	 */
+	__u16		match_flags;
+
 	/*
 	 * vendor/product codes are checked, if vendor is nonzero
 	 * Range is for device revision (bcdDevice), inclusive;
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/linux/zorro_ids.h linux/include/linux/zorro_ids.h
--- v2.4.0-prerelease/linux/include/linux/zorro_ids.h	Tue Oct 31 12:42:27 2000
+++ linux/include/linux/zorro_ids.h	Thu Jan  4 13:00:55 2001
@@ -435,6 +435,7 @@
 /* unofficial ID */
 #define ZORRO_MANUF_INDIVIDUAL_COMPUTERS			0x1212
 #define  ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA			ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x00, 0)
+#define  ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF			ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x17, 0)
 #define  ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL		ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x2A, 0)
 
 #define ZORRO_MANUF_KUPKE_3					0x1248
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/net/irda/irda.h linux/include/net/irda/irda.h
--- v2.4.0-prerelease/linux/include/net/irda/irda.h	Sun Nov 19 18:44:22 2000
+++ linux/include/net/irda/irda.h	Thu Jan  4 13:55:54 2001
@@ -66,9 +66,9 @@
 #define IRDA_DEBUG(n, args...) (irda_debug >= (n)) ? (printk(KERN_DEBUG args)) : 0
 #define ASSERT(expr, func) \
 if(!(expr)) { \
-        printk( "Assertion failed! %s,%s,%s,line=%d\n",\
-        #expr,__FILE__,__FUNCTION__,__LINE__); \
-        ##func}
+        printk( "Assertion failed! %s:%s:%d %s\n", \
+        __FILE__,__FUNCTION__,__LINE__,(#expr));  \
+        func }
 #else
 #define IRDA_DEBUG(n, args...)
 #define ASSERT(expr, func)
diff -u --recursive --new-file v2.4.0-prerelease/linux/include/net/sock.h linux/include/net/sock.h
--- v2.4.0-prerelease/linux/include/net/sock.h	Mon Jan  1 09:38:36 2001
+++ linux/include/net/sock.h	Thu Jan  4 13:55:59 2001
@@ -458,7 +458,7 @@
 /* Define this to get the sk->debug debugging facility. */
 #define SOCK_DEBUGGING
 #ifdef SOCK_DEBUGGING
-#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG ## msg); } while (0)
+#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
 #else
 #define SOCK_DEBUG(sk, msg...) do { } while (0)
 #endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/init/main.c linux/init/main.c
--- v2.4.0-prerelease/linux/init/main.c	Mon Jan  1 09:38:36 2001
+++ linux/init/main.c	Wed Jan  3 20:45:26 2001
@@ -633,6 +633,9 @@
 		(*call)();
 		call++;
 	} while (call < &__initcall_end);
+
+	/* Make sure there is no pending stuff from the initcall sequence */
+	flush_scheduled_tasks();
 }
 
 /*
diff -u --recursive --new-file v2.4.0-prerelease/linux/kernel/exit.c linux/kernel/exit.c
--- v2.4.0-prerelease/linux/kernel/exit.c	Mon Jan  1 09:38:36 2001
+++ linux/kernel/exit.c	Thu Jan  4 01:00:35 2001
@@ -382,6 +382,7 @@
 	 */
 
 	write_lock_irq(&tasklist_lock);
+	current->state = TASK_ZOMBIE;
 	do_notify_parent(current, current->exit_signal);
 	while (current->p_cptr != NULL) {
 		p = current->p_cptr;
@@ -415,9 +416,6 @@
 		}
 	}
 	write_unlock_irq(&tasklist_lock);
-
-	if (current->leader)
-		disassociate_ctty(1);
 }
 
 NORET_TYPE void do_exit(long code)
@@ -437,20 +435,26 @@
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	acct_process(code);
 #endif
+	__exit_mm(tsk);
+
 	lock_kernel();
 	sem_exit();
-	__exit_mm(tsk);
 	__exit_files(tsk);
 	__exit_fs(tsk);
 	exit_sighand(tsk);
 	exit_thread();
-	tsk->state = TASK_ZOMBIE;
-	tsk->exit_code = code;
-	exit_notify();
+
+	if (current->leader)
+		disassociate_ctty(1);
+
 	put_exec_domain(tsk->exec_domain);
 	if (tsk->binfmt && tsk->binfmt->module)
 		__MOD_DEC_USE_COUNT(tsk->binfmt->module);
+
+	tsk->exit_code = code;
+	exit_notify();
 	schedule();
+	BUG();
 /*
  * In order to get rid of the "volatile function does return" message
  * I did this little loop that confuses gcc to think do_exit really
diff -u --recursive --new-file v2.4.0-prerelease/linux/kernel/fork.c linux/kernel/fork.c
--- v2.4.0-prerelease/linux/kernel/fork.c	Mon Jan  1 09:38:36 2001
+++ linux/kernel/fork.c	Wed Jan  3 20:45:26 2001
@@ -192,6 +192,8 @@
 	return retval;
 }
 
+spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+
 #define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
 
@@ -242,7 +244,9 @@
  */
 void mmput(struct mm_struct *mm)
 {
-	if (atomic_dec_and_test(&mm->mm_users)) {
+	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
+		list_del(&mm->mmlist);
+		spin_unlock(&mmlist_lock);
 		exit_mmap(mm);
 		mmdrop(mm);
 	}
@@ -272,9 +276,9 @@
 	}
 }
 
-static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 {
-	struct mm_struct * mm;
+	struct mm_struct * mm, *oldmm;
 	int retval;
 
 	tsk->min_flt = tsk->maj_flt = 0;
@@ -289,12 +293,13 @@
 	 *
 	 * We need to steal a active VM for that..
 	 */
-	mm = current->mm;
-	if (!mm)
+	oldmm = current->mm;
+	if (!oldmm)
 		return 0;
 
 	if (clone_flags & CLONE_VM) {
-		atomic_inc(&mm->mm_users);
+		atomic_inc(&oldmm->mm_users);
+		mm = oldmm;
 		goto good_mm;
 	}
 
@@ -304,16 +309,25 @@
 		goto fail_nomem;
 
 	/* Copy the current MM stuff.. */
-	memcpy(mm, current->mm, sizeof(*mm));
+	memcpy(mm, oldmm, sizeof(*mm));
 	if (!mm_init(mm))
 		goto fail_nomem;
 
-	tsk->mm = mm;
-	tsk->active_mm = mm;
-
-	down(&current->mm->mmap_sem);
+	down(&oldmm->mmap_sem);
 	retval = dup_mmap(mm);
-	up(&current->mm->mmap_sem);
+	up(&oldmm->mmap_sem);
+
+	/*
+	 * Add it to the mmlist after the parent.
+	 *
+	 * Doing it this way means that we can order
+	 * the list, and fork() won't mess up the
+	 * ordering significantly.
+	 */
+	spin_lock(&mmlist_lock);
+	list_add(&mm->mmlist, &oldmm->mmlist);
+	spin_unlock(&mmlist_lock);
+
 	if (retval)
 		goto free_pt;
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/kernel/ksyms.c linux/kernel/ksyms.c
--- v2.4.0-prerelease/linux/kernel/ksyms.c	Mon Jan  1 09:38:36 2001
+++ linux/kernel/ksyms.c	Tue Jan  2 16:45:37 2001
@@ -430,10 +430,8 @@
 EXPORT_SYMBOL(do_gettimeofday);
 EXPORT_SYMBOL(do_settimeofday);
 
-#ifdef CONFIG_X86
+#if !defined(__ia64__)
 EXPORT_SYMBOL(loops_per_jiffy);
-#elif !defined(__ia64__)
-EXPORT_SYMBOL(loops_per_sec);
 #endif
 
 EXPORT_SYMBOL(kstat);
diff -u --recursive --new-file v2.4.0-prerelease/linux/kernel/sched.c linux/kernel/sched.c
--- v2.4.0-prerelease/linux/kernel/sched.c	Thu Jan  4 13:50:38 2001
+++ linux/kernel/sched.c	Thu Jan  4 13:50:38 2001
@@ -1151,17 +1151,13 @@
 	else
 		printk("\n");
 
+#ifdef CONFIG_X86
+/* This is very useful, but only works on x86 right now */
 	{
-		struct sigqueue *q;
-		char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1]; 
-
-		render_sigset_t(&p->pending.signal, s);
-		render_sigset_t(&p->blocked, b);
-		printk("   sig: %d %s %s :", signal_pending(p), s, b);
-		for (q = p->pending.head; q ; q = q->next)
-			printk(" %d", q->info.si_signo);
-		printk(" X\n");
+		extern void show_trace(unsigned long);
+		show_trace(p->thread.esp);
 	}
+#endif
 }
 
 char * render_sigset_t(sigset_t *set, char *buffer)
diff -u --recursive --new-file v2.4.0-prerelease/linux/kernel/signal.c linux/kernel/signal.c
--- v2.4.0-prerelease/linux/kernel/signal.c	Tue Oct 31 12:42:27 2000
+++ linux/kernel/signal.c	Wed Jan  3 20:45:26 2001
@@ -750,16 +750,6 @@
 	status = tsk->exit_code & 0x7f;
 	why = SI_KERNEL;	/* shouldn't happen */
 	switch (tsk->state) {
-	case TASK_ZOMBIE:
-		if (tsk->exit_code & 0x80)
-			why = CLD_DUMPED;
-		else if (tsk->exit_code & 0x7f)
-			why = CLD_KILLED;
-		else {
-			why = CLD_EXITED;
-			status = tsk->exit_code >> 8;
-		}
-		break;
 	case TASK_STOPPED:
 		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
 		if (tsk->ptrace & PT_PTRACED)
@@ -769,8 +759,14 @@
 		break;
 
 	default:
-		printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
-		       tsk->state);
+		if (tsk->exit_code & 0x80)
+			why = CLD_DUMPED;
+		else if (tsk->exit_code & 0x7f)
+			why = CLD_KILLED;
+		else {
+			why = CLD_EXITED;
+			status = tsk->exit_code >> 8;
+		}
 		break;
 	}
 	info.si_code = why;
diff -u --recursive --new-file v2.4.0-prerelease/linux/mm/filemap.c linux/mm/filemap.c
--- v2.4.0-prerelease/linux/mm/filemap.c	Mon Jan  1 09:38:36 2001
+++ linux/mm/filemap.c	Tue Jan  2 18:59:45 2001
@@ -219,13 +219,12 @@
 	page_cache_release(page);
 }
 
-void truncate_list_pages(struct list_head *head, unsigned long start, unsigned partial)
+static int FASTCALL(truncate_list_pages(struct list_head *, unsigned long, unsigned *));
+static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial)
 {
 	struct list_head *curr;
 	struct page * page;
 
-repeat:
-	spin_lock(&pagecache_lock);
 	curr = head->next;
 	while (curr != head) {
 		unsigned long offset;
@@ -235,37 +234,29 @@
 		offset = page->index;
 
 		/* Is one of the pages to truncate? */
-		if ((offset >= start) || (partial && (offset + 1) == start)) {
+		if ((offset >= start) || (*partial && (offset + 1) == start)) {
 			if (TryLockPage(page)) {
 				page_cache_get(page);
 				spin_unlock(&pagecache_lock);
 				wait_on_page(page);
 				page_cache_release(page);
-				goto repeat;
+				return 1;
 			}
 			page_cache_get(page);
 			spin_unlock(&pagecache_lock);
 
-			if (partial && (offset + 1) == start) {
-				truncate_partial_page(page, partial);
-				partial = 0;
+			if (*partial && (offset + 1) == start) {
+				truncate_partial_page(page, *partial);
+				*partial = 0;
 			} else 
 				truncate_complete_page(page);
 
 			UnlockPage(page);
 			page_cache_release(page);
-
-			/*
-			 * We have done things without the pagecache lock,
-			 * so we'll have to repeat the scan.
-			 * It's not possible to deadlock here because
-			 * we are guaranteed to make progress. (ie. we have
-			 * just removed a page)
-			 */
-			goto repeat;
+			return 1;
 		}
 	}
-	spin_unlock(&pagecache_lock);
+	return 0;
 }
 
 
@@ -283,9 +274,15 @@
 	unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
 
-	truncate_list_pages(&mapping->clean_pages, start, partial);
-	truncate_list_pages(&mapping->dirty_pages, start, partial);
-	truncate_list_pages(&mapping->locked_pages, start, partial);
+repeat:
+	spin_lock(&pagecache_lock);
+	if (truncate_list_pages(&mapping->clean_pages, start, &partial))
+		goto repeat;
+	if (truncate_list_pages(&mapping->dirty_pages, start, &partial))
+		goto repeat;
+	if (truncate_list_pages(&mapping->locked_pages, start, &partial))
+		goto repeat;
+	spin_unlock(&pagecache_lock);
 }
 
 static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
@@ -2498,6 +2495,7 @@
 	while (count) {
 		unsigned long bytes, index, offset;
 		char *kaddr;
+		int deactivate = 1;
 
 		/*
 		 * Try to find the page in the cache. If it isn't there,
@@ -2506,8 +2504,10 @@
 		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
 		index = pos >> PAGE_CACHE_SHIFT;
 		bytes = PAGE_CACHE_SIZE - offset;
-		if (bytes > count)
+		if (bytes > count) {
 			bytes = count;
+			deactivate = 0;
+		}
 
 		/*
 		 * Bring in the user page that we will copy from _first_.
@@ -2551,7 +2551,8 @@
 unlock:
 		/* Mark it unlocked again and drop the page.. */
 		UnlockPage(page);
-		deactivate_page(page);
+		if (deactivate)
+			deactivate_page(page);
 		page_cache_release(page);
 
 		if (status < 0)
diff -u --recursive --new-file v2.4.0-prerelease/linux/mm/memory.c linux/mm/memory.c
--- v2.4.0-prerelease/linux/mm/memory.c	Mon Jan  1 09:38:36 2001
+++ linux/mm/memory.c	Mon Jan  1 10:37:41 2001
@@ -483,9 +483,10 @@
 			goto out_unlock;
 		}
 		map = get_page_map(map);
-		if (map)
+		if (map) {
+			flush_dcache_page(map);
 			atomic_inc(&map->count);
-		else
+		} else
 			printk (KERN_INFO "Mapped page missing [%d]\n", i);
 		spin_unlock(&mm->page_table_lock);
 		iobuf->maplist[i] = map;
diff -u --recursive --new-file v2.4.0-prerelease/linux/mm/page_alloc.c linux/mm/page_alloc.c
--- v2.4.0-prerelease/linux/mm/page_alloc.c	Mon Dec 11 17:59:45 2000
+++ linux/mm/page_alloc.c	Wed Jan  3 09:59:06 2001
@@ -68,16 +68,6 @@
 	struct page *base;
 	zone_t *zone;
 
-	/*
-	 * Subtle. We do not want to test this in the inlined part of
-	 * __free_page() - it's a rare condition and just increases
-	 * cache footprint unnecesserily. So we do an 'incorrect'
-	 * decrement on page->count for reserved pages, but this part
-	 * makes it safe.
-	 */
-	if (PageReserved(page))
-		return;
-
 	if (page->buffers)
 		BUG();
 	if (page->mapping)
@@ -427,7 +417,9 @@
 		if (order > 0 && (gfp_mask & __GFP_WAIT)) {
 			zone = zonelist->zones;
 			/* First, clean some dirty pages. */
+			current->flags |= PF_MEMALLOC;
 			page_launder(gfp_mask, 1);
+			current->flags &= ~PF_MEMALLOC;
 			for (;;) {
 				zone_t *z = *(zone++);
 				if (!z)
@@ -556,7 +548,7 @@
 
 void __free_pages(struct page *page, unsigned long order)
 {
-	if (put_page_testzero(page))
+	if (!PageReserved(page) && put_page_testzero(page))
 		__free_pages_ok(page, order);
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/mm/vmscan.c linux/mm/vmscan.c
--- v2.4.0-prerelease/linux/mm/vmscan.c	Mon Jan  1 09:38:36 2001
+++ linux/mm/vmscan.c	Wed Jan  3 20:45:26 2001
@@ -49,8 +49,10 @@
 	if ((!VALID_PAGE(page)) || PageReserved(page))
 		goto out_failed;
 
-	if (mm->swap_cnt)
-		mm->swap_cnt--;
+	if (!mm->swap_cnt)
+		return 1;
+
+	mm->swap_cnt--;
 
 	onlist = PageActive(page);
 	/* Don't look at this pte if it's been accessed recently. */
@@ -79,6 +81,7 @@
 	 * bits in hardware.
 	 */
 	pte = ptep_get_and_clear(page_table);
+	flush_tlb_page(vma, address);
 
 	/*
 	 * Is the page already in the swap cache? If so, then
@@ -98,7 +101,6 @@
 drop_pte:
 		UnlockPage(page);
 		mm->rss--;
-		flush_tlb_page(vma, address);
 		deactivate_page(page);
 		page_cache_release(page);
 out_failed:
@@ -193,8 +195,6 @@
 		result = try_to_swap_out(mm, vma, address, pte, gfp_mask);
 		if (result)
 			return result;
-		if (!mm->swap_cnt)
-			return 0;
 		address += PAGE_SIZE;
 		pte++;
 	} while (address && (address < end));
@@ -224,8 +224,6 @@
 		int result = swap_out_pmd(mm, vma, pmd, address, end, gfp_mask);
 		if (result)
 			return result;
-		if (!mm->swap_cnt)
-			return 0;
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address && (address < end));
@@ -250,8 +248,6 @@
 		int result = swap_out_pgd(mm, vma, pgdir, address, end, gfp_mask);
 		if (result)
 			return result;
-		if (!mm->swap_cnt)
-			return 0;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		pgdir++;
 	} while (address && (address < end));
@@ -260,29 +256,28 @@
 
 static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
 {
+	int result = 0;
 	unsigned long address;
 	struct vm_area_struct* vma;
 
 	/*
 	 * Go through process' page directory.
 	 */
-	address = mm->swap_address;
 
 	/*
 	 * Find the proper vm-area after freezing the vma chain 
 	 * and ptes.
 	 */
 	spin_lock(&mm->page_table_lock);
+	address = mm->swap_address;
 	vma = find_vma(mm, address);
 	if (vma) {
 		if (address < vma->vm_start)
 			address = vma->vm_start;
 
 		for (;;) {
-			int result = swap_out_vma(mm, vma, address, gfp_mask);
+			result = swap_out_vma(mm, vma, address, gfp_mask);
 			if (result)
-				return result;
-			if (!mm->swap_cnt)
 				goto out_unlock;
 			vma = vma->vm_next;
 			if (!vma)
@@ -296,9 +291,7 @@
 
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
-
-	/* We didn't find anything for the process */
-	return 0;
+	return result;
 }
 
 /*
@@ -309,13 +302,11 @@
 #define SWAP_SHIFT 5
 #define SWAP_MIN 8
 
-static int swap_out(unsigned int priority, int gfp_mask, unsigned long idle_time)
+static int swap_out(unsigned int priority, int gfp_mask)
 {
-	struct task_struct * p;
 	int counter;
 	int __ret = 0;
 
-	lock_kernel();
 	/* 
 	 * We make one or two passes through the task list, indexed by 
 	 * assign = {0, 1}:
@@ -335,24 +326,18 @@
 		counter = 1;
 
 	for (; counter >= 0; counter--) {
+		struct list_head *p;
 		unsigned long max_cnt = 0;
 		struct mm_struct *best = NULL;
-		int pid = 0;
 		int assign = 0;
 		int found_task = 0;
 	select:
-		read_lock(&tasklist_lock);
-		p = init_task.next_task;
-		for (; p != &init_task; p = p->next_task) {
-			struct mm_struct *mm = p->mm;
-			if (!p->swappable || !mm)
-				continue;
+		spin_lock(&mmlist_lock);
+		p = init_mm.mmlist.next;
+		for (; p != &init_mm.mmlist; p = p->next) {
+			struct mm_struct *mm = list_entry(p, struct mm_struct, mmlist);
 	 		if (mm->rss <= 0)
 				continue;
-			/* Skip tasks which haven't slept long enough yet when idle-swapping. */
-			if (idle_time && !assign && (!(p->state & TASK_INTERRUPTIBLE) ||
-					time_after(p->sleep_time + idle_time * HZ, jiffies)))
-				continue;
 			found_task++;
 			/* Refresh swap_cnt? */
 			if (assign == 1) {
@@ -363,29 +348,32 @@
 			if (mm->swap_cnt > max_cnt) {
 				max_cnt = mm->swap_cnt;
 				best = mm;
-				pid = p->pid;
 			}
 		}
-		read_unlock(&tasklist_lock);
+
+		/* Make sure it doesn't disappear */
+		if (best)
+			atomic_inc(&best->mm_users);
+		spin_unlock(&mmlist_lock);
+
+		/*
+		 * We have dropped the tasklist_lock, but we
+		 * know that "mm" still exists: we are running
+		 * with the big kernel lock, and exit_mm()
+		 * cannot race with us.
+		 */
 		if (!best) {
 			if (!assign && found_task > 0) {
 				assign = 1;
 				goto select;
 			}
-			goto out;
+			break;
 		} else {
-			int ret;
-
-			atomic_inc(&best->mm_count);
-			ret = swap_out_mm(best, gfp_mask);
-			mmdrop(best);
-
-			__ret = 1;
-			goto out;
+			__ret = swap_out_mm(best, gfp_mask);
+			mmput(best);
+			break;
 		}
 	}
-out:
-	unlock_kernel();
 	return __ret;
 }
 
@@ -848,7 +836,6 @@
 static int refill_inactive(unsigned int gfp_mask, int user)
 {
 	int priority, count, start_count, made_progress;
-	unsigned long idle_time;
 
 	count = inactive_shortage() + free_shortage();
 	if (user)
@@ -858,17 +845,6 @@
 	/* Always trim SLAB caches when memory gets low. */
 	kmem_cache_reap(gfp_mask);
 
-	/*
-	 * Calculate the minimum time (in seconds) a process must
-	 * have slept before we consider it for idle swapping.
-	 * This must be the number of seconds it takes to go through
-	 * all of the cache. Doing this idle swapping makes the VM
-	 * smoother once we start hitting swap.
-	 */
-	idle_time = atomic_read(&page_cache_size);
-	idle_time += atomic_read(&buffermem_pages);
-	idle_time /= (inactive_target + 1);
-
 	priority = 6;
 	do {
 		made_progress = 0;
@@ -878,8 +854,7 @@
 			schedule();
 		}
 
-		while (refill_inactive_scan(priority, 1) ||
-				swap_out(priority, gfp_mask, idle_time)) {
+		while (refill_inactive_scan(priority, 1)) {
 			made_progress = 1;
 			if (--count <= 0)
 				goto done;
@@ -896,7 +871,7 @@
 		/*
 		 * Then, try to page stuff out..
 		 */
-		while (swap_out(priority, gfp_mask, 0)) {
+		while (swap_out(priority, gfp_mask)) {
 			made_progress = 1;
 			if (--count <= 0)
 				goto done;
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/atm/Makefile linux/net/atm/Makefile
--- v2.4.0-prerelease/linux/net/atm/Makefile	Mon Jan  1 09:38:36 2001
+++ linux/net/atm/Makefile	Mon Jan  1 09:54:07 2001
@@ -11,7 +11,7 @@
 
 O_TARGET= atm.o
 
-export-objs = common.o atm_misc.o raw.o resources.o ipcommon.o proc.o lane_mpoa_init.o
+export-objs = common.o atm_misc.o raw.o resources.o ipcommon.o proc.o
 
 multi-list = mpoa.o
 mpoa-objs = mpc.o mpoa_caches.o mpoa_proc.o
@@ -33,7 +33,7 @@
 obj-y += proc.o
 endif
 
-obj-$(CONFIG_ATM_LANE) += lec.o lane_mpoa_init.o
+obj-$(CONFIG_ATM_LANE) += lec.o
 obj-$(CONFIG_ATM_MPOA) += mpoa.o
 
 include $(TOPDIR)/Rules.make
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/atm/common.c linux/net/atm/common.c
--- v2.4.0-prerelease/linux/net/atm/common.c	Mon Jan  1 09:38:36 2001
+++ linux/net/atm/common.c	Mon Jan  1 09:54:07 2001
@@ -5,6 +5,7 @@
 
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/kmod.h>
 #include <linux/net.h>		/* struct socket, struct net_proto, struct
 				   proto_ops */
 #include <linux/atm.h>		/* ATM stuff */
@@ -679,7 +680,7 @@
 				goto done;
 			}
                         if (atm_lane_ops.lecd_attach == NULL)
-                                atm_lane_init();
+				atm_lane_init();
                         if (atm_lane_ops.lecd_attach == NULL) { /* try again */
 				ret_val = -ENOSYS;
 				goto done;
@@ -1086,3 +1087,61 @@
 		return -EINVAL;
 	return atm_do_getsockopt(sock,level,optname,optval,len);
 }
+
+
+/*
+ * lane_mpoa_init.c: A couple of helper functions
+ * to make modular LANE and MPOA client easier to implement
+ */
+
+/*
+ * This is how it goes:
+ *
+ * if xxxx is not compiled as module, call atm_xxxx_init_ops()
+ *    from here
+ * else call atm_mpoa_init_ops() from init_module() within
+ *    the kernel when xxxx module is loaded
+ *
+ * In either case function pointers in struct atm_xxxx_ops
+ * are initialized to their correct values. Either they
+ * point to functions in the module or in the kernel
+ */
+ 
+extern struct atm_mpoa_ops atm_mpoa_ops; /* in common.c */
+extern struct atm_lane_ops atm_lane_ops; /* in common.c */
+
+#if defined(CONFIG_ATM_MPOA) || defined(CONFIG_ATM_MPOA_MODULE)
+void atm_mpoa_init(void)
+{
+#ifndef CONFIG_ATM_MPOA_MODULE /* not module */
+        atm_mpoa_init_ops(&atm_mpoa_ops);
+#else
+	request_module("mpoa");
+#endif
+
+        return;
+}
+#endif
+
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
+						unsigned char *addr) = NULL;
+void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) = NULL;
+#if defined(CONFIG_ATM_LANE_MODULE) || defined(CONFIG_BRIDGE_MODULE)
+EXPORT_SYMBOL(br_fdb_get_hook);
+EXPORT_SYMBOL(br_fdb_put_hook);
+#endif /* defined(CONFIG_ATM_LANE_MODULE) || defined(CONFIG_BRIDGE_MODULE) */
+#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
+
+void atm_lane_init(void)
+{
+#ifndef CONFIG_ATM_LANE_MODULE /* not module */
+        atm_lane_init_ops(&atm_lane_ops);
+#else
+	request_module("lec");
+#endif
+
+        return;
+}        
+#endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/atm/lane_mpoa_init.c linux/net/atm/lane_mpoa_init.c
--- v2.4.0-prerelease/linux/net/atm/lane_mpoa_init.c	Fri Apr 14 09:37:20 2000
+++ linux/net/atm/lane_mpoa_init.c	Wed Dec 31 16:00:00 1969
@@ -1,58 +0,0 @@
-#include <linux/config.h>
-#include <linux/module.h>
-
-#include "mpc.h"
-#include "lec.h"
-
-/*
- * lane_mpoa_init.c: A couple of helper functions
- * to make modular LANE and MPOA client easier to implement
- */
-
-/*
- * This is how it goes:
- *
- * if xxxx is not compiled as module, call atm_xxxx_init_ops()
- *    from here
- * else call atm_mpoa_init_ops() from init_module() within
- *    the kernel when xxxx module is loaded
- *
- * In either case function pointers in struct atm_xxxx_ops
- * are initialized to their correct values. Either they
- * point to functions in the module or in the kernel
- */
- 
-extern struct atm_mpoa_ops atm_mpoa_ops; /* in common.c */
-extern struct atm_lane_ops atm_lane_ops; /* in common.c */
-
-#if defined(CONFIG_ATM_MPOA) || defined(CONFIG_ATM_MPOA_MODULE)
-void atm_mpoa_init(void)
-{
-#ifndef CONFIG_ATM_MPOA_MODULE /* not module */
-        atm_mpoa_init_ops(&atm_mpoa_ops);
-#endif
-
-        return;
-}
-#endif
-
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-						unsigned char *addr) = NULL;
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) = NULL;
-#if defined(CONFIG_ATM_LANE_MODULE) || defined(CONFIG_BRIDGE_MODULE)
-EXPORT_SYMBOL(br_fdb_get_hook);
-EXPORT_SYMBOL(br_fdb_put_hook);
-#endif /* defined(CONFIG_ATM_LANE_MODULE) || defined(CONFIG_BRIDGE_MODULE) */
-#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
-
-void atm_lane_init(void)
-{
-#ifndef CONFIG_ATM_LANE_MODULE /* not module */
-        atm_lane_init_ops(&atm_lane_ops);
-#endif
-
-        return;
-}        
-#endif
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/atm/lec.c linux/net/atm/lec.c
--- v2.4.0-prerelease/linux/net/atm/lec.c	Mon Jan  1 09:38:36 2001
+++ linux/net/atm/lec.c	Thu Jan  4 13:00:55 2001
@@ -39,6 +39,7 @@
 
 /* Modular too */
 #include <linux/module.h>
+#include <linux/init.h>
 
 #include "lec.h"
 #include "lec_arpc.h"
@@ -828,8 +829,7 @@
 	return;
 }
 
-#ifdef MODULE
-int init_module(void)
+static int __init lane_module_init(void)
 {
         extern struct atm_lane_ops atm_lane_ops;
 
@@ -838,17 +838,12 @@
         return 0;
 }
 
-void cleanup_module(void)
+static void __exit lane_module_cleanup(void)
 {
         int i;
         extern struct atm_lane_ops atm_lane_ops;
         struct lec_priv *priv;
 
-        if (MOD_IN_USE) {
-                printk(KERN_NOTICE "lec.c: module in use\n");
-                return;
-        }
-
         atm_lane_ops.lecd_attach = NULL;
         atm_lane_ops.mcast_attach = NULL;
         atm_lane_ops.vcc_attach = NULL;
@@ -857,7 +852,9 @@
         for (i = 0; i < MAX_LEC_ITF; i++) {
                 if (dev_lec[i] != NULL) {
                         priv = (struct lec_priv *)dev_lec[i]->priv;
+#if defined(CONFIG_TR)
                         unregister_trdev(dev_lec[i]);
+#endif
                         kfree(dev_lec[i]);
                         dev_lec[i] = NULL;
                 }
@@ -865,7 +862,9 @@
 
         return;                                    
 }
-#endif /* MODULE */
+
+module_init(lane_module_init);
+module_exit(lane_module_cleanup);
 
 /*
  * LANE2: 3.1.3, LE_RESOLVE.request
@@ -2170,4 +2169,3 @@
         lec_arp_put(priv->lec_arp_tables,entry);
         lec_arp_unlock(priv);  
 }
-
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/decnet/af_decnet.c linux/net/decnet/af_decnet.c
--- v2.4.0-prerelease/linux/net/decnet/af_decnet.c	Sun Nov 19 18:44:23 2000
+++ linux/net/decnet/af_decnet.c	Mon Jan  1 09:54:07 2001
@@ -2133,6 +2133,9 @@
 
 static void __exit decnet_exit(void)
 {
+	sock_unregister(AF_DECnet);
+	dev_remove_pack(&dn_dix_packet_type);
+
 #ifdef CONFIG_SYSCTL
 	dn_unregister_sysctl();
 #endif /* CONFIG_SYSCTL */
@@ -2148,9 +2151,6 @@
 #endif /* CONFIG_DECNET_ROUTER */
 
 	proc_net_remove("decnet");
-
-	dev_remove_pack(&dn_dix_packet_type);
-	sock_unregister(AF_DECnet);
 }
 
 module_init(decnet_init);
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/decnet/dn_table.c linux/net/decnet/dn_table.c
--- v2.4.0-prerelease/linux/net/decnet/dn_table.c	Tue Jul 18 16:09:27 2000
+++ linux/net/decnet/dn_table.c	Mon Jan  1 09:54:07 2001
@@ -902,6 +902,10 @@
 
 void __exit dn_fib_table_cleanup(void)
 {
+	int i;
+
+	for (i = 0; i < DN_NUM_TABLES + 1; ++i)
+		dn_fib_del_tree(i);
+
 	return;
 }
-
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv4/arp.c linux/net/ipv4/arp.c
--- v2.4.0-prerelease/linux/net/ipv4/arp.c	Tue Oct 31 12:42:27 2000
+++ linux/net/ipv4/arp.c	Mon Jan  1 10:23:21 2001
@@ -292,7 +292,7 @@
 			neigh->output = neigh->ops->output;
 			return 0;
 #endif
-		}
+		;}
 #endif
 		if (neigh->type == RTN_MULTICAST) {
 			neigh->nud_state = NUD_NOARP;
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv4/netfilter/ipt_LOG.c linux/net/ipv4/netfilter/ipt_LOG.c
--- v2.4.0-prerelease/linux/net/ipv4/netfilter/ipt_LOG.c	Thu Aug 10 12:35:15 2000
+++ linux/net/ipv4/netfilter/ipt_LOG.c	Mon Jan  1 09:54:07 2001
@@ -288,7 +288,7 @@
 	if (in && !out) {
 		/* MAC logging for input chain only. */
 		printk("MAC=");
-		if ((*pskb)->dev && (*pskb)->dev->hard_header_len && (*pskb)->mac.raw != iph) {
+		if ((*pskb)->dev && (*pskb)->dev->hard_header_len && (*pskb)->mac.raw != (void*)iph) {
 			int i;
 			unsigned char *p = (*pskb)->mac.raw;
 			for (i = 0; i < (*pskb)->dev->hard_header_len; i++,p++)
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv4/tcp_ipv4.c linux/net/ipv4/tcp_ipv4.c
--- v2.4.0-prerelease/linux/net/ipv4/tcp_ipv4.c	Mon Dec 11 17:59:45 2000
+++ linux/net/ipv4/tcp_ipv4.c	Mon Jan  1 11:01:58 2001
@@ -1707,7 +1707,7 @@
 		break;
 	case TCP_TW_RST:
 		goto no_tcp_socket;
-	case TCP_TW_SUCCESS:
+	case TCP_TW_SUCCESS:;
 	}
 	goto discard_it;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv4/tcp_timer.c linux/net/ipv4/tcp_timer.c
--- v2.4.0-prerelease/linux/net/ipv4/tcp_timer.c	Tue Oct 31 12:42:27 2000
+++ linux/net/ipv4/tcp_timer.c	Mon Jan  1 11:01:58 2001
@@ -384,7 +384,7 @@
 	if (tp->retransmits > sysctl_tcp_retries1)
 		__sk_dst_reset(sk);
 
-out:
+out:;
 }
 
 static void tcp_write_timer(unsigned long data)
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv4/udp.c linux/net/ipv4/udp.c
--- v2.4.0-prerelease/linux/net/ipv4/udp.c	Mon Jan  1 09:38:36 2001
+++ linux/net/ipv4/udp.c	Mon Jan  1 11:01:58 2001
@@ -158,7 +158,7 @@
 			} while ((sk = sk->next) != NULL);
 			best_size_so_far = size;
 			best = result;
-		next:
+		next:;
 		}
 		result = best;
 		for(;; result += UDP_HTABLE_SIZE) {
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv6/addrconf.c linux/net/ipv6/addrconf.c
--- v2.4.0-prerelease/linux/net/ipv6/addrconf.c	Mon Dec 11 17:59:45 2000
+++ linux/net/ipv6/addrconf.c	Mon Jan  1 11:01:58 2001
@@ -206,7 +206,7 @@
 	case AC_RS:
 		ifp->timer.function = addrconf_rs_timer;
 		break;
-	default:
+	default:;
 	}
 	ifp->timer.expires = jiffies + when;
 	add_timer(&ifp->timer);
@@ -2015,7 +2015,7 @@
 		case ARPHRD_IEEE802_TR:	
 			addrconf_dev_config(dev);
 			break;
-		default:
+		default:;
 			/* Ignore all other */
 		}
 	}
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv6/icmp.c linux/net/ipv6/icmp.c
--- v2.4.0-prerelease/linux/net/ipv6/icmp.c	Mon Mar 27 10:35:57 2000
+++ linux/net/ipv6/icmp.c	Mon Jan  1 11:01:58 2001
@@ -542,7 +542,7 @@
 				ntohs(daddr->in6_u.u6_addr16[7]));
 			goto discard_it;
 		}
-	default:
+	default:;
 		/* CHECKSUM_UNNECESSARY */
 	};
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv6/tcp_ipv6.c linux/net/ipv6/tcp_ipv6.c
--- v2.4.0-prerelease/linux/net/ipv6/tcp_ipv6.c	Mon Dec 11 17:59:45 2000
+++ linux/net/ipv6/tcp_ipv6.c	Mon Jan  1 11:01:58 2001
@@ -1639,7 +1639,7 @@
 		break;
 	case TCP_TW_RST:
 		goto no_tcp_socket;
-	case TCP_TW_SUCCESS:
+	case TCP_TW_SUCCESS:;
 	}
 	goto discard_it;
 }
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/ipv6/udp.c linux/net/ipv6/udp.c
--- v2.4.0-prerelease/linux/net/ipv6/udp.c	Mon Dec 11 17:59:45 2000
+++ linux/net/ipv6/udp.c	Mon Jan  1 11:01:58 2001
@@ -83,7 +83,7 @@
 			} while ((sk = sk->next) != NULL);
 			best_size_so_far = size;
 			best = result;
-		next:
+		next:;
 		}
 		result = best;
 		for(;; result += UDP_HTABLE_SIZE) {
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/irda/irias_object.c linux/net/irda/irias_object.c
--- v2.4.0-prerelease/linux/net/irda/irias_object.c	Sun Nov 19 18:44:24 2000
+++ linux/net/irda/irias_object.c	Mon Jan  1 09:54:07 2001
@@ -34,7 +34,7 @@
 /*
  *  Used when a missing value needs to be returned
  */
-struct ias_value missing = { IAS_MISSING, 0, 0, {0}};
+struct ias_value missing = { IAS_MISSING, 0, 0, 0};
 
 /*
  * Function strdup (str)
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/irda/irqueue.c linux/net/irda/irqueue.c
--- v2.4.0-prerelease/linux/net/irda/irqueue.c	Sun Nov 19 18:44:24 2000
+++ linux/net/irda/irqueue.c	Thu Jan  4 13:00:55 2001
@@ -8,6 +8,8 @@
  * Created at:    Tue Jun  9 13:29:31 1998
  * Modified at:   Sun Dec 12 13:48:22 1999
  * Modified by:   Dag Brattli <dagb@cs.uit.no>
+ * Modified at:   Thu Jan  4 14:29:10 CET 2001
+ * Modified by:   Marc Zyngier <mzyngier@freesurf.fr>
  * 
  *     Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
  *     Copyright (C) 1998, Dag Brattli, 
@@ -142,69 +144,6 @@
 }
 
 /*
- * Function hashbin_lock (hashbin, hashv, name)
- *
- *    Lock the hashbin
- *
- */
-void hashbin_lock(hashbin_t* hashbin, __u32 hashv, char* name, 
-		  unsigned long flags)
-{
-	int bin;
-	
-	IRDA_DEBUG(0, "hashbin_lock\n");
-
-	ASSERT(hashbin != NULL, return;);
-	ASSERT(hashbin->magic == HB_MAGIC, return;);
-
-	/*
-	 * Locate hashbin
-	 */
-	if (name)
-		hashv = hash(name);
-	bin = GET_HASHBIN(hashv);
-	
-	/* Synchronize */
-	if ( hashbin->hb_type & HB_GLOBAL )
-		spin_lock_irqsave(&hashbin->hb_mutex[ bin], flags);
-	else {
-		save_flags(flags);
-		cli();
-	}
-}
-
-/*
- * Function hashbin_unlock (hashbin, hashv, name)
- *
- *    Unlock the hashbin
- *
- */
-void hashbin_unlock(hashbin_t* hashbin, __u32 hashv, char* name, 
-		    unsigned long flags)
-{
-	int bin;
-
-	IRDA_DEBUG(0, "hashbin_unlock()\n");
-
-	ASSERT(hashbin != NULL, return;);
-	ASSERT(hashbin->magic == HB_MAGIC, return;);
-	
-	/*
-	 * Locate hashbin
-	 */
-	if (name )
-		hashv = hash(name);
-	bin = GET_HASHBIN(hashv);
-	
-	/* Release lock */
-	if ( hashbin->hb_type & HB_GLOBAL)
-		spin_unlock_irq( &hashbin->hb_mutex[ bin]);
-	else if (hashbin->hb_type & HB_LOCAL) {
-		restore_flags( flags);
-	}
-}
-
-/*
  * Function hashbin_insert (hashbin, entry, name)
  *
  *    Insert an entry into the hashbin
@@ -258,7 +197,7 @@
 	/* Release lock */
 	if ( hashbin->hb_type & HB_GLOBAL) {
 
-		spin_unlock_irq( &hashbin->hb_mutex[ bin]);
+		spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
 
 	} else if ( hashbin->hb_type & HB_LOCAL) {
 		restore_flags( flags);
@@ -327,7 +266,7 @@
 	
 	/* Release lock */
 	if ( hashbin->hb_type & HB_GLOBAL) {
-		spin_unlock_irq( &hashbin->hb_mutex[ bin]);
+		spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
 
 	} else if ( hashbin->hb_type & HB_LOCAL) {
 		restore_flags( flags);
@@ -436,7 +375,7 @@
 
 	/* Release lock */
 	if ( hashbin->hb_type & HB_GLOBAL) {
-		spin_unlock_irq( &hashbin->hb_mutex[ bin]);
+		spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
 
 	} else if ( hashbin->hb_type & HB_LOCAL) {
 		restore_flags( flags);
@@ -511,7 +450,7 @@
 
 	/* Release lock */
 	if ( hashbin->hb_type & HB_GLOBAL) {
-		spin_unlock_irq( &hashbin->hb_mutex[ bin]);
+		spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
 
 	} else if ( hashbin->hb_type & HB_LOCAL) {
 		restore_flags( flags);
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/packet/af_packet.c linux/net/packet/af_packet.c
--- v2.4.0-prerelease/linux/net/packet/af_packet.c	Mon Dec 11 17:59:45 2000
+++ linux/net/packet/af_packet.c	Mon Jan  1 11:01:58 2001
@@ -1139,7 +1139,7 @@
 	case PACKET_MR_ALLMULTI:
 		dev_set_allmulti(dev, what);
 		break;
-	default:
+	default:;
 	}
 }
 
diff -u --recursive --new-file v2.4.0-prerelease/linux/net/sched/sch_api.c linux/net/sched/sch_api.c
--- v2.4.0-prerelease/linux/net/sched/sch_api.c	Thu Jun 22 07:23:26 2000
+++ linux/net/sched/sch_api.c	Mon Jan  1 09:57:08 2001
@@ -1203,7 +1203,7 @@
 
 #define INIT_QDISC(name) { \
           extern struct Qdisc_ops name##_qdisc_ops; \
-          register_qdisc(&##name##_qdisc_ops); \
+          register_qdisc(& name##_qdisc_ops);       \
 	}
 
 	INIT_QDISC(pfifo);