Merge master.kernel.org:/home/rmk/linux-2.6-arm
diff --git a/Makefile b/Makefile
index be33d75..91de6be 100644
--- a/Makefile
+++ b/Makefile
@@ -372,7 +372,7 @@
 # Files to ignore in find ... statements
 
 RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc -o -name .hg \) -prune -o
-RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg
+export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg
 
 # ===========================================================================
 # Rules shared between *config targets and build targets
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index dbc0b3e..a604efc 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -123,8 +123,8 @@
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-					       (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
+					     (1 << IO_TLB_SHIFT), 0x100000000);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
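
Note: the SWIOTLB bounce buffer has to be reachable by devices limited to 32-bit DMA, so the allocation now goes through the new bootmem "limit" helper with an upper bound of 4GB. The helper and its limit semantics come from the include/linux/bootmem.h and mm/bootmem.c hunks later in this series; the calling sketch below assumes those semantics, and the function name is made up.

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/page.h>

static void __init alloc_bounce_buffer_sketch(unsigned long nbytes)
{
	char *buf;

	/*
	 * Ask bootmem for low pages whose physical addresses stay below
	 * 4GB, so devices restricted to 32-bit DMA can reach the bounce
	 * buffer.  A limit of 0 would mean "no upper bound".
	 */
	buf = alloc_bootmem_low_pages_limit(nbytes, 0x100000000UL);
	if (!buf)
		panic("Cannot allocate SWIOTLB buffer");

	/* Every byte of the buffer is physically below the 4GB boundary. */
	BUG_ON(__pa(buf) + nbytes > 0x100000000UL);
}
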
diff --git a/arch/ppc64/kernel/vdso32/gettimeofday.S b/arch/ppc64/kernel/vdso32/gettimeofday.S
index 07f1c1c..e243c1d 100644
--- a/arch/ppc64/kernel/vdso32/gettimeofday.S
+++ b/arch/ppc64/kernel/vdso32/gettimeofday.S
@@ -109,7 +109,7 @@
 	lwz	r6,(CFG_TB_TO_XS+4)(r9)
 	mulhwu	r4,r7,r5
 	mulhwu	r6,r7,r6
-	mullw	r6,r7,r5
+	mullw	r0,r7,r5
 	addc	r6,r6,r0
 
 	/* At this point, we have the scaled xsec value in r4 + XER:CA
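
Note: this sequence assembles a 32x32->64 multiply-accumulate out of mulhwu/mullw pairs. The old code wrote the low product into r6, clobbering the mulhwu result it was about to accumulate; the fix parks it in r0 instead. Below is a rough C model of the corrected arithmetic, assuming r7 holds the low timebase word and r5/r6 the high/low halves of tb_to_xs (which is what the surrounding loads suggest); it is illustrative, not the kernel's code.

#include <stdint.h>

/*
 * Model of the fixed vdso32 sequence (tb_to_xs is a 64-bit fixed-point
 * scale split into xs_hi:xs_lo):
 *
 *   mulhwu r4,r7,r5   r4 = high32(tb_lo * xs_hi)
 *   mulhwu r6,r7,r6   r6 = high32(tb_lo * xs_lo)
 *   mullw  r0,r7,r5   r0 = low32 (tb_lo * xs_hi)   <- was "mullw r6,..."
 *   addc   r6,r6,r0   r6 += r0, carry left in XER:CA for the r4 word
 *
 * Writing the low product into r6 (the old code) destroyed the second
 * mulhwu result before it could be accumulated.  The value built here
 * is approximately (tb_lo * tb_to_xs) >> 32, with the top word in r4
 * (plus the carry) and the low word in r6.
 */
static uint64_t scale_tb_low_word(uint32_t tb_lo, uint32_t xs_hi, uint32_t xs_lo)
{
	uint32_t r4 = (uint32_t)(((uint64_t)tb_lo * xs_hi) >> 32);
	uint32_t r6 = (uint32_t)(((uint64_t)tb_lo * xs_lo) >> 32);
	uint32_t r0 = (uint32_t)((uint64_t)tb_lo * xs_hi);

	return ((uint64_t)r4 << 32) + r6 + r0;	/* carry folds into the top word */
}
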
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index e36c5da..3937adf 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -96,7 +96,7 @@
 static acpi_status
 do_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
 {
-	int *busnr = (int *)data;
+	unsigned long *busnr = (unsigned long *)data;
 	struct acpi_resource_address64 address;
 
 	if (resource->id != ACPI_RSTYPE_ADDRESS16 &&
@@ -115,13 +115,13 @@
 static int get_root_bridge_busnr(acpi_handle handle)
 {
 	acpi_status status;
-	int bus, bbn;
+	unsigned long bus, bbn;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
 	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
 	status = acpi_evaluate_integer(handle, METHOD_NAME__BBN, NULL,
-				       (unsigned long *)&bbn);
+				       &bbn);
 	if (status == AE_NOT_FOUND) {
 		/* Assume bus = 0 */
 		printk(KERN_INFO PREFIX
@@ -153,7 +153,7 @@
 	}
       exit:
 	acpi_os_free(buffer.pointer);
-	return bbn;
+	return (int)bbn;
 }
 
 static acpi_status
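
Note: acpi_evaluate_integer() in this kernel stores its result through an unsigned long pointer, so casting an int * to unsigned long * turns it into an 8-byte store into a 4-byte slot on 64-bit machines, corrupting whatever sits next to it on the stack. The fix keeps the locals unsigned long end to end and narrows only at the return. A minimal sketch of the hazard and the fixed pattern, with hypothetical function names:

#include <acpi/acpi_bus.h>

/* BROKEN on 64-bit: mirrors the shape of the old glue.c code. */
static int read_bbn_broken(acpi_handle handle)
{
	int bus = 0, bbn = 0;	/* 4-byte locals, typically adjacent on the stack */

	/*
	 * The callee writes sizeof(unsigned long) bytes through its last
	 * argument: on a 64-bit platform that write can spill into 'bus'.
	 */
	acpi_evaluate_integer(handle, "_BBN", NULL, (unsigned long *)&bbn);
	return bbn;		/* any later use of 'bus' may see garbage */
}

/* Fixed pattern: match the callee's type, narrow only at the boundary. */
static int read_bbn_fixed(acpi_handle handle)
{
	unsigned long bbn = 0;
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_BBN", NULL, &bbn);
	if (ACPI_FAILURE(status))
		return 0;	/* assume bus 0, as glue.c does */
	return (int)bbn;
}
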
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index fc7d4a5..c8e1b6c 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -437,7 +437,7 @@
 				    drm_mga_dma_bootstrap_t * dma_bs)
 {
 	drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
-	const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
+	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
 	int err;
 	unsigned  offset;
 	const unsigned secondary_size = dma_bs->secondary_bin_count
@@ -499,6 +499,12 @@
 		return err;
 	}
 
+	/* Make drm_addbufs happy by not trying to create a mapping for less
+	 * than a page.
+	 */
+	if (warp_size < PAGE_SIZE)
+		warp_size = PAGE_SIZE;
+
 	offset = 0;
 	err = drm_addmap( dev, offset, warp_size,
 			  _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
@@ -587,7 +593,7 @@
 				    drm_mga_dma_bootstrap_t * dma_bs)
 {
 	drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
-	const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
+	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
 	unsigned int primary_size;
 	unsigned int bin_count;
 	int err;
@@ -599,6 +605,12 @@
 		return DRM_ERR(EFAULT);
 	}
 
+	/* Make drm_addbufs happy by not trying to create a mapping for less
+	 * than a page.
+	 */
+	if (warp_size < PAGE_SIZE)
+		warp_size = PAGE_SIZE;
+
 	/* The proper alignment is 0x100 for this mapping */
 	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
 			 _DRM_READ_ONLY, &dev_priv->warp);
@@ -812,6 +824,10 @@
 	}
 
 	if (! dev_priv->used_new_dma_init) {
+
+		dev_priv->dma_access = MGA_PAGPXFER;
+		dev_priv->wagp_enable = MGA_WAGP_ENABLE;
+
 		dev_priv->status = drm_core_findmap(dev, init->status_offset);
 		if (!dev_priv->status) {
 			DRM_ERROR("failed to find status page!\n");
@@ -928,7 +944,7 @@
 		drm_mga_private_t *dev_priv = dev->dev_private;
 
 		if ((dev_priv->warp != NULL) 
-		    && (dev_priv->mmio->type != _DRM_CONSISTENT))
+		    && (dev_priv->warp->type != _DRM_CONSISTENT))
 			drm_core_ioremapfree(dev_priv->warp, dev);
 
 		if ((dev_priv->primary != NULL) 
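
Note: three fixes in this file: the WARP microcode region is rounded up to a full page before drm_addmap() (the microcode can be smaller than a page, and sub-page mappings upset drm_addbufs, per the added comment), the old-style DMA init path now gets dma_access/wagp_enable defaults, and the teardown checks dev_priv->warp rather than dev_priv->mmio before unmapping it. A minimal sketch of the round-up pattern, assuming the mga driver's own headers (drmP.h, mga_drv.h) are in scope; the function name is made up:

/* Sketch only: drm_addmap() cannot usefully map less than a page, and
 * the WARP microcode may be smaller than that, so round up first.
 */
static int mga_map_warp_sketch(drm_device_t *dev, drm_mga_private_t *dev_priv)
{
	unsigned int warp_size = mga_warp_microcode_size(dev_priv);

	if (warp_size < PAGE_SIZE)
		warp_size = PAGE_SIZE;

	return drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
			  _DRM_READ_ONLY, &dev_priv->warp);
}
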
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 6a6acbd..4cf9b8f 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -2283,8 +2283,9 @@
 {
 	struct ohci1394_iso_tasklet *t;
 	unsigned long mask;
+	unsigned long flags;
 
-	spin_lock(&ohci->iso_tasklet_list_lock);
+	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
 
 	list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
 		mask = 1 << t->context;
@@ -2295,8 +2296,7 @@
 			tasklet_schedule(&t->tasklet);
 	}
 
-	spin_unlock(&ohci->iso_tasklet_list_lock);
-
+	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
 }
 
 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
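
Note: iso_tasklet_list_lock is taken elsewhere with the interrupt-disabling variants, so this list walk switches to spin_lock_irqsave()/spin_unlock_irqrestore(), which disables interrupts for the critical section and then restores exactly the interrupt state the caller had. A minimal sketch of the idiom (names are illustrative):

#include <linux/spinlock.h>

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* 2.6-era initializer */

static void walk_list_safely(void)
{
	unsigned long flags;	/* must be a local in the locking function */

	/*
	 * irqsave works whether IRQs were enabled or disabled on entry:
	 * it records the prior state in 'flags' and restores exactly that
	 * state, so the same helper can be shared by interrupt-context and
	 * process-context callers without deadlocking on an interrupt
	 * taken while the lock is held.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... traverse or modify the list protected by example_lock ... */
	spin_unlock_irqrestore(&example_lock, flags);
}
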
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 315f5ca..0470f77 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -412,6 +412,7 @@
 static ssize_t raw1394_read(struct file *file, char __user * buffer,
 			    size_t count, loff_t * offset_is_ignored)
 {
+	unsigned long flags;
 	struct file_info *fi = (struct file_info *)file->private_data;
 	struct list_head *lh;
 	struct pending_request *req;
@@ -435,10 +436,10 @@
 		}
 	}
 
-	spin_lock_irq(&fi->reqlists_lock);
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
 	lh = fi->req_complete.next;
 	list_del(lh);
-	spin_unlock_irq(&fi->reqlists_lock);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
 
 	req = list_entry(lh, struct pending_request, list);
 
@@ -486,6 +487,7 @@
 
 static int state_initialized(struct file_info *fi, struct pending_request *req)
 {
+	unsigned long flags;
 	struct host_info *hi;
 	struct raw1394_khost_list *khl;
 
@@ -499,7 +501,7 @@
 
 	switch (req->req.type) {
 	case RAW1394_REQ_LIST_CARDS:
-		spin_lock_irq(&host_info_lock);
+		spin_lock_irqsave(&host_info_lock, flags);
 		khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
 			      SLAB_ATOMIC);
 
@@ -513,7 +515,7 @@
 				khl++;
 			}
 		}
-		spin_unlock_irq(&host_info_lock);
+		spin_unlock_irqrestore(&host_info_lock, flags);
 
 		if (khl != NULL) {
 			req->req.error = RAW1394_ERROR_NONE;
@@ -528,7 +530,7 @@
 		break;
 
 	case RAW1394_REQ_SET_CARD:
-		spin_lock_irq(&host_info_lock);
+		spin_lock_irqsave(&host_info_lock, flags);
 		if (req->req.misc < host_count) {
 			list_for_each_entry(hi, &host_info_list, list) {
 				if (!req->req.misc--)
@@ -550,7 +552,7 @@
 		} else {
 			req->req.error = RAW1394_ERROR_INVALID_ARG;
 		}
-		spin_unlock_irq(&host_info_lock);
+		spin_unlock_irqrestore(&host_info_lock, flags);
 
 		req->req.length = 0;
 		break;
@@ -569,7 +571,6 @@
 {
 	int channel = req->req.misc;
 
-	spin_lock_irq(&host_info_lock);
 	if ((channel > 63) || (channel < -64)) {
 		req->req.error = RAW1394_ERROR_INVALID_ARG;
 	} else if (channel >= 0) {
@@ -601,7 +602,6 @@
 
 	req->req.length = 0;
 	queue_complete_req(req);
-	spin_unlock_irq(&host_info_lock);
 }
 
 static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
@@ -627,6 +627,7 @@
 static int handle_async_request(struct file_info *fi,
 				struct pending_request *req, int node)
 {
+	unsigned long flags;
 	struct hpsb_packet *packet = NULL;
 	u64 addr = req->req.address & 0xffffffffffffULL;
 
@@ -761,9 +762,9 @@
 	hpsb_set_packet_complete_task(packet,
 				      (void (*)(void *))queue_complete_cb, req);
 
-	spin_lock_irq(&fi->reqlists_lock);
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
 	list_add_tail(&req->list, &fi->req_pending);
-	spin_unlock_irq(&fi->reqlists_lock);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
 
 	packet->generation = req->req.generation;
 
@@ -779,6 +780,7 @@
 static int handle_iso_send(struct file_info *fi, struct pending_request *req,
 			   int channel)
 {
+	unsigned long flags;
 	struct hpsb_packet *packet;
 
 	packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f,
@@ -804,9 +806,9 @@
 				      (void (*)(void *))queue_complete_req,
 				      req);
 
-	spin_lock_irq(&fi->reqlists_lock);
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
 	list_add_tail(&req->list, &fi->req_pending);
-	spin_unlock_irq(&fi->reqlists_lock);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
 
 	/* Update the generation of the packet just before sending. */
 	packet->generation = req->req.generation;
@@ -821,6 +823,7 @@
 
 static int handle_async_send(struct file_info *fi, struct pending_request *req)
 {
+	unsigned long flags;
 	struct hpsb_packet *packet;
 	int header_length = req->req.misc & 0xffff;
 	int expect_response = req->req.misc >> 16;
@@ -867,9 +870,9 @@
 	hpsb_set_packet_complete_task(packet,
 				      (void (*)(void *))queue_complete_cb, req);
 
-	spin_lock_irq(&fi->reqlists_lock);
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
 	list_add_tail(&req->list, &fi->req_pending);
-	spin_unlock_irq(&fi->reqlists_lock);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
 
 	/* Update the generation of the packet just before sending. */
 	packet->generation = req->req.generation;
@@ -885,6 +888,7 @@
 static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
 		    u64 addr, size_t length, u16 flags)
 {
+	unsigned long irqflags;
 	struct pending_request *req;
 	struct host_info *hi;
 	struct file_info *fi = NULL;
@@ -899,7 +903,7 @@
 	       "addr: %4.4x %8.8x length: %Zu", nodeid,
 	       (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
 	       length);
-	spin_lock(&host_info_lock);
+	spin_lock_irqsave(&host_info_lock, irqflags);
 	hi = find_host_info(host);	/* search address-entry */
 	if (hi != NULL) {
 		list_for_each_entry(fi, &hi->file_info_list, list) {
@@ -924,7 +928,7 @@
 	if (!found) {
 		printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found"
 		       " -> rcode_address_error\n");
-		spin_unlock(&host_info_lock);
+		spin_unlock_irqrestore(&host_info_lock, irqflags);
 		return (RCODE_ADDRESS_ERROR);
 	} else {
 		DBGMSG("arm_read addr_entry FOUND");
@@ -954,7 +958,7 @@
 		req = __alloc_pending_request(SLAB_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_read -> rcode_conflict_error");
-			spin_unlock(&host_info_lock);
+			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
 							   The request may be retried */
 		}
@@ -974,7 +978,7 @@
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_read -> rcode_conflict_error");
-			spin_unlock(&host_info_lock);
+			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
 							   The request may be retried */
 		}
@@ -1031,13 +1035,14 @@
 			    sizeof(struct arm_request));
 		queue_complete_req(req);
 	}
-	spin_unlock(&host_info_lock);
+	spin_unlock_irqrestore(&host_info_lock, irqflags);
 	return (rcode);
 }
 
 static int arm_write(struct hpsb_host *host, int nodeid, int destid,
 		     quadlet_t * data, u64 addr, size_t length, u16 flags)
 {
+	unsigned long irqflags;
 	struct pending_request *req;
 	struct host_info *hi;
 	struct file_info *fi = NULL;
@@ -1052,7 +1057,7 @@
 	       "addr: %4.4x %8.8x length: %Zu", nodeid,
 	       (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
 	       length);
-	spin_lock(&host_info_lock);
+	spin_lock_irqsave(&host_info_lock, irqflags);
 	hi = find_host_info(host);	/* search address-entry */
 	if (hi != NULL) {
 		list_for_each_entry(fi, &hi->file_info_list, list) {
@@ -1077,7 +1082,7 @@
 	if (!found) {
 		printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found"
 		       " -> rcode_address_error\n");
-		spin_unlock(&host_info_lock);
+		spin_unlock_irqrestore(&host_info_lock, irqflags);
 		return (RCODE_ADDRESS_ERROR);
 	} else {
 		DBGMSG("arm_write addr_entry FOUND");
@@ -1106,7 +1111,7 @@
 		req = __alloc_pending_request(SLAB_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_write -> rcode_conflict_error");
-			spin_unlock(&host_info_lock);
+			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
 							   The request may be retried */
 		}
@@ -1118,7 +1123,7 @@
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_write -> rcode_conflict_error");
-			spin_unlock(&host_info_lock);
+			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
 							   The request may be retried */
 		}
@@ -1165,7 +1170,7 @@
 			    sizeof(struct arm_request));
 		queue_complete_req(req);
 	}
-	spin_unlock(&host_info_lock);
+	spin_unlock_irqrestore(&host_info_lock, irqflags);
 	return (rcode);
 }
 
@@ -1173,6 +1178,7 @@
 		    u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
 		    u16 flags)
 {
+	unsigned long irqflags;
 	struct pending_request *req;
 	struct host_info *hi;
 	struct file_info *fi = NULL;
@@ -1198,7 +1204,7 @@
 		       (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
 		       be32_to_cpu(data), be32_to_cpu(arg));
 	}
-	spin_lock(&host_info_lock);
+	spin_lock_irqsave(&host_info_lock, irqflags);
 	hi = find_host_info(host);	/* search address-entry */
 	if (hi != NULL) {
 		list_for_each_entry(fi, &hi->file_info_list, list) {
@@ -1224,7 +1230,7 @@
 	if (!found) {
 		printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found"
 		       " -> rcode_address_error\n");
-		spin_unlock(&host_info_lock);
+		spin_unlock_irqrestore(&host_info_lock, irqflags);
 		return (RCODE_ADDRESS_ERROR);
 	} else {
 		DBGMSG("arm_lock addr_entry FOUND");
@@ -1307,7 +1313,7 @@
 		req = __alloc_pending_request(SLAB_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_lock -> rcode_conflict_error");
-			spin_unlock(&host_info_lock);
+			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
 							   The request may be retried */
 		}
@@ -1316,7 +1322,7 @@
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_lock -> rcode_conflict_error");
-			spin_unlock(&host_info_lock);
+			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
 							   The request may be retried */
 		}
@@ -1382,7 +1388,7 @@
 			    sizeof(struct arm_response) + 2 * sizeof(*store));
 		queue_complete_req(req);
 	}
-	spin_unlock(&host_info_lock);
+	spin_unlock_irqrestore(&host_info_lock, irqflags);
 	return (rcode);
 }
 
@@ -1390,6 +1396,7 @@
 		      u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
 		      u16 flags)
 {
+	unsigned long irqflags;
 	struct pending_request *req;
 	struct host_info *hi;
 	struct file_info *fi = NULL;
@@ -1422,7 +1429,7 @@
 		       (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF),
 		       (u32) (be64_to_cpu(arg) & 0xFFFFFFFF));
 	}
-	spin_lock(&host_info_lock);
+	spin_lock_irqsave(&host_info_lock, irqflags);
 	hi = find_host_info(host);	/* search addressentry in file_info's for host */
 	if (hi != NULL) {
 		list_for_each_entry(fi, &hi->file_info_list, list) {
@@ -1449,7 +1456,7 @@
 		printk(KERN_ERR
 		       "raw1394: arm_lock64 FAILED addr_entry not found"
 		       " -> rcode_address_error\n");
-		spin_unlock(&host_info_lock);
+		spin_unlock_irqrestore(&host_info_lock, irqflags);
 		return (RCODE_ADDRESS_ERROR);
 	} else {
 		DBGMSG("arm_lock64 addr_entry FOUND");
@@ -1533,7 +1540,7 @@
 		DBGMSG("arm_lock64 -> entering notification-section");
 		req = __alloc_pending_request(SLAB_ATOMIC);
 		if (!req) {
-			spin_unlock(&host_info_lock);
+			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			DBGMSG("arm_lock64 -> rcode_conflict_error");
 			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
 							   The request may be retried */
@@ -1542,7 +1549,7 @@
 		req->data = kmalloc(size, SLAB_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
-			spin_unlock(&host_info_lock);
+			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			DBGMSG("arm_lock64 -> rcode_conflict_error");
 			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
 							   The request may be retried */
@@ -1609,7 +1616,7 @@
 			    sizeof(struct arm_response) + 2 * sizeof(*store));
 		queue_complete_req(req);
 	}
-	spin_unlock(&host_info_lock);
+	spin_unlock_irqrestore(&host_info_lock, irqflags);
 	return (rcode);
 }
 
@@ -1980,6 +1987,7 @@
 	struct hpsb_packet *packet = NULL;
 	int retval = 0;
 	quadlet_t data;
+	unsigned long flags;
 
 	data = be32_to_cpu((u32) req->req.sendb);
 	DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data);
@@ -1990,9 +1998,9 @@
 	req->packet = packet;
 	hpsb_set_packet_complete_task(packet,
 				      (void (*)(void *))queue_complete_cb, req);
-	spin_lock_irq(&fi->reqlists_lock);
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
 	list_add_tail(&req->list, &fi->req_pending);
-	spin_unlock_irq(&fi->reqlists_lock);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
 	packet->generation = req->req.generation;
 	retval = hpsb_send_packet(packet);
 	DBGMSG("write_phypacket send_packet called => retval: %d ", retval);
@@ -2659,14 +2667,15 @@
 {
 	struct file_info *fi = file->private_data;
 	unsigned int mask = POLLOUT | POLLWRNORM;
+	unsigned long flags;
 
 	poll_wait(file, &fi->poll_wait_complete, pt);
 
-	spin_lock_irq(&fi->reqlists_lock);
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
 	if (!list_empty(&fi->req_complete)) {
 		mask |= POLLIN | POLLRDNORM;
 	}
-	spin_unlock_irq(&fi->reqlists_lock);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
 
 	return mask;
 }
@@ -2710,6 +2719,7 @@
 	struct arm_addr *arm_addr = NULL;
 	int another_host;
 	int csr_mod = 0;
+	unsigned long flags;
 
 	if (fi->iso_state != RAW1394_ISO_INACTIVE)
 		raw1394_iso_shutdown(fi);
@@ -2720,13 +2730,11 @@
 		}
 	}
 
-	spin_lock_irq(&host_info_lock);
+	spin_lock_irqsave(&host_info_lock, flags);
 	fi->listen_channels = 0;
-	spin_unlock_irq(&host_info_lock);
 
 	fail = 0;
 	/* set address-entries invalid */
-	spin_lock_irq(&host_info_lock);
 
 	while (!list_empty(&fi->addr_list)) {
 		another_host = 0;
@@ -2777,14 +2785,14 @@
 		vfree(addr->addr_space_buffer);
 		kfree(addr);
 	}			/* while */
-	spin_unlock_irq(&host_info_lock);
+	spin_unlock_irqrestore(&host_info_lock, flags);
 	if (fail > 0) {
 		printk(KERN_ERR "raw1394: during addr_list-release "
 		       "error(s) occurred \n");
 	}
 
 	while (!done) {
-		spin_lock_irq(&fi->reqlists_lock);
+		spin_lock_irqsave(&fi->reqlists_lock, flags);
 
 		while (!list_empty(&fi->req_complete)) {
 			lh = fi->req_complete.next;
@@ -2798,7 +2806,7 @@
 		if (list_empty(&fi->req_pending))
 			done = 1;
 
-		spin_unlock_irq(&fi->reqlists_lock);
+		spin_unlock_irqrestore(&fi->reqlists_lock, flags);
 
 		if (!done)
 			down_interruptible(&fi->complete_sem);
@@ -2828,9 +2836,9 @@
 		     fi->host->id);
 
 	if (fi->state == connected) {
-		spin_lock_irq(&host_info_lock);
+		spin_lock_irqsave(&host_info_lock, flags);
 		list_del(&fi->list);
-		spin_unlock_irq(&host_info_lock);
+		spin_unlock_irqrestore(&host_info_lock, flags);
 
 		put_device(&fi->host->device);
 	}
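
Note: the same conversion as in ohci1394.c is applied throughout raw1394: every path that can be reached with interrupts in an unknown state now takes host_info_lock or reqlists_lock with spin_lock_irqsave() and a local flags word. raw1394_release() additionally merges two back-to-back critical sections on host_info_lock that only dropped and immediately retook the lock. A sketch of that merge, with a stand-in structure (not the driver's real type):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct fi_sketch {			/* stand-in for raw1394's file_info */
	u64 listen_channels;
	struct list_head addr_list;
};

/* Before: two adjacent critical sections on the same lock, bouncing the
 * lock for no benefit between clearing the channels and invalidating
 * the address list.
 */
static void release_split(spinlock_t *lock, struct fi_sketch *fi)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	fi->listen_channels = 0;
	spin_unlock_irqrestore(lock, flags);

	spin_lock_irqsave(lock, flags);
	/* ... walk and invalidate fi->addr_list ... */
	spin_unlock_irqrestore(lock, flags);
}

/* After: one critical section, as in the raw1394_release() hunk above. */
static void release_merged(spinlock_t *lock, struct fi_sketch *fi)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	fi->listen_channels = 0;
	/* ... walk and invalidate fi->addr_list ... */
	spin_unlock_irqrestore(lock, flags);
}
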
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2897df9..e947607 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3063,6 +3063,7 @@
 	 * many dirty RAID5 blocks.
 	 */
 
+	allow_signal(SIGKILL);
 	complete(thread->event);
 	while (!kthread_should_stop()) {
 		void (*run)(mddev_t *);
@@ -3111,7 +3112,7 @@
 	thread->mddev = mddev;
 	thread->name = name;
 	thread->timeout = MAX_SCHEDULE_TIMEOUT;
-	thread->tsk = kthread_run(md_thread, thread, mdname(thread->mddev));
+	thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
 	if (IS_ERR(thread->tsk)) {
 		kfree(thread);
 		return NULL;
@@ -3569,6 +3570,7 @@
 	try_again:
 		if (signal_pending(current)) {
 			flush_signals(current);
+			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			goto skip;
 		}
 		ITERATE_MDDEV(mddev2,tmp) {
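
Note: three related md changes: the thread function explicitly allows SIGKILL (kernel threads ignore signals by default; allow_signal() re-enables delivery), kthread_run() now receives the caller's printf-style name plus mdname() so the resync/raid threads get their descriptive names back, and a pending signal during resync sets MD_RECOVERY_INTR before skipping, so the interruption is recorded. A minimal sketch of the kthread naming and signal pattern, with made-up names:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>

static int worker_sketch(void *data)
{
	/* Kernel threads ignore signals unless explicitly allowed. */
	allow_signal(SIGKILL);

	while (!kthread_should_stop()) {
		if (signal_pending(current))
			flush_signals(current);	/* md also flags MD_RECOVERY_INTR here */
		/* ... one unit of work ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static struct task_struct *start_worker_sketch(const char *name_fmt,
					       const char *devname)
{
	/*
	 * kthread_run() takes a printf-style name.  md's callers pass a
	 * format such as "%s_resync" as name_fmt; the old md.c call handed
	 * only mdname() to kthread_run(), dropping that descriptive suffix.
	 */
	return kthread_run(worker_sketch, NULL, name_fmt, devname);
}
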
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index cf3daaa..15ceaf6 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -578,8 +578,9 @@
 				txfid, NULL);
 	if (err) {
 		netif_start_queue(dev);
-		printk(KERN_ERR "%s: Error %d transmitting packet\n",
-		       dev->name, err);
+		if (net_ratelimit())
+			printk(KERN_ERR "%s: Error %d transmitting packet\n",
+				dev->name, err);
 		stats->tx_errors++;
 		goto fail;
 	}
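
Note: TX failures can arrive in bursts from the xmit path, and one printk per failed packet can flood the log; net_ratelimit() gates the message. A minimal sketch of the idiom:

#include <linux/kernel.h>
#include <linux/net.h>		/* net_ratelimit() */
#include <linux/netdevice.h>

static void report_tx_error(struct net_device *dev, int err,
			    struct net_device_stats *stats)
{
	/*
	 * net_ratelimit() returns true only while the global printk rate
	 * limit has not been exceeded, so a burst of failures produces a
	 * handful of messages instead of thousands of lines.
	 */
	if (net_ratelimit())
		printk(KERN_ERR "%s: Error %d transmitting packet\n",
		       dev->name, err);
	stats->tx_errors++;
}
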
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 82bd884..3b03b0b 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -43,7 +43,7 @@
 extern unsigned long __init bootmem_bootmap_pages (unsigned long);
 extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend);
 extern void __init free_bootmem (unsigned long addr, unsigned long size);
-extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
+extern void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
 #define alloc_bootmem(x) \
@@ -54,6 +54,16 @@
 	__alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages(x) \
 	__alloc_bootmem((x), PAGE_SIZE, 0)
+
+#define alloc_bootmem_limit(x, limit)						\
+	__alloc_bootmem_limit((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
+#define alloc_bootmem_low_limit(x, limit)			\
+	__alloc_bootmem_limit((x), SMP_CACHE_BYTES, 0, (limit))
+#define alloc_bootmem_pages_limit(x, limit)					\
+	__alloc_bootmem_limit((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
+#define alloc_bootmem_low_pages_limit(x, limit)		\
+	__alloc_bootmem_limit((x), PAGE_SIZE, 0, (limit))
+
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 extern unsigned long __init free_all_bootmem (void);
 
@@ -61,7 +71,7 @@
 extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size);
 extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size);
 extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat);
-extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal);
+extern void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
@@ -69,6 +79,14 @@
 	__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0)
+
+#define alloc_bootmem_node_limit(pgdat, x, limit)				\
+	__alloc_bootmem_node_limit((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
+#define alloc_bootmem_pages_node_limit(pgdat, x, limit)				\
+	__alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
+#define alloc_bootmem_low_pages_node_limit(pgdat, x, limit)		\
+	__alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, 0, (limit))
+
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
@@ -105,5 +123,15 @@
 #endif
 extern int __initdata hashdist;		/* Distribute hashes across NUMA nodes? */
 
+static inline void *__alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
+{
+	return __alloc_bootmem_limit(size, align, goal, 0);
+}
+
+static inline void *__alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align,
+				     unsigned long goal)
+{
+	return __alloc_bootmem_node_limit(pgdat, size, align, goal, 0);
+}
 
 #endif /* _LINUX_BOOTMEM_H */
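
Note: the old __alloc_bootmem()/__alloc_bootmem_node() entry points survive as inline wrappers that pass limit == 0, which the mm/bootmem.c change below treats as "no upper bound", so existing callers keep their behaviour while new callers can cap the physical address of the allocation. A minimal usage sketch under those semantics (the function name is made up):

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>

static void __init early_buffers_sketch(void)
{
	void *anywhere, *below_4g;

	/* Old behaviour, unchanged: the wrappers pass limit 0. */
	anywhere = alloc_bootmem_pages(64 * 1024);

	/*
	 * New variant: the allocation is drawn only from page frames
	 * below the given physical address (here 4GB); NULL is returned
	 * if no node can satisfy the request under that cap.
	 */
	below_4g = alloc_bootmem_low_pages_limit(64 * 1024, 0x100000000UL);
	if (!anywhere || !below_4g)
		panic("boot memory allocation failed");
}
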
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e670b0d..42cb7d70 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -155,11 +155,24 @@
 {
 	file->f_op = &hugetlbfs_file_operations;
 }
+
+static inline int valid_hugetlb_file_off(struct vm_area_struct *vma, 
+					  unsigned long address) 
+{
+	struct inode *inode = vma->vm_file->f_dentry->d_inode;
+	loff_t file_off = address - vma->vm_start;
+	
+	file_off += (vma->vm_pgoff << PAGE_SHIFT);
+	
+	return (file_off < inode->i_size);
+}
+
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file)		0
 #define set_file_hugepages(file)	BUG()
 #define hugetlb_zero_setup(size)	ERR_PTR(-ENOSYS)
+#define valid_hugetlb_file_off(vma, address) 	0
 
 #endif /* !CONFIG_HUGETLBFS */
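
Note: valid_hugetlb_file_off() reports whether a faulting address in a hugetlb VMA still lies within the backing file's i_size; the !CONFIG_HUGETLBFS stub simply returns 0. The mm/memory.c hunk at the end of this series uses it to tell a stale or speculative TLB entry apart from an access beyond a truncation point. A sketch mirroring that intended use (hypothetical helper, not the kernel's code):

#include <linux/hugetlb.h>
#include <linux/mm.h>

static int hugetlb_fault_disposition(struct vm_area_struct *vma,
				     unsigned long address)
{
	if (!is_vm_hugetlb_page(vma))
		return VM_FAULT_MINOR;	/* not our concern here */

	/*
	 * Offset still inside the file: treat the fault as spurious
	 * (e.g. a stale or zero TLB entry from HW prefetching, as the
	 * memory.c comment puts it) and let the access retry.
	 * Offset past i_size: the file was truncated under the mapping,
	 * so the access gets SIGBUS.
	 */
	return valid_hugetlb_file_off(vma, address) ? VM_FAULT_MINOR
						    : VM_FAULT_SIGBUS;
}
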
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 533ce27..280bd44 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -848,7 +848,7 @@
 {
 	unsigned long new_flags = p->flags;
 
-	new_flags &= ~PF_SUPERPRIV;
+	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
 	new_flags |= PF_FORKNOEXEC;
 	if (!(clone_flags & CLONE_PTRACE))
 		p->ptrace = 0;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 7a51a55..b3f3edc 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -387,25 +387,19 @@
 	if (unlikely(p == NULL))
 		return 0;
 
+	spin_lock(&p->sighand->siglock);
 	if (!list_empty(&timer->it.cpu.entry)) {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
-			/*
-			 * We raced with the reaping of the task.
-			 * The deletion should have cleared us off the list.
-			 */
-			BUG_ON(!list_empty(&timer->it.cpu.entry));
-		} else {
-			/*
-			 * Take us off the task's timer list.
-			 */
-			spin_lock(&p->sighand->siglock);
-			list_del(&timer->it.cpu.entry);
-			spin_unlock(&p->sighand->siglock);
-		}
-		read_unlock(&tasklist_lock);
+		/*
+		 * Take us off the task's timer list.  We don't need to
+		 * take tasklist_lock and check for the task being reaped.
+		 * If it was reaped, it already called posix_cpu_timers_exit
+		 * and posix_cpu_timers_exit_group to clear all the timers
+		 * that pointed to it.
+		 */
+		list_del(&timer->it.cpu.entry);
+		put_task_struct(p);
 	}
-	put_task_struct(p);
+	spin_unlock(&p->sighand->siglock);
 
 	return 0;
 }
diff --git a/mm/bootmem.c b/mm/bootmem.c
index c1330cc..a58699b 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -154,10 +154,10 @@
  */
 static void * __init
 __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
-		unsigned long align, unsigned long goal)
+	      unsigned long align, unsigned long goal, unsigned long limit)
 {
 	unsigned long offset, remaining_size, areasize, preferred;
-	unsigned long i, start = 0, incr, eidx;
+	unsigned long i, start = 0, incr, eidx, end_pfn = bdata->node_low_pfn;
 	void *ret;
 
 	if(!size) {
@@ -166,7 +166,14 @@
 	}
 	BUG_ON(align & (align-1));
 
-	eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+	if (limit && bdata->node_boot_start >= limit)
+		return NULL;
+
+	limit >>= PAGE_SHIFT;
+	if (limit && end_pfn > limit)
+		end_pfn = limit;
+
+	eidx = end_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
 	offset = 0;
 	if (align &&
 	    (bdata->node_boot_start & (align - 1UL)) != 0)
@@ -178,11 +185,12 @@
 	 * first, then we try to allocate lower pages.
 	 */
 	if (goal && (goal >= bdata->node_boot_start) && 
-	    ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
+	    ((goal >> PAGE_SHIFT) < end_pfn)) {
 		preferred = goal - bdata->node_boot_start;
 
 		if (bdata->last_success >= preferred)
-			preferred = bdata->last_success;
+			if (!limit || (limit && limit > bdata->last_success))
+				preferred = bdata->last_success;
 	} else
 		preferred = 0;
 
@@ -382,14 +390,15 @@
 	return(free_all_bootmem_core(NODE_DATA(0)));
 }
 
-void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
+void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal,
+				unsigned long limit)
 {
 	pg_data_t *pgdat = pgdat_list;
 	void *ptr;
 
 	for_each_pgdat(pgdat)
 		if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
-						align, goal)))
+						 align, goal, limit)))
 			return(ptr);
 
 	/*
@@ -400,14 +409,16 @@
 	return NULL;
 }
 
-void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
+
+void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align,
+				     unsigned long goal, unsigned long limit)
 {
 	void *ptr;
 
-	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
+	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit);
 	if (ptr)
 		return (ptr);
 
-	return __alloc_bootmem(size, align, goal);
+	return __alloc_bootmem_limit(size, align, goal, limit);
 }
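
Note: inside __alloc_bootmem_core() the limit is applied by clipping the node's end pfn, which shrinks the scan window (eidx) and keeps the goal/last_success heuristics from pointing above the cap. Below is a small standalone model of that clamp with a worked example in the comments; the field names mirror bootmem_data, the rest is illustrative and assumes a 64-bit unsigned long as on ia64.

#include <stdio.h>

#define PAGE_SHIFT 12

struct bdata_model {
	unsigned long node_boot_start;	/* physical start of the node */
	unsigned long node_low_pfn;	/* first pfn past the node */
};

/* Returns the number of usable pfn slots, or 0 if the whole node lies
 * above the limit (the kernel returns NULL in that case).
 */
static unsigned long usable_eidx(const struct bdata_model *b, unsigned long limit)
{
	unsigned long end_pfn = b->node_low_pfn;

	if (limit && b->node_boot_start >= limit)
		return 0;			/* node entirely above the limit */

	limit >>= PAGE_SHIFT;
	if (limit && end_pfn > limit)
		end_pfn = limit;		/* clip the scan window */

	return end_pfn - (b->node_boot_start >> PAGE_SHIFT);
}

int main(void)
{
	/* Node covering 0..8GB; a 4GB limit halves the window. */
	struct bdata_model b = { .node_boot_start = 0,
				 .node_low_pfn = 0x200000 };

	printf("%lu\n", usable_eidx(&b, 0));		 /* 2097152: no limit   */
	printf("%lu\n", usable_eidx(&b, 0x100000000UL)); /* 1048576: below 4GB */
	return 0;
}
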
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 901ac52..a1b30d4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -274,21 +274,22 @@
 {
 	pte_t *src_pte, *dst_pte, entry;
 	struct page *ptepage;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
+	unsigned long addr;
 
-	while (addr < end) {
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
 		dst_pte = huge_pte_alloc(dst, addr);
 		if (!dst_pte)
 			goto nomem;
+		spin_lock(&src->page_table_lock);
 		src_pte = huge_pte_offset(src, addr);
-		BUG_ON(!src_pte || pte_none(*src_pte)); /* prefaulted */
-		entry = *src_pte;
-		ptepage = pte_page(entry);
-		get_page(ptepage);
-		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-		set_huge_pte_at(dst, addr, dst_pte, entry);
-		addr += HPAGE_SIZE;
+		if (src_pte && !pte_none(*src_pte)) {
+			entry = *src_pte;
+			ptepage = pte_page(entry);
+			get_page(ptepage);
+			add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+			set_huge_pte_at(dst, addr, dst_pte, entry);
+		}
+		spin_unlock(&src->page_table_lock);
 	}
 	return 0;
 
@@ -323,8 +324,8 @@
 
 		page = pte_page(pte);
 		put_page(page);
+		add_mm_counter(mm, rss,  - (HPAGE_SIZE / PAGE_SIZE));
 	}
-	add_mm_counter(mm, rss,  -((end - start) >> PAGE_SHIFT));
 	flush_tlb_range(vma, start, end);
 }
 
@@ -403,6 +404,7 @@
 	BUG_ON(!is_vm_hugetlb_page(vma));
 
 	vpfn = vaddr/PAGE_SIZE;
+	spin_lock(&mm->page_table_lock);
 	while (vaddr < vma->vm_end && remainder) {
 
 		if (pages) {
@@ -415,8 +417,13 @@
 			 * indexing below to work. */
 			pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-			/* hugetlb should be locked, and hence, prefaulted */
-			WARN_ON(!pte || pte_none(*pte));
+			/* the hugetlb file might have been truncated */
+			if (!pte || pte_none(*pte)) {
+				remainder = 0;
+				if (!i)
+					i = -EFAULT;
+				break;
+			}
 
 			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
 
@@ -434,7 +441,7 @@
 		--remainder;
 		++i;
 	}
-
+	spin_unlock(&mm->page_table_lock);
 	*length = remainder;
 	*position = vaddr;
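
Note: hugetlb mappings can now contain holes (follow_hugetlb_page may land in a truncated region), so copy_hugetlb_page_range() takes the source page_table_lock per iteration and copies a PTE only if it is present, and the unmap path charges rss per huge page actually released instead of assuming the whole range was populated. A small model of that accounting change (names are illustrative, not the kernel's):

/* Model of the unmap accounting change: only pages that were actually
 * mapped are subtracted from rss, rather than assuming every slot in
 * [start, end) was populated.
 */
struct hole_model {
	int present[8];			/* 1 = a huge page is mapped at slot i */
	long rss;			/* counted in small-page units */
};

#define SMALL_PAGES_PER_HUGE 512	/* e.g. 2MB huge pages over 4KB pages */

static void unmap_range_model(struct hole_model *m, int start, int end)
{
	int i;

	for (i = start; i < end; i++) {
		if (!m->present[i])
			continue;	/* truncation left a hole: nothing to free */
		m->present[i] = 0;
		m->rss -= SMALL_PAGES_PER_HUGE;	/* per page, as in the patch */
	}
	/* The old code subtracted (end - start) * SMALL_PAGES_PER_HUGE here
	 * unconditionally, which over-subtracts whenever holes exist.
	 */
}
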
 
diff --git a/mm/memory.c b/mm/memory.c
index ae8161f..8c88b97 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2045,8 +2045,18 @@
 
 	inc_page_state(pgfault);
 
-	if (is_vm_hugetlb_page(vma))
-		return VM_FAULT_SIGBUS;	/* mapping truncation does this. */
+	if (unlikely(is_vm_hugetlb_page(vma))) {
+		if (valid_hugetlb_file_off(vma, address))
+			/* We get here only if there was a stale (zero) TLB entry
+			 * (because of HW prefetching).
+			 * Low-level arch code (if needed) should have already
+			 * purged the stale entry as part of this fault handling.  
+			 * Here we just return.
+			 */
+			return VM_FAULT_MINOR; 
+		else
+			return VM_FAULT_SIGBUS;	/* mapping truncation does this. */
+	}
 
 	/*
 	 * We need the page table lock to synchronize with kswapd