ieee1394/dma: Lindent fixes

This patch contains whitespace and code-formatting fixes generated by Lindent.
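
Lindent is the kernel's wrapper around GNU indent (scripts/Lindent); the
changes below were presumably produced by an invocation along the lines of:

	scripts/Lindent drivers/ieee1394/dma.c

No functional change is intended; only whitespace and line wrapping are
affected.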

Signed-off-by: Jens-Michael Hoffmann <jensmh@gmx.de>
Signed-off-by: Jody McIntyre <scjody@modernduck.com>
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index b79ddb4..9fb2769 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -23,7 +23,8 @@
 	prog->bus_addr = 0;
 }
 
-int  dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
+int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
+			  struct pci_dev *dev)
 {
 	/* round up to page size */
 	n_bytes = PAGE_ALIGN(n_bytes);
@@ -32,7 +33,8 @@
 
 	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
 	if (!prog->kvirt) {
-		printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
+		printk(KERN_ERR
+		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
 		dma_prog_region_free(prog);
 		return -ENOMEM;
 	}
@@ -45,7 +47,8 @@
 void dma_prog_region_free(struct dma_prog_region *prog)
 {
 	if (prog->kvirt) {
-		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr);
+		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
+				    prog->kvirt, prog->bus_addr);
 	}
 
 	prog->kvirt = NULL;
@@ -65,7 +68,8 @@
 	dma->sglist = NULL;
 }
 
-int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
+int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
+		     struct pci_dev *dev, int direction)
 {
 	unsigned int i;
 
@@ -95,14 +99,16 @@
 
 	/* fill scatter/gather list with pages */
 	for (i = 0; i < dma->n_pages; i++) {
-		unsigned long va = (unsigned long) dma->kvirt + (i << PAGE_SHIFT);
+		unsigned long va =
+		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
 
 		dma->sglist[i].page = vmalloc_to_page((void *)va);
 		dma->sglist[i].length = PAGE_SIZE;
 	}
 
 	/* map sglist to the IOMMU */
-	dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
+	dma->n_dma_pages =
+	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
 
 	if (dma->n_dma_pages == 0) {
 		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
@@ -114,7 +120,7 @@
 
 	return 0;
 
-err:
+      err:
 	dma_region_free(dma);
 	return -ENOMEM;
 }
@@ -122,7 +128,8 @@
 void dma_region_free(struct dma_region *dma)
 {
 	if (dma->n_dma_pages) {
-		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
+		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
+			     dma->direction);
 		dma->n_dma_pages = 0;
 		dma->dev = NULL;
 	}
@@ -137,7 +144,8 @@
 
 /* find the scatterlist index and remaining offset corresponding to a
    given offset from the beginning of the buffer */
-static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
+static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
+				  unsigned long *rem)
 {
 	int i;
 	unsigned long off = offset;
@@ -156,15 +164,18 @@
 	return i;
 }
 
-dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
+dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
+				    unsigned long offset)
 {
 	unsigned long rem = 0;
 
-	struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
+	struct scatterlist *sg =
+	    &dma->sglist[dma_region_find(dma, offset, &rem)];
 	return sg_dma_address(sg) + rem;
 }
 
-void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
+			     unsigned long len)
 {
 	int first, last;
 	unsigned long rem;
@@ -175,10 +186,12 @@
 	first = dma_region_find(dma, offset, &rem);
 	last = dma_region_find(dma, offset + len - 1, &rem);
 
-	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
+	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
+				dma->direction);
 }
 
-void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
+				unsigned long len)
 {
 	int first, last;
 	unsigned long rem;
@@ -189,44 +202,47 @@
 	first = dma_region_find(dma, offset, &rem);
 	last = dma_region_find(dma, offset + len - 1, &rem);
 
-	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
+	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
+				   last - first + 1, dma->direction);
 }
 
 #ifdef CONFIG_MMU
 
 /* nopage() handler for mmap access */
 
-static struct page*
-dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type)
+static struct page *dma_region_pagefault(struct vm_area_struct *area,
+					 unsigned long address, int *type)
 {
 	unsigned long offset;
 	unsigned long kernel_virt_addr;
 	struct page *ret = NOPAGE_SIGBUS;
 
-	struct dma_region *dma = (struct dma_region*) area->vm_private_data;
+	struct dma_region *dma = (struct dma_region *)area->vm_private_data;
 
 	if (!dma->kvirt)
 		goto out;
 
-	if ( (address < (unsigned long) area->vm_start) ||
-	    (address > (unsigned long) area->vm_start + (dma->n_pages << PAGE_SHIFT)) )
+	if ((address < (unsigned long)area->vm_start) ||
+	    (address >
+	     (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
 		goto out;
 
 	if (type)
 		*type = VM_FAULT_MINOR;
 	offset = address - area->vm_start;
-	kernel_virt_addr = (unsigned long) dma->kvirt + offset;
-	ret = vmalloc_to_page((void*) kernel_virt_addr);
+	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
+	ret = vmalloc_to_page((void *)kernel_virt_addr);
 	get_page(ret);
-out:
+      out:
 	return ret;
 }
 
 static struct vm_operations_struct dma_region_vm_ops = {
-	.nopage	= dma_region_pagefault,
+	.nopage = dma_region_pagefault,
 };
 
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+int dma_region_mmap(struct dma_region *dma, struct file *file,
+		    struct vm_area_struct *vma)
 {
 	unsigned long size;
 
@@ -250,11 +266,12 @@
 	return 0;
 }
 
-#else /* CONFIG_MMU */
+#else				/* CONFIG_MMU */
 
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+int dma_region_mmap(struct dma_region *dma, struct file *file,
+		    struct vm_area_struct *vma)
 {
 	return -EINVAL;
 }
 
-#endif /* CONFIG_MMU */
+#endif				/* CONFIG_MMU */