net: Use netdev_alloc_skb_ip_align()

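Replace the open-coded

	skb = netdev_alloc_skb(dev, size + NET_IP_ALIGN);
	...
	skb_reserve(skb, NET_IP_ALIGN);

sequence in these drivers (and in napi_get_frags()) with the
netdev_alloc_skb_ip_align() helper, dropping the "+ NET_IP_ALIGN"
(or open-coded "+ 2") padding and the explicit skb_reserve() calls.
For reference, the helper is roughly the following sketch; its real
definition lives in include/linux/skbuff.h and is not part of this diff:

	/* sketch only -- see include/linux/skbuff.h for the actual helper */
	static inline struct sk_buff *
	netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

		if (NET_IP_ALIGN && skb)
			skb_reserve(skb, NET_IP_ALIGN);
		return skb;
	}
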
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 975e25b..32031ea 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2560,7 +2560,7 @@
 		struct sk_buff *skb;
 		entry = vp->dirty_rx % RX_RING_SIZE;
 		if (vp->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 			if (skb == NULL) {
 				static unsigned long last_jif;
 				if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2572,7 +2572,6 @@
 				break;			/* Bad news!  */
 			}
 
-			skb_reserve(skb, NET_IP_ALIGN);
 			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 			vp->rx_skbuff[entry] = skb;
 		}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 83a1922..ab451bb 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -549,14 +549,12 @@
 			pr_debug("%s: rx slot %d status 0x%x len %d\n",
 			       dev->name, rx_tail, status, len);
 
-		new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
+		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
 		if (!new_skb) {
 			dev->stats.rx_dropped++;
 			goto rx_next;
 		}
 
-		skb_reserve(new_skb, NET_IP_ALIGN);
-
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 
@@ -1057,12 +1055,10 @@
 		struct sk_buff *skb;
 		dma_addr_t mapping;
 
-		skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
 		if (!skb)
 			goto err_out;
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i] = skb;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 4a36287..7e333f7 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2004,9 +2004,8 @@
 		/* Malloc up new buffer, compatible with net-2e. */
 		/* Omit the four octet CRC from the length. */
 
-		skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 		if (likely(skb)) {
-			skb_reserve (skb, NET_IP_ALIGN);	/* 16 byte align the IP fields. */
 #if RX_BUF_IDX == 3
 			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
 #else
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 955da73..8b889ab 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1433,14 +1433,12 @@
 
 			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
 					RRS_PKT_SIZE_MASK) - 4; /* CRC */
-			skb = netdev_alloc_skb(netdev,
-					       packet_size + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
 			if (skb == NULL) {
 				dev_warn(&pdev->dev, "%s: Memory squeeze,"
 					"deferring packet.\n", netdev->name);
 				goto skip_pkt;
 			}
-			skb_reserve(skb, NET_IP_ALIGN);
 			skb->dev = netdev;
 			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
 			skb_put(skb, packet_size);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 00569dc..963df50 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1864,21 +1864,14 @@
 
 		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
 
-		skb = netdev_alloc_skb(adapter->netdev,
-				       adapter->rx_buffer_len + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(adapter->netdev,
+						adapter->rx_buffer_len);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->netdev->stats.rx_dropped++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->alloced = 1;
 		buffer_info->skb = skb;
 		buffer_info->length = (u16) adapter->rx_buffer_len;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ab68886..0d26807 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -409,7 +409,7 @@
 		if (rxd->status.ok && rxd->status.pkt_size >= 60) {
 			int rx_size = (int)(rxd->status.pkt_size - 4);
 			/* alloc new buffer */
-			skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, rx_size);
 			if (NULL == skb) {
 				printk(KERN_WARNING
 					"%s: Mem squeeze, deferring packet.\n",
@@ -421,7 +421,6 @@
 				netdev->stats.rx_dropped++;
 				break;
 			}
-			skb_reserve(skb, NET_IP_ALIGN);
 			skb->dev = netdev;
 			memcpy(skb->data, rxd->packet, rx_size);
 			skb_put(skb, rx_size);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ba29dc3..1f6c548 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -320,16 +320,13 @@
 		if (len < copybreak) {
 			struct sk_buff *nskb;
 
-			nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+			nskb = netdev_alloc_skb_ip_align(dev, len);
 			if (!nskb) {
 				/* forget packet, just rearm desc */
 				priv->stats.rx_dropped++;
 				continue;
 			}
 
-			/* since we're copying the data, we can align
-			 * them properly */
-			skb_reserve(nskb, NET_IP_ALIGN);
 			dma_sync_single_for_cpu(kdev, desc->address,
 						len, DMA_FROM_DEVICE);
 			memcpy(nskb->data, skb->data, len);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 0e92a1f..e0f9d64 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -756,7 +756,7 @@
 	if ((adapter->cap == 0x400) && !vtm)
 		vlanf = 0;
 
-	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
+	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
 	if (!skb) {
 		if (net_ratelimit())
 			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -764,8 +764,6 @@
 		return;
 	}
 
-	skb_reserve(skb, NET_IP_ALIGN);
-
 	skb_fill_rx_data(adapter, skb, rxcp);
 
 	if (do_pkt_csum(rxcp, adapter->rx_csum))
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 61f9da2..6782223 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -380,9 +380,8 @@
 		return NULL;
 	}
 
-	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
+	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
 	if (likely(skb)) {
-		skb_reserve(skb, 2);
 		skb_put(desc->skb, desc->datalen);
 		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
 		desc->skb->ip_summed = CHECKSUM_NONE;
@@ -991,12 +990,11 @@
 
 	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
 	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
-		skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
 		if (unlikely(!skb)) {
 			res = -ENOMEM;
 			goto fail_desc;
 		}
-		skb_reserve(skb, 2);
 		desc->skb = skb;
 		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
 						    CPMAC_SKB_SIZE,
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 7fa7a90..ce8fef1 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -505,7 +505,8 @@
 			entry = np->old_rx % RX_RING_SIZE;
 			/* Dropped packets don't need to re-allocate */
 			if (np->rx_skbuff[entry] == NULL) {
-				skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+				skb = netdev_alloc_skb_ip_align(dev,
+								np->rx_buf_sz);
 				if (skb == NULL) {
 					np->rx_ring[entry].fraginfo = 0;
 					printk (KERN_INFO
@@ -514,8 +515,6 @@
 					break;
 				}
 				np->rx_skbuff[entry] = skb;
-				/* 16 byte align the IP header */
-				skb_reserve (skb, 2);
 				np->rx_ring[entry].fraginfo =
 				    cpu_to_le64 (pci_map_single
 					 (np->pdev, skb->data, np->rx_buf_sz,
@@ -576,7 +575,9 @@
 	/* Allocate the rx buffers */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		/* Allocated fixed size of skbuff */
-		struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL) {
 			printk (KERN_ERR
@@ -584,7 +585,6 @@
 				dev->name);
 			break;
 		}
-		skb_reserve (skb, 2);	/* 16 byte align the IP header. */
 		/* Rubicon now supports 40 bits of addressing space. */
 		np->rx_ring[i].fraginfo =
 		    cpu_to_le64 ( pci_map_single (
@@ -871,13 +871,11 @@
 						  PCI_DMA_FROMDEVICE);
 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
-			} else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
+			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
 				pci_dma_sync_single_for_cpu(np->pdev,
 							    desc_to_dma(desc),
 							    np->rx_buf_sz,
 							    PCI_DMA_FROMDEVICE);
-				/* 16 byte align the IP header */
-				skb_reserve (skb, 2);
 				skb_copy_to_linear_data (skb,
 						  np->rx_skbuff[entry]->data,
 						  pkt_len);
@@ -907,7 +905,7 @@
 		struct sk_buff *skb;
 		/* Dropped packets don't need to re-allocate */
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 			if (skb == NULL) {
 				np->rx_ring[entry].fraginfo = 0;
 				printk (KERN_INFO
@@ -917,8 +915,6 @@
 				break;
 			}
 			np->rx_skbuff[entry] = skb;
-			/* 16 byte align the IP header */
-			skb_reserve (skb, 2);
 			np->rx_ring[entry].fraginfo =
 			    cpu_to_le64 (pci_map_single
 					 (np->pdev, skb->data, np->rx_buf_sz,
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 679965c..ff83efd 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1839,11 +1839,10 @@
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 {
-	if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
+	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
 		return -ENOMEM;
 
-	/* Align, init, and map the RFD. */
-	skb_reserve(rx->skb, NET_IP_ALIGN);
+	/* Init and map the RFD. */
 	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6a61414..c938114 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3866,9 +3866,8 @@
 		 * of reassembly being done in the stack */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
@@ -3937,9 +3936,7 @@
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = 256 -
-	                     16 /*for skb_reserve */ -
-	                     NET_IP_ALIGN;
+	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -3951,7 +3948,7 @@
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -3964,7 +3961,7 @@
 			DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
 					     "at %p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
-			skb = netdev_alloc_skb(netdev, bufsz);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
@@ -3982,12 +3979,6 @@
 			/* Use new allocation */
 			dev_kfree_skb(oldskb);
 		}
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 check_page:
@@ -4044,7 +4035,7 @@
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+	unsigned int bufsz = adapter->rx_buffer_len;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -4056,7 +4047,7 @@
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -4069,7 +4060,7 @@
 			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
 					     "at %p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
-			skb = netdev_alloc_skb(netdev, bufsz);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
@@ -4088,12 +4079,6 @@
 			/* Use new allocation */
 			dev_kfree_skb(oldskb);
 		}
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 21af398..3769248 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -167,7 +167,7 @@
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+	unsigned int bufsz = adapter->rx_buffer_len;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +179,13 @@
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (!skb) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 map_skb:
 		buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -284,21 +277,14 @@
 			     cpu_to_le64(ps_page->dma);
 		}
 
-		skb = netdev_alloc_skb(netdev,
-				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(netdev,
+						adapter->rx_ps_bsize0);
 
 		if (!skb) {
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->dma = pci_map_single(pdev, skb->data,
 						  adapter->rx_ps_bsize0,
@@ -359,9 +345,7 @@
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = 256 -
-	                     16 /* for skb_reserve */ -
-	                     NET_IP_ALIGN;
+	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +357,13 @@
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 check_page:
 		/* allocate a new page if necessary */
@@ -513,9 +491,8 @@
 		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 41bd7ae..7f8fcc2 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -447,7 +447,9 @@
 	max_index_mask = q_skba->len - 1;
 	for (i = 0; i < fill_wqes; i++) {
 		u64 tmp_addr;
-		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, packet_size);
 		if (!skb) {
 			q_skba->os_skbs = fill_wqes - i;
 			if (q_skba->os_skbs == q_skba->len - 2) {
@@ -457,7 +459,6 @@
 			}
 			break;
 		}
-		skb_reserve(skb, NET_IP_ALIGN);
 
 		skb_arr[index] = skb;
 		tmp_addr = ehea_map_vaddr(skb->data);
@@ -500,7 +501,7 @@
 {
 	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
 				  nr_of_wqes, EHEA_RWQE2_TYPE,
-				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
+				  EHEA_RQ2_PKT_SIZE);
 }
 
 
@@ -508,7 +509,7 @@
 {
 	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
 				  nr_of_wqes, EHEA_RWQE3_TYPE,
-				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
+				  EHEA_MAX_PACKET_SIZE);
 }
 
 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d69d52e..f875751 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -870,19 +870,6 @@
 	dev_kfree_skb_any(buf->os_buf);
 }
 
-static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
-	unsigned int size)
-{
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);
-
-	if (skb)
-		skb_reserve(skb, NET_IP_ALIGN);
-
-	return skb;
-}
-
 static int enic_rq_alloc_buf(struct vnic_rq *rq)
 {
 	struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -892,7 +879,7 @@
 	unsigned int os_buf_index = 0;
 	dma_addr_t dma_addr;
 
-	skb = enic_rq_alloc_skb(netdev, len);
+	skb = netdev_alloc_skb_ip_align(netdev, len);
 	if (!skb)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 34d0c69..0c229a5 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -404,10 +404,10 @@
 
 		if (ethoc_update_rx_stats(priv, &bd) == 0) {
 			int size = bd.stat >> 16;
-			struct sk_buff *skb = netdev_alloc_skb(dev, size);
+			struct sk_buff *skb;
 
 			size -= 4; /* strip the CRC */
-			skb_reserve(skb, 2); /* align TCP/IP header */
+			skb = netdev_alloc_skb_ip_align(dev, size);
 
 			if (likely(skb)) {
 				void *src = phys_to_virt(bd.addr);
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 1d5064a..18bd9fe 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -406,10 +406,9 @@
 /* A few values that may be tweaked. */
 /* Size of each temporary Rx buffer, calculated as:
  * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
- * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum +
- * 2 more because we use skb_reserve.
+ * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
  */
-#define PKT_BUF_SZ		1538
+#define PKT_BUF_SZ		1536
 
 /* For now, this is going to be set to the maximum size of an ethernet
  * packet.  Eventually, we may want to make it a variable that is
@@ -1151,12 +1150,13 @@
 	}
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
 		hmp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 
-		skb_reserve(skb, 2); /* 16 byte align the IP header. */
                 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
 			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
@@ -1195,7 +1195,7 @@
 	 * card.  -KDU
 	 */
 	hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
-		(((dev->mtu+26+7) & ~7) + 2 + 16));
+		(((dev->mtu+26+7) & ~7) + 16));
 
 	/* Initialize all Rx descriptors. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 428d504..2ffe099 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4934,18 +4934,12 @@
 		}
 
 		if (!buffer_info->skb) {
-			skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 
-			/* Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
-
 			buffer_info->skb = skb;
 			buffer_info->dma = pci_map_single(pdev, skb->data,
 							  bufsz,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 91024a3..fad7f34 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -170,18 +170,12 @@
 		}
 
 		if (!buffer_info->skb) {
-			skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 
-			/* Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
-
 			buffer_info->skb = skb;
 			buffer_info->dma = pci_map_single(pdev, skb->data,
 			                                  bufsz,
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 9f7b5d4..63056e7 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -738,17 +738,12 @@
 
 	IPG_DEBUG_MSG("_get_rxbuff\n");
 
-	skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN);
+	skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
 	if (!skb) {
 		sp->rx_buff[entry] = NULL;
 		return -ENOMEM;
 	}
 
-	/* Adjust the data start location within the buffer to
-	 * align IP address field to a 16 byte boundary.
-	 */
-	skb_reserve(skb, NET_IP_ALIGN);
-
 	/* Associate the receive buffer with the IPG NIC. */
 	skb->dev = dev;
 
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index f9f633c..1bd0ca1 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1972,9 +1972,8 @@
 		 * of reassembly being done in the stack */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
@@ -2057,20 +2056,13 @@
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
-			               + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index eb3abd7..4c8a449 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -616,22 +616,14 @@
 
 		if (!bi->skb) {
 			struct sk_buff *skb;
-			skb = netdev_alloc_skb(adapter->netdev,
-			                       (rx_ring->rx_buf_len +
-			                        NET_IP_ALIGN));
+			skb = netdev_alloc_skb_ip_align(adapter->netdev,
+							rx_ring->rx_buf_len);
 
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 
-			/*
-			 * Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
-
 			bi->skb = skb;
 			bi->dma = pci_map_single(pdev, skb->data,
 			                         rx_ring->rx_buf_len,
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 1272434..6baf3c9 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -108,9 +108,8 @@
 		if (unlikely(!netif_running(nds[desc->channel])))
 			goto err;
 
-		skb = netdev_alloc_skb(dev, desc->pkt_length + 2);
+		skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
 		if (likely(skb != NULL)) {
-			skb_reserve(skb, 2);
 			skb_copy_to_linear_data(skb, buf, desc->pkt_length);
 			skb_put(skb, desc->pkt_length);
 			skb->protocol = eth_type_trans(skb, nds[desc->channel]);
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 03199fa..a07a597 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -400,7 +400,7 @@
 			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
 
 			/* Malloc up new buffer. */
-			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 
 			if (!skb_new)
 				break;
@@ -417,9 +417,6 @@
 			if (devcs & ETH_RX_MP)
 				dev->stats.multicast++;
 
-			/* 16 bit align */
-			skb_reserve(skb_new, 2);
-
 			lp->rx_skb[lp->rx_next_done] = skb_new;
 		}
 
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 99e9541..5c45cb5 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -357,7 +357,7 @@
 
 	/* check the status */
 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
-		struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
+		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
 
 		dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
 			__func__, len);
@@ -369,9 +369,6 @@
 			if (status & RXSR_MULTICAST)
 				netdev->stats.multicast++;
 
-			/* Align socket buffer in 4-byte boundary for
-				 better performance. */
-			skb_reserve(skb, 2);
 			data = (u32 *)skb_put(skb, len);
 
 			ks8842_select_bank(adapter, 17);
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 51e11c3..5b24c67 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -470,11 +470,11 @@
 
 	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
 		dma_addr_t dma_addr;
-		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+		struct sk_buff *skb;
 
+		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 		if (skb == NULL)
 			return -1;
-		skb_reserve(skb, 2);
 		dma_addr = dma_map_single(dev->dev.parent, skb->data,
 					  PKT_BUF_SZ, DMA_FROM_DEVICE);
 		rbd->v_next = rbd+1;
@@ -697,12 +697,12 @@
 						 (dma_addr_t)SWAP32(rbd->b_data),
 						 PKT_BUF_SZ, DMA_FROM_DEVICE);
 				/* Get fresh skbuff to replace filled one. */
-				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+				newskb = netdev_alloc_skb_ip_align(dev,
+								   PKT_BUF_SZ);
 				if (newskb == NULL) {
 					skb = NULL;	/* drop pkt */
 					goto memory_squeeze;
 				}
-				skb_reserve(newskb, 2);
 
 				/* Pass up the skb already on the Rx ring. */
 				skb_put(skb, pkt_len);
@@ -716,7 +716,7 @@
 				rbd->b_data = SWAP32(dma_addr);
 				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
 			} else
-				skb = netdev_alloc_skb(dev, pkt_len + 2);
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 memory_squeeze:
 			if (skb == NULL) {
 				/* XXX tulip.c can defer packets here!! */
@@ -730,7 +730,6 @@
 					dma_sync_single_for_cpu(dev->dev.parent,
 								(dma_addr_t)SWAP32(rbd->b_data),
 								PKT_BUF_SZ, DMA_FROM_DEVICE);
-					skb_reserve(skb, 2);
 					memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
 					dma_sync_single_for_device(dev->dev.parent,
 								   (dma_addr_t)SWAP32(rbd->b_data),
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 50c6a3c..97b1704 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3555,13 +3555,12 @@
 	if (pkt_size >= rx_copybreak)
 		goto out;
 
-	skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN);
+	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 	if (!skb)
 		goto out;
 
 	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
 				    PCI_DMA_FROMDEVICE);
-	skb_reserve(skb, NET_IP_ALIGN);
 	skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
 	*sk_buff = skb;
 	done = true;
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8d60300..b7e0eb4 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -793,7 +793,7 @@
 
 		rx_len -= rx_size_align + 4;
 
-		skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 		if (unlikely(!skb)) {
 			if (printk_ratelimit())
 				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
@@ -801,8 +801,6 @@
 			goto next;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
 			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
 				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index f4dfd1f..6b364a6 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -365,11 +365,10 @@
 					}
 					skb_reserve(newskb, 2);
 				} else {
-					skb = netdev_alloc_skb(dev, len + 2);
-					if (skb) {
-						skb_reserve(skb, 2);
+					skb = netdev_alloc_skb_ip_align(dev, len);
+					if (skb)
 						skb_copy_to_linear_data(skb, rd->skb->data, len);
-					}
+
 					newskb = rd->skb;
 				}
 memory_squeeze:
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 7cc9898..31233b4 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -536,13 +536,12 @@
 	if (pkt_size >= rx_copybreak)
 		goto out;
 
-	skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
+	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 	if (!skb)
 		goto out;
 
 	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
 				PCI_DMA_FROMDEVICE);
-	skb_reserve(skb, 2);
 	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
 	*sk_buff = skb;
 	done = true;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 01f6811..be28ebb 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3070,11 +3070,10 @@
 		goto error;
 
 	if (len < RX_COPY_THRESHOLD) {
-		skb = netdev_alloc_skb(dev, len + 2);
+		skb = netdev_alloc_skb_ip_align(dev, len);
 		if (!skb)
 			goto resubmit;
 
-		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    pci_unmap_addr(e, mapaddr),
 					    len, PCI_DMA_FROMDEVICE);
@@ -3085,11 +3084,11 @@
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
-		nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
+
+		nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
 		if (!nskb)
 			goto resubmit;
 
-		skb_reserve(nskb, NET_IP_ALIGN);
 		pci_unmap_single(skge->hw->pdev,
 				 pci_unmap_addr(e, mapaddr),
 				 pci_unmap_len(e, maplen),
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 2ab5c39..3a449d0 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2191,9 +2191,8 @@
 {
 	struct sk_buff *skb;
 
-	skb = netdev_alloc_skb(sky2->netdev, length + 2);
+	skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
 	if (likely(skb)) {
-		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
 					    length, PCI_DMA_FROMDEVICE);
 		skb_copy_from_linear_data(re->skb, skb->data, length);
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 3d31b47..16f23f8 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1549,7 +1549,8 @@
 		if (tmpCStat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
-		new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+		new_skb = netdev_alloc_skb_ip_align(dev,
+						    TLAN_MAX_FRAME_SIZE + 5);
 		if ( !new_skb )
 			goto drop_and_reuse;
 
@@ -1563,7 +1564,6 @@
 		skb->protocol = eth_type_trans( skb, dev );
 		netif_rx( skb );
 
-		skb_reserve( new_skb, NET_IP_ALIGN );
 		head_list->buffer[0].address = pci_map_single(priv->pciDev,
 							      new_skb->data,
 							      TLAN_MAX_FRAME_SIZE,
@@ -1967,13 +1967,12 @@
 		list->cStat = TLAN_CSTAT_READY;
 		list->frameSize = TLAN_MAX_FRAME_SIZE;
 		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
-		skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
 		if ( !skb ) {
 			pr_err("TLAN: out of memory for received data.\n" );
 			break;
 		}
 
-		skb_reserve( skb, NET_IP_ALIGN );
 		list->buffer[0].address = pci_map_single(priv->pciDev,
 							 skb->data,
 							 TLAN_MAX_FRAME_SIZE,
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 7030bd5..a69c4a4 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -802,13 +802,11 @@
 		int rx = data->rxhead;
 		struct sk_buff *skb;
 
-		data->rxskbs[rx] = skb = netdev_alloc_skb(dev,
-							  TSI108_RXBUF_SIZE + 2);
+		skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
+		data->rxskbs[rx] = skb;
 		if (!skb)
 			break;
 
-		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
-
 		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
 							TSI108_RX_SKB_SIZE,
 							DMA_FROM_DEVICE);
@@ -1356,7 +1354,7 @@
 	for (i = 0; i < TSI108_RXRING_LEN; i++) {
 		struct sk_buff *skb;
 
-		skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
 		if (!skb) {
 			/* Bah.  No memory for now, but maybe we'll get
 			 * some more later.
@@ -1370,8 +1368,6 @@
 		}
 
 		data->rxskbs[i] = skb;
-		/* Align the payload on a 4-byte boundary */
-		skb_reserve(skb, 2);
 		data->rxskbs[i] = skb;
 		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
 		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 1fd7058..4535e89 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1484,15 +1484,15 @@
 				}
 			}
 		} else {
-			struct sk_buff *skb;
+			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
 
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak &&
-				(skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
-				skb_reserve(skb, NET_IP_ALIGN);	/* 16 byte align the IP header */
+			if (pkt_len < rx_copybreak)
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+			if (skb) {
 				pci_dma_sync_single_for_cpu(rp->pdev,
 							    rp->rx_skbuff_dma[entry],
 							    rp->rx_buf_sz,
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e04e5be..144db63 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1949,10 +1949,9 @@
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
+		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
 		if (new_skb) {
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-			skb_reserve(new_skb, 2);
 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8d00976..556512d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -283,13 +283,12 @@
 	do {
 		struct skb_vnet_hdr *hdr;
 
-		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
 		if (unlikely(!skb)) {
 			oom = true;
 			break;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
 
 		hdr = skb_vnet_hdr(skb);
@@ -344,14 +343,12 @@
 	do {
 		skb_frag_t *f;
 
-		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
 		if (unlikely(!skb)) {
 			oom = true;
 			break;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		f = &skb_shinfo(skb)->frags[0];
 		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 510ff20..28b0b9e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2604,20 +2604,13 @@
 
 struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
-	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
 
 	if (!skb) {
-		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
-		if (!skb)
-			goto out;
-
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		napi->skb = skb;
+		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
+		if (skb)
+			napi->skb = skb;
 	}
-
-out:
 	return skb;
 }
 EXPORT_SYMBOL(napi_get_frags);