[PPPOL2TP]: Make locking calls softirq-safe

Fix locking issues in the pppol2tp driver that can cause a kernel
crash on SMP boxes. There were two problems:

1. The driver was violating the read_lock() and write_lock()
   scheduling rules because it wasn't using the softirq-safe variants
   for locks that are also taken in softirq context. So we now
   consistently use the _bh variants of the lock functions (the
   pattern is sketched after this list).

2. The driver was calling sk_dst_get() in pppol2tp_xmit(), which took
   sk_dst_lock in softirq context. We now call __sk_dst_get(), which
   avoids the lock, and take our own reference on the cached route
   with dst_clone() (also shown in the sketch below).

Signed-off-by: James Chapman <jchapman@katalix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 86e5dba..9a90ced 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -302,14 +302,14 @@
 	struct pppol2tp_session *session;
 	struct hlist_node *walk;
 
-	read_lock(&tunnel->hlist_lock);
+	read_lock_bh(&tunnel->hlist_lock);
 	hlist_for_each_entry(session, walk, session_list, hlist) {
 		if (session->tunnel_addr.s_session == session_id) {
-			read_unlock(&tunnel->hlist_lock);
+			read_unlock_bh(&tunnel->hlist_lock);
 			return session;
 		}
 	}
-	read_unlock(&tunnel->hlist_lock);
+	read_unlock_bh(&tunnel->hlist_lock);
 
 	return NULL;
 }
@@ -320,14 +320,14 @@
 {
 	struct pppol2tp_tunnel *tunnel = NULL;
 
-	read_lock(&pppol2tp_tunnel_list_lock);
+	read_lock_bh(&pppol2tp_tunnel_list_lock);
 	list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
 		if (tunnel->stats.tunnel_id == tunnel_id) {
-			read_unlock(&pppol2tp_tunnel_list_lock);
+			read_unlock_bh(&pppol2tp_tunnel_list_lock);
 			return tunnel;
 		}
 	}
-	read_unlock(&pppol2tp_tunnel_list_lock);
+	read_unlock_bh(&pppol2tp_tunnel_list_lock);
 
 	return NULL;
 }
@@ -344,7 +344,7 @@
 	struct sk_buff *skbp;
 	u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
 
-	spin_lock(&session->reorder_q.lock);
+	spin_lock_bh(&session->reorder_q.lock);
 	skb_queue_walk(&session->reorder_q, skbp) {
 		if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
 			__skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
@@ -360,7 +360,7 @@
 	__skb_queue_tail(&session->reorder_q, skb);
 
 out:
-	spin_unlock(&session->reorder_q.lock);
+	spin_unlock_bh(&session->reorder_q.lock);
 }
 
 /* Dequeue a single skb.
@@ -442,7 +442,7 @@
 	 * expect to send up next, dequeue it and any other
 	 * in-sequence packets behind it.
 	 */
-	spin_lock(&session->reorder_q.lock);
+	spin_lock_bh(&session->reorder_q.lock);
 	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
 		if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
 			session->stats.rx_seq_discards++;
@@ -470,13 +470,13 @@
 				goto out;
 			}
 		}
-		spin_unlock(&session->reorder_q.lock);
+		spin_unlock_bh(&session->reorder_q.lock);
 		pppol2tp_recv_dequeue_skb(session, skb);
-		spin_lock(&session->reorder_q.lock);
+		spin_lock_bh(&session->reorder_q.lock);
 	}
 
 out:
-	spin_unlock(&session->reorder_q.lock);
+	spin_unlock_bh(&session->reorder_q.lock);
 }
 
 /* Internal receive frame. Do the real work of receiving an L2TP data frame
@@ -1059,7 +1059,7 @@
 
 	/* Get routing info from the tunnel socket */
 	dst_release(skb->dst);
-	skb->dst = sk_dst_get(sk_tun);
+	skb->dst = dst_clone(__sk_dst_get(sk_tun));
 	skb_orphan(skb);
 	skb->sk = sk_tun;
 
@@ -1107,7 +1107,7 @@
 	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
 	       "%s: closing all sessions...\n", tunnel->name);
 
-	write_lock(&tunnel->hlist_lock);
+	write_lock_bh(&tunnel->hlist_lock);
 	for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
 again:
 		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
@@ -1129,7 +1129,7 @@
 			 * disappear as we're jumping between locks.
 			 */
 			sock_hold(sk);
-			write_unlock(&tunnel->hlist_lock);
+			write_unlock_bh(&tunnel->hlist_lock);
 			lock_sock(sk);
 
 			if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
@@ -1154,11 +1154,11 @@
 			 * list so we are guaranteed to make forward
 			 * progress.
 			 */
-			write_lock(&tunnel->hlist_lock);
+			write_lock_bh(&tunnel->hlist_lock);
 			goto again;
 		}
 	}
-	write_unlock(&tunnel->hlist_lock);
+	write_unlock_bh(&tunnel->hlist_lock);
 }
 
 /* Really kill the tunnel.
@@ -1167,9 +1167,9 @@
 static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
 {
 	/* Remove from socket list */
-	write_lock(&pppol2tp_tunnel_list_lock);
+	write_lock_bh(&pppol2tp_tunnel_list_lock);
 	list_del_init(&tunnel->list);
-	write_unlock(&pppol2tp_tunnel_list_lock);
+	write_unlock_bh(&pppol2tp_tunnel_list_lock);
 
 	atomic_dec(&pppol2tp_tunnel_count);
 	kfree(tunnel);
@@ -1245,9 +1245,9 @@
 				/* Delete the session socket from the
 				 * hash
 				 */
-				write_lock(&tunnel->hlist_lock);
+				write_lock_bh(&tunnel->hlist_lock);
 				hlist_del_init(&session->hlist);
-				write_unlock(&tunnel->hlist_lock);
+				write_unlock_bh(&tunnel->hlist_lock);
 
 				atomic_dec(&pppol2tp_session_count);
 			}
@@ -1392,9 +1392,9 @@
 
 	/* Add tunnel to our list */
 	INIT_LIST_HEAD(&tunnel->list);
-	write_lock(&pppol2tp_tunnel_list_lock);
+	write_lock_bh(&pppol2tp_tunnel_list_lock);
 	list_add(&tunnel->list, &pppol2tp_tunnel_list);
-	write_unlock(&pppol2tp_tunnel_list_lock);
+	write_unlock_bh(&pppol2tp_tunnel_list_lock);
 	atomic_inc(&pppol2tp_tunnel_count);
 
 	/* Bump the reference count. The tunnel context is deleted
@@ -1599,11 +1599,11 @@
 	sk->sk_user_data = session;
 
 	/* Add session to the tunnel's hash list */
-	write_lock(&tunnel->hlist_lock);
+	write_lock_bh(&tunnel->hlist_lock);
 	hlist_add_head(&session->hlist,
 		       pppol2tp_session_id_hash(tunnel,
 						session->tunnel_addr.s_session));
-	write_unlock(&tunnel->hlist_lock);
+	write_unlock_bh(&tunnel->hlist_lock);
 
 	atomic_inc(&pppol2tp_session_count);
 
@@ -2205,7 +2205,7 @@
 	int next = 0;
 	int i;
 
-	read_lock(&tunnel->hlist_lock);
+	read_lock_bh(&tunnel->hlist_lock);
 	for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
 		hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
 			if (curr == NULL) {
@@ -2223,7 +2223,7 @@
 		}
 	}
 out:
-	read_unlock(&tunnel->hlist_lock);
+	read_unlock_bh(&tunnel->hlist_lock);
 	if (!found)
 		session = NULL;
 
@@ -2234,13 +2234,13 @@
 {
 	struct pppol2tp_tunnel *tunnel = NULL;
 
-	read_lock(&pppol2tp_tunnel_list_lock);
+	read_lock_bh(&pppol2tp_tunnel_list_lock);
 	if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
 		goto out;
 	}
 	tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
 out:
-	read_unlock(&pppol2tp_tunnel_list_lock);
+	read_unlock_bh(&pppol2tp_tunnel_list_lock);
 
 	return tunnel;
 }