[INET]: Move bind_hash from tcp_sk to inet_sk

This should really be in inet_connection_sock, but I'm leaving it
for a later optimization, when more of the fields common to INET
transport protocols that now sit in tcp_sk or inet_sk get chunked out
into inet_connection_sock. For now it's better to concentrate on
getting the core changes merged, so that the DCCP tree is left with
only DCCP-specific code.
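
After this change the bucket pointer lives in struct inet_sock;
roughly (a trimmed sketch, most members and the exact layout omitted):

	struct inet_sock {
		struct sock		sk;
		/* ... */
		__u16			num;	/* local port */
		struct inet_bind_bucket	*bind_hash; /* bound port bucket */
		/* ... */
	};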

The next changesets will take advantage of this move to generalise
things like tcp_bind_hash, tcp_put_port and tcp_inherit_port, making
the latter receive an inet_hashinfo parameter, and eventually even
__tcp_tw_hashdance, once tcp_tw_bucket gets transformed into the
struct timewait_sock hierarchy.
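
As a rough illustration of that direction (the names and exact
signatures below are my assumption, not something this patch adds):

	/* hypothetical generalised helpers, parameterised on the hash table */
	extern void inet_put_port(struct inet_hashinfo *hashinfo,
				  struct sock *sk);
	extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
				   const unsigned short snum);
	extern void inet_inherit_port(struct inet_hashinfo *hashinfo,
				      struct sock *sk, struct sock *child);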

tcp_destroy_sock is also eligible, as soon as tcp_orphan_count gets
moved to sk_prot.
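
One possible shape for that (again an assumption, not part of this
patch) is a per-protocol orphan counter hanging off struct proto:

	struct proto {
		/* ... existing members ... */
		atomic_t	*orphan_count;	/* e.g. &tcp_orphan_count */
	};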

A cascade of incremental changes will ultimately make the tcp_lookup
functions fully generic.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 58e36ed..10a9b3a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -113,9 +113,9 @@
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet_sk(sk)->bind_hash;
 	sk_add_bind_node(child, &tb->owners);
-	tcp_sk(child)->bind_hash = tb;
+	inet_sk(child)->bind_hash = tb;
 	spin_unlock(&head->lock);
 }
 
@@ -129,9 +129,10 @@
 void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 		   const unsigned short snum)
 {
-	inet_sk(sk)->num = snum;
+	struct inet_sock *inet = inet_sk(sk);
+	inet->num	= snum;
 	sk_add_bind_node(sk, &tb->owners);
-	tcp_sk(sk)->bind_hash = tb;
+	inet->bind_hash	= tb;
 }
 
 static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
@@ -246,9 +247,9 @@
 		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
 		tb->fastreuse = 0;
 success:
-	if (!tcp_sk(sk)->bind_hash)
+	if (!inet_sk(sk)->bind_hash)
 		tcp_bind_hash(sk, tb, snum);
-	BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
+	BUG_TRAP(inet_sk(sk)->bind_hash == tb);
  	ret = 0;
 
 fail_unlock:
@@ -269,9 +270,9 @@
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet->bind_hash;
 	__sk_del_bind_node(sk);
-	tcp_sk(sk)->bind_hash = NULL;
+	inet->bind_hash = NULL;
 	inet->num = 0;
 	inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
 	spin_unlock(&head->lock);
@@ -694,7 +695,7 @@
  	}
 
  	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
- 	tb  = tcp_sk(sk)->bind_hash;
+ 	tb  = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
 		__tcp_v4_hash(sk, 0);
@@ -1940,7 +1941,7 @@
 	__skb_queue_purge(&tp->ucopy.prequeue);
 
 	/* Clean up a referenced TCP bind bucket. */
-	if (tp->bind_hash)
+	if (inet_sk(sk)->bind_hash)
 		tcp_put_port(sk);
 
 	/*