/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

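/*
 * Helper for SO_RCVTIMEO/SO_SNDTIMEO: copy a struct timeval from user
 * space and convert it to a timeout in jiffies.  A zero timeval means
 * "wait forever" (MAX_SCHEDULE_TIMEOUT); negative seconds are clamped
 * to zero with a rate-limited warning.
 */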
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}

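/*
 * Queue a received skb on sk->sk_receive_queue after running the socket
 * filter and charging receive-buffer memory.  Returns 0 on success,
 * -ENOMEM when sk_rcvbuf is already exceeded and -ENOBUFS when the
 * memory accounting in sk_rmem_schedule() fails.
 */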
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the RCU protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

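/*
 * Deliver an skb to a socket from packet-processing context: run the
 * socket filter, then either process the packet directly via
 * sk_backlog_rcv() when no user context owns the socket, or append it to
 * the backlog.  Consumes a reference on @sk and frees the skb on every
 * drop path.
 */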
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

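/*
 * Return the socket's cached route if it is still valid for @cookie,
 * otherwise clear the cache, release the stale entry and return NULL.
 * sk_dst_check() below does the same through sk_dst_get(), which takes
 * its own reference on the returned dst.
 */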
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

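/*
 * SO_BINDTODEVICE: bind the socket to the interface named in @optval.
 * Requires CAP_NET_RAW; an empty name (or zero length) clears the
 * binding.  The interface lookup is done under RCU and the resulting
 * ifindex is stored in sk->sk_bound_dev_if.
 */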
static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */
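/*
 * Note: a minimal, illustrative user-space example (not kernel code; the
 * option and value are arbitrary).  The generic options handled here are
 * reached through setsockopt(2) with level SOL_SOCKET, e.g.:
 *
 *	int val = 1;
 *	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) < 0)
 *		perror("setsockopt");
 *
 * which arrives in sock_setsockopt() below with optname == SO_KEEPALIVE.
 */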

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);

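/*
 * Translate the kernel's pid/cred pair into the struct ucred layout
 * reported to user space (e.g. for SO_PEERCRED): the pid is converted to
 * the caller's pid namespace and uid/gid to the caller's user namespace,
 * with -1 used when no credentials are attached.
 */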
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid(current_ns, cred->euid);
		ucred->gid = from_kgid(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

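/*
 * Generic getsockopt(2) handler for SOL_SOCKET options.  Most options are
 * answered from fields in struct sock; the result is truncated to the
 * user-supplied length and the actual length is written back to @optlen.
 */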
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
 * un-modified. Special care is taken when initializing object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

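/*
 * Zero a socket while preserving the two SLAB_DESTROY_BY_RCU nulls-list
 * .next pointers (skc_node and skc_portaddr_node), which must survive a
 * concurrent RCU lookup even while the object is being reinitialized.
 */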
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

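/*
 * Allocate a struct sock either from the protocol's dedicated slab cache
 * or, when no cache exists, with kmalloc().  On success the LSM security
 * blob is set up and a reference on the protocol module is taken; both
 * are undone on the error paths.
 */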
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);

void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(task);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

Linus Torvalds1da177e2005-04-16 15:20:36 -07001249/**
1250 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001251 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001252 * @family: protocol family
1253 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1254 * @prot: struct proto associated with this new sock instance
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001256struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001257 struct proto *prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258{
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001259 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001261 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001263 sk->sk_family = family;
1264 /*
1265 * See comment in struct sock definition to understand
1266 * why we need sk_prot_creator -acme
1267 */
1268 sk->sk_prot = sk->sk_prot_creator = prot;
1269 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001270 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001271 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001272
1273 sock_update_classid(sk);
John Fastabend406a3c62012-07-20 10:39:25 +00001274 sock_update_netprioidx(sk, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 }
Frank Filza79af592005-09-27 15:23:38 -07001276
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001277 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278}
Eric Dumazet2a915252009-05-27 11:30:05 +00001279EXPORT_SYMBOL(sk_alloc);
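/*
 * Illustrative sketch, not part of this file: a protocol family's
 * ->create() handler typically pairs sk_alloc() with sock_init_data().
 * "my_create" and "my_proto" below are hypothetical names.
 *
 *	static int my_create(struct net *net, struct socket *sock,
 *			     int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 */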
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280
Eric Dumazet2b85a342009-06-11 02:55:43 -07001281static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282{
1283 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284
1285 if (sk->sk_destruct)
1286 sk->sk_destruct(sk);
1287
Paul E. McKenneya898def2010-02-22 17:04:49 -08001288 filter = rcu_dereference_check(sk->sk_filter,
1289 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001291 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001292 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 }
1294
Eric Dumazet08e29af2011-11-28 12:04:18 +00001295 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
1297 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001298 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1299 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001301 if (sk->sk_peer_cred)
1302 put_cred(sk->sk_peer_cred);
1303 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001304 put_net(sock_net(sk));
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001305 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001307
1308void sk_free(struct sock *sk)
1309{
1310 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001311	 * We subtract one from sk_wmem_alloc so we can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001312	 * some packets are still in some tx queue.
1313	 * If it is not zero, sock_wfree() will call __sk_free(sk) later.
1314 */
1315 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1316 __sk_free(sk);
1317}
Eric Dumazet2a915252009-05-27 11:30:05 +00001318EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319
Denis V. Lunevedf02082008-02-29 11:18:32 -08001320/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001321 * The last sock_put should drop the reference to sk->sk_net. It has already
1322 * been dropped in sk_change_net. Taking a reference on the stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001323 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001324 * Take a reference on the socket to remove it from the hash while it is still
Denis V. Lunevedf02082008-02-29 11:18:32 -08001325 * _alive_, and after that destroy it in the context of init_net.
1326 */
1327void sk_release_kernel(struct sock *sk)
1328{
1329 if (sk == NULL || sk->sk_socket == NULL)
1330 return;
1331
1332 sock_hold(sk);
1333 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001334 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001335 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001336 sock_put(sk);
1337}
David S. Miller45af1752008-02-29 11:33:19 -08001338EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001339
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001340static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1341{
1342 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1343 sock_update_memcg(newsk);
1344}
1345
Eric Dumazete56c57d2011-11-08 17:07:07 -05001346/**
1347 * sk_clone_lock - clone a socket, and lock its clone
1348 * @sk: the socket to clone
1349 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1350 *
1351 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1352 */
1353struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001354{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001355 struct sock *newsk;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001356
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001357 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001358 if (newsk != NULL) {
1359 struct sk_filter *filter;
1360
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001361 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001362
1363 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001364 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001365 sk_node_init(&newsk->sk_node);
1366 sock_lock_init(newsk);
1367 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001368 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001369 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001370
1371 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001372 /*
1373 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1374 */
1375 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001376 atomic_set(&newsk->sk_omem_alloc, 0);
1377 skb_queue_head_init(&newsk->sk_receive_queue);
1378 skb_queue_head_init(&newsk->sk_write_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001379#ifdef CONFIG_NET_DMA
1380 skb_queue_head_init(&newsk->sk_async_wait_queue);
1381#endif
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001382
Eric Dumazetb6c67122010-04-08 23:03:29 +00001383 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001384 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001385 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1386 af_callback_keys + newsk->sk_family,
1387 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001388
1389 newsk->sk_dst_cache = NULL;
1390 newsk->sk_wmem_queued = 0;
1391 newsk->sk_forward_alloc = 0;
1392 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001393 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1394
1395 sock_reset_flag(newsk, SOCK_DONE);
1396 skb_queue_head_init(&newsk->sk_error_queue);
1397
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001398 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001399 if (filter != NULL)
1400 sk_filter_charge(newsk, filter);
1401
1402 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1403			/* It is still a raw copy of the parent, so invalidate
1404			 * the destructor and do a plain sk_free() */
1405 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001406 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001407 sk_free(newsk);
1408 newsk = NULL;
1409 goto out;
1410 }
1411
1412 newsk->sk_err = 0;
1413 newsk->sk_priority = 0;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001414 /*
1415 * Before updating sk_refcnt, we must commit prior changes to memory
1416 * (Documentation/RCU/rculist_nulls.txt for details)
1417 */
1418 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001419 atomic_set(&newsk->sk_refcnt, 2);
1420
1421 /*
1422 * Increment the counter in the same struct proto as the master
1423 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1424 * is the same as sk->sk_prot->socks, as this field was copied
1425 * with memcpy).
1426 *
1427 * This _changes_ the previous behaviour, where
1428		 * tcp_create_openreq_child was always incrementing the
1429		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1430 * to be taken into account in all callers. -acme
1431 */
1432 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001433 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001434 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001435
Glauber Costaf3f511e2012-01-05 20:16:39 +00001436 sk_update_clone(sk, newsk);
1437
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001438 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001439 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001440
Eric Dumazet08e29af2011-11-28 12:04:18 +00001441 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001442 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001443 }
1444out:
1445 return newsk;
1446}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001447EXPORT_SYMBOL_GPL(sk_clone_lock);
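/*
 * Illustrative sketch, not part of this file: callers own the bh_lock on
 * the returned clone and must drop it themselves, including on their own
 * error paths:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		... protocol specific initialisation of newsk ...
 *		bh_unlock_sock(newsk);
 *	}
 */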
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001448
Andi Kleen99580892007-04-20 17:12:43 -07001449void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1450{
1451 __sk_dst_set(sk, dst);
1452 sk->sk_route_caps = dst->dev->features;
1453 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001454 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001455 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001456 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001457 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001458 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001459 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001460 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001461 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001462 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001463 }
Andi Kleen99580892007-04-20 17:12:43 -07001464 }
1465}
1466EXPORT_SYMBOL_GPL(sk_setup_caps);
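/*
 * Illustrative sketch, not part of this file: connect-type paths call this
 * once the output route is known, e.g. in a hypothetical IPv4 connect
 * handler:
 *
 *	rt = ip_route_output_flow(net, fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	sk_setup_caps(sk, &rt->dst);
 */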
1467
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468void __init sk_init(void)
1469{
Jan Beulich44813742009-09-21 17:03:05 -07001470 if (totalram_pages <= 4096) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 sysctl_wmem_max = 32767;
1472 sysctl_rmem_max = 32767;
1473 sysctl_wmem_default = 32767;
1474 sysctl_rmem_default = 32767;
Jan Beulich44813742009-09-21 17:03:05 -07001475 } else if (totalram_pages >= 131072) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 sysctl_wmem_max = 131071;
1477 sysctl_rmem_max = 131071;
1478 }
1479}
1480
1481/*
1482 * Simple resource managers for sockets.
1483 */
1484
1485
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001486/*
1487 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 */
1489void sock_wfree(struct sk_buff *skb)
1490{
1491 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001492 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493
Eric Dumazetd99927f2009-09-24 10:49:24 +00001494 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1495 /*
1496		 * Keep a reference on sk_wmem_alloc; it will be released
1497		 * after the sk_write_space() call
1498 */
1499 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001501 len = 1;
1502 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001503 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001504 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1505 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001506 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001507 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001508 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509}
Eric Dumazet2a915252009-05-27 11:30:05 +00001510EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001512/*
1513 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 */
1515void sock_rfree(struct sk_buff *skb)
1516{
1517 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001518 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
Eric Dumazetd361fd52010-07-10 22:45:17 +00001520 atomic_sub(len, &sk->sk_rmem_alloc);
1521 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522}
Eric Dumazet2a915252009-05-27 11:30:05 +00001523EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
David S. Miller41063e92012-06-19 21:22:05 -07001525void sock_edemux(struct sk_buff *skb)
1526{
Eric Dumazete8123472012-09-02 23:57:18 +00001527 struct sock *sk = skb->sk;
1528
Randy Dunlap1c463e52012-09-10 09:13:07 -07001529#ifdef CONFIG_INET
Eric Dumazete8123472012-09-02 23:57:18 +00001530 if (sk->sk_state == TCP_TIME_WAIT)
1531 inet_twsk_put(inet_twsk(sk));
1532 else
Randy Dunlap1c463e52012-09-10 09:13:07 -07001533#endif
Eric Dumazete8123472012-09-02 23:57:18 +00001534 sock_put(sk);
David S. Miller41063e92012-06-19 21:22:05 -07001535}
1536EXPORT_SYMBOL(sock_edemux);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
1538int sock_i_uid(struct sock *sk)
1539{
1540 int uid;
1541
Eric Dumazetf064af12010-09-22 12:43:39 +00001542 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001544 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 return uid;
1546}
Eric Dumazet2a915252009-05-27 11:30:05 +00001547EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
1549unsigned long sock_i_ino(struct sock *sk)
1550{
1551 unsigned long ino;
1552
Eric Dumazetf064af12010-09-22 12:43:39 +00001553 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001555 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 return ino;
1557}
Eric Dumazet2a915252009-05-27 11:30:05 +00001558EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
1560/*
1561 * Allocate a skb from the socket's send buffer.
1562 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001563struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001564 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565{
1566 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001567 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 if (skb) {
1569 skb_set_owner_w(skb, sk);
1570 return skb;
1571 }
1572 }
1573 return NULL;
1574}
Eric Dumazet2a915252009-05-27 11:30:05 +00001575EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576
1577/*
1578 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001579 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001580struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001581 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582{
1583 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1584 struct sk_buff *skb = alloc_skb(size, priority);
1585 if (skb) {
1586 skb_set_owner_r(skb, sk);
1587 return skb;
1588 }
1589 }
1590 return NULL;
1591}
1592
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001593/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001595 */
Al Virodd0fc662005-10-07 07:46:04 +01001596void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597{
Eric Dumazet95c96172012-04-15 05:58:06 +00001598 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1600 void *mem;
1601 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001602 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 */
1604 atomic_add(size, &sk->sk_omem_alloc);
1605 mem = kmalloc(size, priority);
1606 if (mem)
1607 return mem;
1608 atomic_sub(size, &sk->sk_omem_alloc);
1609 }
1610 return NULL;
1611}
Eric Dumazet2a915252009-05-27 11:30:05 +00001612EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
1614/*
1615 * Free an option memory block.
1616 */
1617void sock_kfree_s(struct sock *sk, void *mem, int size)
1618{
1619 kfree(mem);
1620 atomic_sub(size, &sk->sk_omem_alloc);
1621}
Eric Dumazet2a915252009-05-27 11:30:05 +00001622EXPORT_SYMBOL(sock_kfree_s);
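/*
 * Illustrative sketch, not part of this file: sock_kmalloc() charges the
 * allocation to sk_omem_alloc, so the matching sock_kfree_s() must quote
 * the same size ("opt" is just an example buffer):
 *
 *	void *opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 */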
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623
1624/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1625   I think these locks should be removed for datagram sockets.
1626 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001627static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628{
1629 DEFINE_WAIT(wait);
1630
1631 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1632 for (;;) {
1633 if (!timeo)
1634 break;
1635 if (signal_pending(current))
1636 break;
1637 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001638 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1640 break;
1641 if (sk->sk_shutdown & SEND_SHUTDOWN)
1642 break;
1643 if (sk->sk_err)
1644 break;
1645 timeo = schedule_timeout(timeo);
1646 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001647 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 return timeo;
1649}
1650
1651
1652/*
1653 * Generic send/receive buffer handlers
1654 */
1655
Herbert Xu4cc7f682009-02-04 16:55:54 -08001656struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1657 unsigned long data_len, int noblock,
1658 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659{
1660 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001661 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 long timeo;
1663 int err;
Jason Wangcc9b17a2012-05-30 21:18:10 +00001664 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1665
1666 err = -EMSGSIZE;
1667 if (npages > MAX_SKB_FRAGS)
1668 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
1670 gfp_mask = sk->sk_allocation;
1671 if (gfp_mask & __GFP_WAIT)
1672 gfp_mask |= __GFP_REPEAT;
1673
1674 timeo = sock_sndtimeo(sk, noblock);
1675 while (1) {
1676 err = sock_error(sk);
1677 if (err != 0)
1678 goto failure;
1679
1680 err = -EPIPE;
1681 if (sk->sk_shutdown & SEND_SHUTDOWN)
1682 goto failure;
1683
1684 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001685 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 if (skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 int i;
1688
1689 /* No pages, we're done... */
1690 if (!data_len)
1691 break;
1692
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 skb->truesize += data_len;
1694 skb_shinfo(skb)->nr_frags = npages;
1695 for (i = 0; i < npages; i++) {
1696 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
1698 page = alloc_pages(sk->sk_allocation, 0);
1699 if (!page) {
1700 err = -ENOBUFS;
1701 skb_shinfo(skb)->nr_frags = i;
1702 kfree_skb(skb);
1703 goto failure;
1704 }
1705
Ian Campbellea2ab692011-08-22 23:44:58 +00001706 __skb_fill_page_desc(skb, i,
1707 page, 0,
1708 (data_len >= PAGE_SIZE ?
1709 PAGE_SIZE :
1710 data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 data_len -= PAGE_SIZE;
1712 }
1713
1714 /* Full success... */
1715 break;
1716 }
1717 err = -ENOBUFS;
1718 goto failure;
1719 }
1720 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1721 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1722 err = -EAGAIN;
1723 if (!timeo)
1724 goto failure;
1725 if (signal_pending(current))
1726 goto interrupted;
1727 timeo = sock_wait_for_wmem(sk, timeo);
1728 }
1729
1730 skb_set_owner_w(skb, sk);
1731 return skb;
1732
1733interrupted:
1734 err = sock_intr_errno(timeo);
1735failure:
1736 *errcode = err;
1737 return NULL;
1738}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001739EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001741struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 int noblock, int *errcode)
1743{
1744 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1745}
Eric Dumazet2a915252009-05-27 11:30:05 +00001746EXPORT_SYMBOL(sock_alloc_send_skb);
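/*
 * Illustrative sketch, not part of this file: a datagram sendmsg()
 * implementation typically lets this helper honour MSG_DONTWAIT and then
 * copies the payload in (error handling abridged, "hlen" hypothetical):
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;
 *	skb_reserve(skb, hlen);
 *	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 */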
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
1748static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001749 __releases(&sk->sk_lock.slock)
1750 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751{
1752 DEFINE_WAIT(wait);
1753
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001754 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1756 TASK_UNINTERRUPTIBLE);
1757 spin_unlock_bh(&sk->sk_lock.slock);
1758 schedule();
1759 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001760 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 break;
1762 }
1763 finish_wait(&sk->sk_lock.wq, &wait);
1764}
1765
1766static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001767 __releases(&sk->sk_lock.slock)
1768 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769{
1770 struct sk_buff *skb = sk->sk_backlog.head;
1771
1772 do {
1773 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1774 bh_unlock_sock(sk);
1775
1776 do {
1777 struct sk_buff *next = skb->next;
1778
Eric Dumazete4cbb022012-04-30 16:07:09 +00001779 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001780 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001782 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
1784 /*
1785 * We are in process context here with softirqs
1786 * disabled, use cond_resched_softirq() to preempt.
1787 * This is safe to do because we've taken the backlog
1788 * queue private:
1789 */
1790 cond_resched_softirq();
1791
1792 skb = next;
1793 } while (skb != NULL);
1794
1795 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001796 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001797
1798 /*
1799	 * Doing the zeroing here guarantees we cannot loop forever
1800 * while a wild producer attempts to flood us.
1801 */
1802 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803}
1804
1805/**
1806 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001807 * @sk: sock to wait on
1808 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 *
1810 * Now socket state including sk->sk_err is changed only under lock,
1811 * hence we may omit checks after joining wait queue.
1812 * We check receive queue before schedule() only as optimization;
1813 * it is very likely that release_sock() added new data.
1814 */
1815int sk_wait_data(struct sock *sk, long *timeo)
1816{
1817 int rc;
1818 DEFINE_WAIT(wait);
1819
Eric Dumazetaa395142010-04-20 13:03:51 +00001820 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1822 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1823 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001824 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 return rc;
1826}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827EXPORT_SYMBOL(sk_wait_data);
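/*
 * Illustrative sketch, not part of this file: a blocking receive path
 * normally loops on sk_wait_data() with the socket lock held (signal and
 * error handling abridged):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo);
 *	}
 *	release_sock(sk);
 */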
1828
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001829/**
1830 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1831 * @sk: socket
1832 * @size: memory size to allocate
1833 * @kind: allocation type
1834 *
1835 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1836 * rmem allocation. This function assumes that protocols which have
1837 * memory_pressure use sk_wmem_queued as write buffer accounting.
1838 */
1839int __sk_mem_schedule(struct sock *sk, int size, int kind)
1840{
1841 struct proto *prot = sk->sk_prot;
1842 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001843 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001844 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001845
1846 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001847
Glauber Costae1aab162011-12-11 21:47:03 +00001848 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001849
1850 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001851 if (parent_status == UNDER_LIMIT &&
1852 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001853 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001854 return 1;
1855 }
1856
Glauber Costae1aab162011-12-11 21:47:03 +00001857 /* Under pressure. (we or our parents) */
1858 if ((parent_status > SOFT_LIMIT) ||
1859 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001860 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001861
Glauber Costae1aab162011-12-11 21:47:03 +00001862 /* Over hard limit (we or our parents) */
1863 if ((parent_status == OVER_LIMIT) ||
1864 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001865 goto suppress_allocation;
1866
1867 /* guarantee minimum buffer size under pressure */
1868 if (kind == SK_MEM_RECV) {
1869 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1870 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001871
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001872 } else { /* SK_MEM_SEND */
1873 if (sk->sk_type == SOCK_STREAM) {
1874 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1875 return 1;
1876 } else if (atomic_read(&sk->sk_wmem_alloc) <
1877 prot->sysctl_wmem[0])
1878 return 1;
1879 }
1880
Glauber Costa180d8cd2011-12-11 21:47:02 +00001881 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08001882 int alloc;
1883
Glauber Costa180d8cd2011-12-11 21:47:02 +00001884 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08001885 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001886 alloc = sk_sockets_allocated_read_positive(sk);
1887 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001888 sk_mem_pages(sk->sk_wmem_queued +
1889 atomic_read(&sk->sk_rmem_alloc) +
1890 sk->sk_forward_alloc))
1891 return 1;
1892 }
1893
1894suppress_allocation:
1895
1896 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1897 sk_stream_moderate_sndbuf(sk);
1898
1899 /* Fail only if socket is _under_ its sndbuf.
1900		 * In this case we cannot block, so we have to fail.
1901 */
1902 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1903 return 1;
1904 }
1905
Satoru Moriya3847ce32011-06-17 12:00:03 +00001906 trace_sock_exceed_buf_limit(sk, prot, allocated);
1907
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001908 /* Alas. Undo changes. */
1909 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001910
Glauber Costa0e90b312012-01-20 04:57:16 +00001911 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00001912
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001913 return 0;
1914}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001915EXPORT_SYMBOL(__sk_mem_schedule);
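/*
 * Illustrative sketch, not part of this file: protocols normally reach
 * __sk_mem_schedule() through the sk_wmem_schedule()/sk_rmem_schedule()
 * wrappers before accepting an skb, e.g. on the send side:
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		goto drop;
 *	skb_set_owner_w(skb, sk);
 *	__skb_queue_tail(&sk->sk_write_queue, skb);
 */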
1916
1917/**
1918 * __sk_mem_reclaim - reclaim memory_allocated
1919 * @sk: socket
1920 */
1921void __sk_mem_reclaim(struct sock *sk)
1922{
Glauber Costa180d8cd2011-12-11 21:47:02 +00001923 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00001924 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001925 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1926
Glauber Costa180d8cd2011-12-11 21:47:02 +00001927 if (sk_under_memory_pressure(sk) &&
1928 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1929 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001930}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001931EXPORT_SYMBOL(__sk_mem_reclaim);
1932
1933
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934/*
1935 * Set of default routines for initialising struct proto_ops when
1936 * the protocol does not support a particular function. In certain
1937 * cases where it makes no sense for a protocol to have a "do nothing"
1938 * function, some default processing is provided.
1939 */
1940
1941int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1942{
1943 return -EOPNOTSUPP;
1944}
Eric Dumazet2a915252009-05-27 11:30:05 +00001945EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001947int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 int len, int flags)
1949{
1950 return -EOPNOTSUPP;
1951}
Eric Dumazet2a915252009-05-27 11:30:05 +00001952EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
1954int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1955{
1956 return -EOPNOTSUPP;
1957}
Eric Dumazet2a915252009-05-27 11:30:05 +00001958EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
1960int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1961{
1962 return -EOPNOTSUPP;
1963}
Eric Dumazet2a915252009-05-27 11:30:05 +00001964EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001966int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 int *len, int peer)
1968{
1969 return -EOPNOTSUPP;
1970}
Eric Dumazet2a915252009-05-27 11:30:05 +00001971EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972
Eric Dumazet2a915252009-05-27 11:30:05 +00001973unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974{
1975 return 0;
1976}
Eric Dumazet2a915252009-05-27 11:30:05 +00001977EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978
1979int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1980{
1981 return -EOPNOTSUPP;
1982}
Eric Dumazet2a915252009-05-27 11:30:05 +00001983EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984
1985int sock_no_listen(struct socket *sock, int backlog)
1986{
1987 return -EOPNOTSUPP;
1988}
Eric Dumazet2a915252009-05-27 11:30:05 +00001989EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
1991int sock_no_shutdown(struct socket *sock, int how)
1992{
1993 return -EOPNOTSUPP;
1994}
Eric Dumazet2a915252009-05-27 11:30:05 +00001995EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
1997int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07001998 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999{
2000 return -EOPNOTSUPP;
2001}
Eric Dumazet2a915252009-05-27 11:30:05 +00002002EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
2004int sock_no_getsockopt(struct socket *sock, int level, int optname,
2005 char __user *optval, int __user *optlen)
2006{
2007 return -EOPNOTSUPP;
2008}
Eric Dumazet2a915252009-05-27 11:30:05 +00002009EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
2011int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2012 size_t len)
2013{
2014 return -EOPNOTSUPP;
2015}
Eric Dumazet2a915252009-05-27 11:30:05 +00002016EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2019 size_t len, int flags)
2020{
2021 return -EOPNOTSUPP;
2022}
Eric Dumazet2a915252009-05-27 11:30:05 +00002023EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
2025int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2026{
2027 /* Mirror missing mmap method error code */
2028 return -ENODEV;
2029}
Eric Dumazet2a915252009-05-27 11:30:05 +00002030EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031
2032ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2033{
2034 ssize_t res;
2035 struct msghdr msg = {.msg_flags = flags};
2036 struct kvec iov;
2037 char *kaddr = kmap(page);
2038 iov.iov_base = kaddr + offset;
2039 iov.iov_len = size;
2040 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2041 kunmap(page);
2042 return res;
2043}
Eric Dumazet2a915252009-05-27 11:30:05 +00002044EXPORT_SYMBOL(sock_no_sendpage);
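/*
 * Illustrative sketch, not part of this file: a protocol that does not
 * implement an operation simply points the corresponding proto_ops entry
 * at one of the stubs above ("my_ops" is hypothetical, fields abridged):
 *
 *	static const struct proto_ops my_ops = {
 *		.family		= PF_PACKET,
 *		.owner		= THIS_MODULE,
 *		.socketpair	= sock_no_socketpair,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */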
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045
2046/*
2047 * Default Socket Callbacks
2048 */
2049
2050static void sock_def_wakeup(struct sock *sk)
2051{
Eric Dumazet43815482010-04-29 11:01:49 +00002052 struct socket_wq *wq;
2053
2054 rcu_read_lock();
2055 wq = rcu_dereference(sk->sk_wq);
2056 if (wq_has_sleeper(wq))
2057 wake_up_interruptible_all(&wq->wait);
2058 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059}
2060
2061static void sock_def_error_report(struct sock *sk)
2062{
Eric Dumazet43815482010-04-29 11:01:49 +00002063 struct socket_wq *wq;
2064
2065 rcu_read_lock();
2066 wq = rcu_dereference(sk->sk_wq);
2067 if (wq_has_sleeper(wq))
2068 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002069 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002070 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071}
2072
2073static void sock_def_readable(struct sock *sk, int len)
2074{
Eric Dumazet43815482010-04-29 11:01:49 +00002075 struct socket_wq *wq;
2076
2077 rcu_read_lock();
2078 wq = rcu_dereference(sk->sk_wq);
2079 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002080 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002081 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002082 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002083 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084}
2085
2086static void sock_def_write_space(struct sock *sk)
2087{
Eric Dumazet43815482010-04-29 11:01:49 +00002088 struct socket_wq *wq;
2089
2090 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091
2092 /* Do not wake up a writer until he can make "significant"
2093 * progress. --DaveM
2094 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002095 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002096 wq = rcu_dereference(sk->sk_wq);
2097 if (wq_has_sleeper(wq))
2098 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002099 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100
2101 /* Should agree with poll, otherwise some programs break */
2102 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002103 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 }
2105
Eric Dumazet43815482010-04-29 11:01:49 +00002106 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107}
2108
2109static void sock_def_destruct(struct sock *sk)
2110{
Jesper Juhla51482b2005-11-08 09:41:34 -08002111 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112}
2113
2114void sk_send_sigurg(struct sock *sk)
2115{
2116 if (sk->sk_socket && sk->sk_socket->file)
2117 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002118 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119}
Eric Dumazet2a915252009-05-27 11:30:05 +00002120EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
2122void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2123 unsigned long expires)
2124{
2125 if (!mod_timer(timer, expires))
2126 sock_hold(sk);
2127}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128EXPORT_SYMBOL(sk_reset_timer);
2129
2130void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2131{
2132 if (timer_pending(timer) && del_timer(timer))
2133 __sock_put(sk);
2134}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135EXPORT_SYMBOL(sk_stop_timer);
2136
2137void sock_init_data(struct socket *sock, struct sock *sk)
2138{
2139 skb_queue_head_init(&sk->sk_receive_queue);
2140 skb_queue_head_init(&sk->sk_write_queue);
2141 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07002142#ifdef CONFIG_NET_DMA
2143 skb_queue_head_init(&sk->sk_async_wait_queue);
2144#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145
2146 sk->sk_send_head = NULL;
2147
2148 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002149
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 sk->sk_allocation = GFP_KERNEL;
2151 sk->sk_rcvbuf = sysctl_rmem_default;
2152 sk->sk_sndbuf = sysctl_wmem_default;
2153 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002154 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156 sock_set_flag(sk, SOCK_ZAPPED);
2157
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002158 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002160 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 sock->sk = sk;
2162 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002163 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164
Eric Dumazetb6c67122010-04-08 23:03:29 +00002165 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002167 lockdep_set_class_and_name(&sk->sk_callback_lock,
2168 af_callback_keys + sk->sk_family,
2169 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170
2171 sk->sk_state_change = sock_def_wakeup;
2172 sk->sk_data_ready = sock_def_readable;
2173 sk->sk_write_space = sock_def_write_space;
2174 sk->sk_error_report = sock_def_error_report;
2175 sk->sk_destruct = sock_def_destruct;
2176
2177 sk->sk_sndmsg_page = NULL;
2178 sk->sk_sndmsg_off = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002179 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002181 sk->sk_peer_pid = NULL;
2182 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 sk->sk_write_pending = 0;
2184 sk->sk_rcvlowat = 1;
2185 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2186 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2187
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002188 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002190 /*
2191 * Before updating sk_refcnt, we must commit prior changes to memory
2192 * (Documentation/RCU/rculist_nulls.txt for details)
2193 */
2194 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002196 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197}
Eric Dumazet2a915252009-05-27 11:30:05 +00002198EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002200void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201{
2202 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002203 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002204 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002206 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002207 spin_unlock(&sk->sk_lock.slock);
2208 /*
2209 * The sk_lock has mutex_lock() semantics here:
2210 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002211 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002212 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002214EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002216void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002218 /*
2219 * The sk_lock has mutex_unlock() semantics:
2220 */
2221 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2222
2223 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 if (sk->sk_backlog.tail)
2225 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002226
2227 if (sk->sk_prot->release_cb)
2228 sk->sk_prot->release_cb(sk);
2229
John Heffnerd2e91172007-09-12 10:44:19 +02002230 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002231 if (waitqueue_active(&sk->sk_lock.wq))
2232 wake_up(&sk->sk_lock.wq);
2233 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234}
2235EXPORT_SYMBOL(release_sock);
2236
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002237/**
2238 * lock_sock_fast - fast version of lock_sock
2239 * @sk: socket
2240 *
2241 * This version should be used for very small sections, where the process won't block
2242 * return false if fast path is taken
2243 * sk_lock.slock locked, owned = 0, BH disabled
2244 * return true if slow path is taken
2245 * sk_lock.slock unlocked, owned = 1, BH enabled
2246 */
2247bool lock_sock_fast(struct sock *sk)
2248{
2249 might_sleep();
2250 spin_lock_bh(&sk->sk_lock.slock);
2251
2252 if (!sk->sk_lock.owned)
2253 /*
2254 * Note : We must disable BH
2255 */
2256 return false;
2257
2258 __lock_sock(sk);
2259 sk->sk_lock.owned = 1;
2260 spin_unlock(&sk->sk_lock.slock);
2261 /*
2262 * The sk_lock has mutex_lock() semantics here:
2263 */
2264 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2265 local_bh_enable();
2266 return true;
2267}
2268EXPORT_SYMBOL(lock_sock_fast);
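/*
 * Illustrative sketch, not part of this file: the boolean result must be
 * handed back to unlock_sock_fast() so it can undo whichever path was
 * taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short, non-blocking critical section ...
 *
 *	unlock_sock_fast(sk, slow);
 */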
2269
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002271{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002272 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002274 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002275 tv = ktime_to_timeval(sk->sk_stamp);
2276 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002278 if (tv.tv_sec == 0) {
2279 sk->sk_stamp = ktime_get_real();
2280 tv = ktime_to_timeval(sk->sk_stamp);
2281 }
2282 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002283}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284EXPORT_SYMBOL(sock_get_timestamp);
2285
Eric Dumazetae40eb12007-03-18 17:33:16 -07002286int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2287{
2288 struct timespec ts;
2289 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002290 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002291 ts = ktime_to_timespec(sk->sk_stamp);
2292 if (ts.tv_sec == -1)
2293 return -ENOENT;
2294 if (ts.tv_sec == 0) {
2295 sk->sk_stamp = ktime_get_real();
2296 ts = ktime_to_timespec(sk->sk_stamp);
2297 }
2298 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2299}
2300EXPORT_SYMBOL(sock_get_timestampns);
2301
Patrick Ohly20d49472009-02-12 05:03:38 +00002302void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002303{
Patrick Ohly20d49472009-02-12 05:03:38 +00002304 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002305 unsigned long previous_flags = sk->sk_flags;
2306
Patrick Ohly20d49472009-02-12 05:03:38 +00002307 sock_set_flag(sk, flag);
2308 /*
2309 * we just set one of the two flags which require net
2310 * time stamping, but time stamping might have been on
2311 * already because of the other one
2312 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002313 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002314 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 }
2316}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
2318/*
2319 *	Get a socket option on a socket.
2320 *
2321 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2322 * asynchronous errors should be reported by getsockopt. We assume
2323 *	this means if you specify SO_ERROR (otherwise what's the point of it?).
2324 */
2325int sock_common_getsockopt(struct socket *sock, int level, int optname,
2326 char __user *optval, int __user *optlen)
2327{
2328 struct sock *sk = sock->sk;
2329
2330 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2331}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332EXPORT_SYMBOL(sock_common_getsockopt);
2333
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002334#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002335int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2336 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002337{
2338 struct sock *sk = sock->sk;
2339
Johannes Berg1e51f952007-03-06 13:44:06 -08002340 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002341 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2342 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002343 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2344}
2345EXPORT_SYMBOL(compat_sock_common_getsockopt);
2346#endif
2347
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2349 struct msghdr *msg, size_t size, int flags)
2350{
2351 struct sock *sk = sock->sk;
2352 int addr_len = 0;
2353 int err;
2354
2355 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2356 flags & ~MSG_DONTWAIT, &addr_len);
2357 if (err >= 0)
2358 msg->msg_namelen = addr_len;
2359 return err;
2360}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361EXPORT_SYMBOL(sock_common_recvmsg);
2362
2363/*
2364 * Set socket options on an inet socket.
2365 */
2366int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002367 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368{
2369 struct sock *sk = sock->sk;
2370
2371 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2372}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373EXPORT_SYMBOL(sock_common_setsockopt);
2374
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002375#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002376int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002377 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002378{
2379 struct sock *sk = sock->sk;
2380
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002381 if (sk->sk_prot->compat_setsockopt != NULL)
2382 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2383 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002384 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2385}
2386EXPORT_SYMBOL(compat_sock_common_setsockopt);
2387#endif
2388
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389void sk_common_release(struct sock *sk)
2390{
2391 if (sk->sk_prot->destroy)
2392 sk->sk_prot->destroy(sk);
2393
2394 /*
2395	 * Observation: when sk_common_release is called, processes have
2396	 * no access to the socket, but the network stack still does.
2397 * Step one, detach it from networking:
2398 *
2399 * A. Remove from hash tables.
2400 */
2401
2402 sk->sk_prot->unhash(sk);
2403
2404 /*
2405	 * At this point the socket cannot receive new packets, but it is possible
2406	 * that some packets are in flight because some CPU runs the receiver and
2407	 * did the hash table lookup before we unhashed the socket. They will reach
2408	 * the receive queue and will be purged by the socket destructor.
2409	 *
2410	 * Also we still have packets pending on the receive queue and probably
2411	 * our own packets waiting in device queues. sock_destroy will drain the
2412	 * receive queue, but transmitted packets will delay socket destruction
2413	 * until the last reference is released.
2414 */
2415
2416 sock_orphan(sk);
2417
2418 xfrm_sk_free_policy(sk);
2419
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002420 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 sock_put(sk);
2422}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423EXPORT_SYMBOL(sk_common_release);
2424
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002425#ifdef CONFIG_PROC_FS
2426#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002427struct prot_inuse {
2428 int val[PROTO_INUSE_NR];
2429};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002430
2431static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002432
2433#ifdef CONFIG_NET_NS
2434void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2435{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002436 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002437}
2438EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2439
2440int sock_prot_inuse_get(struct net *net, struct proto *prot)
2441{
2442 int cpu, idx = prot->inuse_idx;
2443 int res = 0;
2444
2445 for_each_possible_cpu(cpu)
2446 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2447
2448 return res >= 0 ? res : 0;
2449}
2450EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2451
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002452static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002453{
2454 net->core.inuse = alloc_percpu(struct prot_inuse);
2455 return net->core.inuse ? 0 : -ENOMEM;
2456}
2457
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002458static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002459{
2460 free_percpu(net->core.inuse);
2461}
2462
2463static struct pernet_operations net_inuse_ops = {
2464 .init = sock_inuse_init_net,
2465 .exit = sock_inuse_exit_net,
2466};
2467
2468static __init int net_inuse_init(void)
2469{
2470 if (register_pernet_subsys(&net_inuse_ops))
2471 panic("Cannot initialize net inuse counters");
2472
2473 return 0;
2474}
2475
2476core_initcall(net_inuse_init);
2477#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002478static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2479
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002480void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002481{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002482 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002483}
2484EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2485
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002486int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002487{
2488 int cpu, idx = prot->inuse_idx;
2489 int res = 0;
2490
2491 for_each_possible_cpu(cpu)
2492 res += per_cpu(prot_inuse, cpu).val[idx];
2493
2494 return res >= 0 ? res : 0;
2495}
2496EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002497#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002498
2499static void assign_proto_idx(struct proto *prot)
2500{
2501 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2502
2503 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002504 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002505 return;
2506 }
2507
2508 set_bit(prot->inuse_idx, proto_inuse_idx);
2509}
2510
2511static void release_proto_idx(struct proto *prot)
2512{
2513 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2514 clear_bit(prot->inuse_idx, proto_inuse_idx);
2515}
2516#else
2517static inline void assign_proto_idx(struct proto *prot)
2518{
2519}
2520
2521static inline void release_proto_idx(struct proto *prot)
2522{
2523}
2524#endif
2525
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

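/*
 * Unregister a protocol: drop it from proto_list, release its inuse slot
 * and destroy any slab caches created by proto_register().
 */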
void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
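
/*
 * Usage sketch (illustrative only, not part of this file): a protocol
 * implementation typically fills in a struct proto and pairs
 * proto_register()/proto_unregister() in its module init/exit paths.
 * The names below (my_proto, struct my_sock, my_proto_init/exit) are
 * hypothetical placeholders, not real kernel symbols.
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	static int __init my_proto_init(void)
 *	{
 *		return proto_register(&my_proto, 1);
 *	}
 *
 *	static void __exit my_proto_exit(void)
 *	{
 *		proto_unregister(&my_proto);
 *	}
 */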

#ifdef CONFIG_PROC_FS
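/*
 * /proc/net/protocols: seq_file interface that walks proto_list under
 * proto_list_mutex and prints one summary line per registered protocol.
 */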
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

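/*
 * Emit one row for a protocol, matching the column header printed by
 * proto_seq_show(): name, object size, socket count, memory, pressure
 * state, max header, slab/module info and a y/n flag per method.
 */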
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

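/*
 * Per-network-namespace setup: create the "protocols" entry in each
 * namespace's /proc/net on init and remove it on exit.
 */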
static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */