/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
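/*
 * Rough sizing rationale (a sketch, not a hard guarantee): a cpulist such
 * as "0-3,8,12-15" needs at most about six characters per possible CPU
 * ("NNNNN," in the worst case), so 100 + 6 * NR_CPUS comfortably bounds
 * any mask userspace can legitimately pass in.
 */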

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id = GENL_ID_GENERATE,
	.name = TASKSTATS_GENL_NAME,
	.version = TASKSTATS_GENL_VERSION,
	.maxattr = TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}
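
/*
 * Note on the two paths above: a reply to an explicit request reuses the
 * requester's sequence number via genlmsg_put_reply(), whereas exit-time
 * records pushed with info == NULL draw from a per-cpu counter, which
 * presumably lets listeners spot gaps in each cpu's autonomous stream.
 */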

/*
 * Send taskstats data in @skb back to the requester described by @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to the per-cpu @listeners registered for
 * exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}
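
/*
 * Fan-out sketch for the function above: the skb is cloned once per
 * additional listener and unicast to each nl_pid in turn; a listener that
 * has gone away (-ECONNREFUSED) is only marked invalid under the read
 * lock, and the actual unlinking happens afterwards under the write lock
 * so the common send path never needs the semaphore for writing.
 */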

static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
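
/*
 * Per-tgid totals are thus assembled from two sources: whatever the
 * already-exited threads accumulated into signal->stats (filled by
 * fill_tgid_exit() below), plus a walk over the remaining live,
 * non-exiting threads under the group leader's siglock.
 */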

static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}

static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}
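
/*
 * The cpumask attribute is expected to be a cpulist in the usual kernel
 * format, e.g. "0-3,6,8-10". A missing attribute is reported as 1 so the
 * caller can distinguish "not supplied" from a parse error.
 */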

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}
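
/*
 * The reply payload built above nests as follows (the TYPE_NULL pad is
 * present only when TASKSTATS_NEEDS_PADDING is defined):
 *
 *	TASKSTATS_TYPE_NULL			(empty, alignment only)
 *	TASKSTATS_TYPE_AGGR_PID / _TGID
 *		TASKSTATS_TYPE_PID / _TGID	(u32)
 *		TASKSTATS_TYPE_STATS		(struct taskstats)
 *
 * The returned pointer addresses the reserved TYPE_STATS area, which the
 * caller fills in afterwards.
 */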

static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}
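
/*
 * For example, the size above accounts for the u32 pid/tgid attribute,
 * the struct taskstats blob, one empty attribute header for the aggregate
 * nest and, when padding is required, one more for the TYPE_NULL pad,
 * each rounded up by nla_total_size(). taskstats_exit() doubles it when
 * both a PID and a TGID record go into the same packet.
 */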

static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}
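
/*
 * A typical userspace exchange (see Documentation/accounting/getdelays.c
 * for a complete example) sends TASKSTATS_CMD_GET with exactly one of the
 * four attributes above, e.g. TASKSTATS_CMD_ATTR_PID = <pid>, and receives
 * a TASKSTATS_CMD_NEW message carrying the nested aggregate built by
 * mk_reply().
 */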

static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}
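
/*
 * The pattern above is deliberately racy-but-safe: the zeroed block is
 * allocated outside the siglock, installed only if no other thread won
 * the race, and freed again otherwise. A failed allocation simply means
 * the group carries on without aggregated per-tgid stats.
 */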

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}
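
/*
 * In short: every exiting task contributes one PID + STATS pair to the
 * per-cpu listeners, and the last member of a multithreaded group
 * additionally appends a TGID + STATS pair with the accumulated group
 * totals.
 */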

static const struct genl_ops taskstats_ops[] = {
	{
		.cmd = TASKSTATS_CMD_GET,
		.doit = taskstats_user_cmd,
		.policy = taskstats_cmd_get_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = CGROUPSTATS_CMD_GET,
		.doit = cgroupstats_user_cmd,
		.policy = cgroupstats_cmd_get_policy,
	},
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family_with_ops(&family, taskstats_ops);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);