sched: style cleanup, #2
style cleanup of various recent changes.
no code changed:
   text    data     bss     dec     hex filename
  26399    2578      48   29025    7161 sched.o.before
  26399    2578      48   29025    7161 sched.o.after
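(the before/after rows are standard binutils "size" output; a typical way
 to produce such a table, assuming the object file was copied aside before
 and after rebuilding, is:

     size sched.o.before sched.o.after

 identical text/data/bss/dec values are a quick sanity check that the
 cleanup is whitespace/comment-only and changes no generated code)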
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 461ee90..23b9925 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -235,17 +235,17 @@
* Every task in system belong to this group at bootup.
*/
struct task_group init_task_group = {
- .se = init_sched_entity_p,
+ .se = init_sched_entity_p,
.cfs_rq = init_cfs_rq_p,
};
#ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD 2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
#else
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
#endif
-#define MIN_GROUP_SHARES 2
+#define MIN_GROUP_SHARES 2
static int init_task_group_load = INIT_TASK_GROUP_LOAD;
@@ -352,8 +352,8 @@
/*
* We add the notion of a root-domain which will be used to define per-domain
- * variables.  Each exclusive cpuset essentially defines an island domain by
- * fully partitioning the member cpus from any other cpuset.  Whenever a new
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
* exclusive cpuset is created, we also create and attach a new root-domain
* object.
*
@@ -365,12 +365,12 @@
cpumask_t span;
cpumask_t online;
- /*
+ /*
* The "RT overload" flag: it gets set if a CPU has more than
* one runnable RT task.
*/
cpumask_t rto_mask;
- atomic_t rto_count;
+ atomic_t rto_count;
};
static struct root_domain def_root_domain;
@@ -434,7 +434,7 @@
atomic_t nr_iowait;
#ifdef CONFIG_SMP
- struct root_domain *rd;
+ struct root_domain *rd;
struct sched_domain *sd;
/* For active balancing */
@@ -5066,7 +5066,7 @@
if (p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, &new_mask);
else {
- p->cpus_allowed = new_mask;
+ p->cpus_allowed = new_mask;
p->nr_cpus_allowed = cpus_weight(new_mask);
}
@@ -5847,9 +5847,10 @@
if (rq->rd) {
struct root_domain *old_rd = rq->rd;
- for (class = sched_class_highest; class; class = class->next)
+ for (class = sched_class_highest; class; class = class->next) {
if (class->leave_domain)
class->leave_domain(rq);
+ }
if (atomic_dec_and_test(&old_rd->refcount))
kfree(old_rd);
@@ -5858,9 +5859,10 @@
atomic_inc(&rd->refcount);
rq->rd = rd;
- for (class = sched_class_highest; class; class = class->next)
+ for (class = sched_class_highest; class; class = class->next) {
if (class->join_domain)
class->join_domain(rq);
+ }
spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -5895,11 +5897,11 @@
}
/*
- * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
* hold the hotplug lock.
*/
-static void cpu_attach_domain(struct sched_domain *sd,
- struct root_domain *rd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
@@ -7095,7 +7097,7 @@
for_each_cpu_mask(i, sdspan)
total_load += tg->cfs_rq[i]->load.weight;
- /* Nothing to do if this group has no load */
+ /* Nothing to do if this group has no load */
if (!total_load)
continue;