/*
 * kernel/freezer.c - Function to freeze a process
 *
 * Originally from kernel/power/process.c
 */

#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect, protected by pm_mutex */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if system_freezing_cnt isn't zero
 * and tests whether @p needs to enter and stay in frozen state.  Can be
 * called in any context.  The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
bool freezing_slow_path(struct task_struct *p)
{
	if (p->flags & PF_NOFREEZE)
		return false;

	if (pm_nosig_freezing || cgroup_freezing(p))
		return true;

	if (pm_freezing && !(p->flags & PF_KTHREAD))
		return true;

	return false;
}
EXPORT_SYMBOL(freezing_slow_path);

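/*
 * For reference, a minimal sketch (assuming the freezer.h layout of this
 * kernel generation) of the freezing() fast path that calls into
 * freezing_slow_path() above; include/linux/freezer.h remains the
 * authoritative definition:
 *
 *	static inline bool freezing(struct task_struct *p)
 *	{
 *		if (likely(!atomic_read(&system_freezing_cnt)))
 *			return false;
 *		return freezing_slow_path(p);
 *	}
 *
 * The common case (no freezing condition in effect) costs one atomic read;
 * only then is the per-task slow path above consulted.
 */
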
/* The refrigerator is the place where frozen processes are stored :-). */
bool __refrigerator(bool check_kthr_stop)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	bool was_frozen = false;
	long save = current->state;

	pr_debug("%s entered refrigerator\n", current->comm);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);

		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	pr_debug("%s left refrigerator\n", current->comm);

	/*
	 * Restore saved task state before returning.  The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);

	return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);

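/*
 * For reference, a minimal sketch (assuming the try_to_freeze() helper of
 * this kernel generation) of how a task normally ends up in
 * __refrigerator(); see include/linux/freezer.h for the real definition:
 *
 *	static inline bool try_to_freeze(void)
 *	{
 *		if (likely(!freezing(current)))
 *			return false;
 *		return __refrigerator(false);
 *	}
 *
 * kthread_freezable_should_stop() instead passes check_kthr_stop=true so
 * that a pending kthread_stop() can break the thread out of the freezer.
 */
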
/*
 * Make it look like a signal is pending so that @p leaves any
 * interruptible sleep and runs through the signal-delivery path, where
 * userspace tasks notice the freezing condition.  No real signal is
 * queued.
 */
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}

/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a
 * kernel thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler lock and
		 * guarantees that TASK_STOPPED/TRACED -> TASK_RUNNING
		 * transition can't race with task state testing in
		 * try_to_freeze_tasks().
		 */
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}

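/*
 * Illustrative only: a simplified sketch, not the exact code in
 * kernel/power/process.c, of how a freezer such as try_to_freeze_tasks()
 * drives freeze_task() once system_freezing_cnt has been incremented and
 * pm_freezing/pm_nosig_freezing set:
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, p) {
 *		if (p == current || !freeze_task(p))
 *			continue;
 *		todo++;
 *	} while_each_thread(g, p);
 *	read_unlock(&tasklist_lock);
 *
 * The pass is repeated (todo counts tasks that still need to freeze) until
 * no task reports %true, i.e. everything that must freeze has entered the
 * refrigerator.
 */
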
void __thaw_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * Wake up @p if it's frozen.  The freezing condition is cleared by
	 * the caller before we get here and the clearing is guaranteed to
	 * be visible to @p as waking up implies wmb.  Waking up inside
	 * freezer_lock also prevents wakeups from leaking outside
	 * refrigerator.
	 */
	spin_lock_irqsave(&freezer_lock, flags);
	if (frozen(p))
		wake_up_process(p);
	spin_unlock_irqrestore(&freezer_lock, flags);
}

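/*
 * Illustrative only: a simplified sketch of how a thawing path such as
 * thaw_processes() in kernel/power/process.c is expected to pair with
 * __thaw_task() -- clear the freezing condition first (pm_freezing and the
 * matching system_freezing_cnt reference), then wake every task that is
 * still sitting in the refrigerator:
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, p)
 *		__thaw_task(p);
 *	while_each_thread(g, p);
 *	read_unlock(&tasklist_lock);
 *
 * With the condition already cleared, a woken task's freezing() test in
 * __refrigerator() fails, so it drops PF_FROZEN and leaves the loop.
 */
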
/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
	might_sleep();

	/*
	 * Modify flags while holding freezer_lock.  This ensures that
	 * either the freezer sees that we aren't frozen yet, or the
	 * freezing condition is visible to the try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);

	return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
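
/*
 * Illustrative only: the usual pattern for a freezable kernel thread.  The
 * function names example_kthread_fn() and do_example_work() are
 * hypothetical, not part of this file or of the freezer API:
 *
 *	static int example_kthread_fn(void *data)
 *	{
 *		set_freezable();
 *
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();
 *			do_example_work();
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * Kernel threads spawned by kthreadd inherit PF_NOFREEZE, so only threads
 * that opt in via set_freezable() are caught by freeze_task() during
 * freezing of kernel threads.
 */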