/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

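/*
 * Illustrative userspace sketch (not part of this file; fd and len are
 * assumed placeholders, error handling omitted): asking the kernel to
 * start readahead on a file-backed mapping before it is touched.  The
 * call queues the I/O and returns without waiting for it to finish.
 *
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, len, MADV_WILLNEED);
 */
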
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

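/*
 * Illustrative userspace sketch (assumed names, not part of this file):
 * a custom allocator handing an anonymous region back to the kernel
 * while keeping the mapping itself.  After the call the range reads
 * back as zeroes; as noted above, nothing is written out to swap.
 *
 *	char *arena = mmap(NULL, arena_size, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	... use arena ...
 *	madvise(arena, arena_size, MADV_DONTNEED);
 */
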
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;
	int error;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	if (!vma->vm_file || !vma->vm_file->f_mapping
		|| !vma->vm_file->f_mapping->host) {
			return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/* vmtruncate_range needs to take i_mutex and i_alloc_sem */
	up_read(&current->mm->mmap_sem);
	error = vmtruncate_range(mapping->host, offset, endoff);
	down_read(&current->mm->mmap_sem);
	return error;
}

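/*
 * Illustrative userspace sketch (assumed names: tmpfs_fd, hole_off and
 * hole_len are page-aligned placeholders): punching a hole in a shared
 * tmpfs mapping.  The mapping must be shared and writable, and the
 * filesystem must support the operation, otherwise the call fails with
 * one of the errors described above.
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       tmpfs_fd, 0);
 *	madvise(p + hole_off, hole_len, MADV_REMOVE);
 */
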
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(unsigned long start, unsigned long end)
{
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		int ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;
		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		__memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return ret;
}
#endif

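/*
 * Illustrative userspace sketch (assumed names): a privileged test
 * program injecting a memory error into one of its own pages to
 * exercise the hwpoison path above.  Requires CAP_SYS_ADMIN and a
 * kernel built with CONFIG_MEMORY_FAILURE.
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;			(fault the page in first)
 *	madvise(p, psz, MADV_HWPOISON);
 */
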
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
		return 1;

	default:
		return 0;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
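/*
 * Illustrative userspace sketch (assumed names addr and length, not
 * part of this file): the typical calling convention.  addr must be
 * page-aligned, length is rounded up to a page boundary, and a
 * non-zero return leaves errno set to one of the values listed above.
 *
 *	#include <sys/mman.h>
 *
 *	if (madvise(addr, length, MADV_SEQUENTIAL) != 0)
 *		perror("madvise");
 */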
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON)
		return madvise_hwpoison(start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}