Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ (synced 2025-04-19 20:58:31 +09:00)
Revert "fanotify: disable readahead if we have pre-content watches"
This reverts commit fac84846a28c0950d4433118b3dffd44306df62d.

Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://patch.msgid.link/20250312073852.2123409-7-amir73il@gmail.com
parent 4f4dc3a937
commit 252256e416
 mm/filemap.c   | 12 ------------
 mm/readahead.c | 14 --------------
 2 files changed, 26 deletions(-)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3197,14 +3197,6 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	unsigned long vm_flags = vmf->vma->vm_flags;
 	unsigned int mmap_miss;
 
-	/*
-	 * If we have pre-content watches we need to disable readahead to make
-	 * sure that we don't populate our mapping with 0 filled pages that we
-	 * never emitted an event for.
-	 */
-	if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode)))
-		return fpin;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* Use the readahead code, even if readahead is disabled */
 	if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
@@ -3273,10 +3265,6 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
 	struct file *fpin = NULL;
 	unsigned int mmap_miss;
 
-	/* See comment in do_sync_mmap_readahead. */
-	if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode)))
-		return fpin;
-
 	/* If we don't want any read-ahead, don't bother */
 	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
 		return fpin;
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -128,7 +128,6 @@
 #include <linux/blk-cgroup.h>
 #include <linux/fadvise.h>
 #include <linux/sched/mm.h>
-#include <linux/fsnotify.h>
 
 #include "internal.h"
 
@@ -558,15 +557,6 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 	unsigned long max_pages, contig_count;
 	pgoff_t prev_index, miss;
 
-	/*
-	 * If we have pre-content watches we need to disable readahead to make
-	 * sure that we don't find 0 filled pages in cache that we never emitted
-	 * events for. Filesystems supporting HSM must make sure to not call
-	 * this function with ractl->file unset for files handled by HSM.
-	 */
-	if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
-		return;
-
 	/*
 	 * Even if readahead is disabled, issue this request as readahead
 	 * as we'll need it to satisfy the requested range. The forced
@@ -645,10 +635,6 @@ void page_cache_async_ra(struct readahead_control *ractl,
 	if (!ra->ra_pages)
 		return;
 
-	/* See the comment in page_cache_sync_ra. */
-	if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
-		return;
-
 	/*
 	 * Same bit is used for PG_readahead and PG_reclaim.
 	 */
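The removed checks all follow the same pattern: return early from the readahead paths when the file carries a pre-content (HSM) fsnotify watch, so the page cache is never populated with zero-filled pages for ranges that never generated an event. Below is a minimal, self-contained user-space sketch of just that decision, not kernel code; FMODE_PRE_CONTENT_WATCHED and should_do_readahead() are illustrative stand-ins for the kernel's FMODE_FSNOTIFY_HSM check and readahead internals.

/*
 * Sketch of the readahead bypass this commit reverts.
 * The flag value and helper names below are stand-ins, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define FMODE_PRE_CONTENT_WATCHED (1u << 0)  /* stand-in for FMODE_FSNOTIFY_HSM */

static bool should_do_readahead(unsigned int f_mode, unsigned int ra_pages)
{
	/* The reverted change returned early here, disabling readahead. */
	if (f_mode & FMODE_PRE_CONTENT_WATCHED)
		return false;

	/* Normal path: read ahead only if a readahead window is configured. */
	return ra_pages != 0;
}

int main(void)
{
	printf("watched file:   readahead=%d\n",
	       should_do_readahead(FMODE_PRE_CONTENT_WATCHED, 32));
	printf("unwatched file: readahead=%d\n",
	       should_do_readahead(0, 32));
	return 0;
}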