Skip to content

Commit 82b0f8c

Browse files
jankara authored and torvalds committed
mm: join struct fault_env and vm_fault
Currently we have two different structures for passing fault information around - struct vm_fault and struct fault_env. DAX will need more information in struct vm_fault to handle its faults so the content of that structure would become event closer to fault_env. Furthermore it would need to generate struct fault_env to be able to call some of the generic functions. So at this point I don't think there's much use in keeping these two structures separate. Just embed into struct vm_fault all that is needed to use it for both purposes. Link: http://lkml.kernel.org/r/1479460644-25076-2-git-send-email-jack@suse.cz Signed-off-by: Jan Kara <jack@suse.cz> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Ross Zwisler <ross.zwisler@linux.intel.com> Cc: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 8b7457e commit 82b0f8c

File tree

11 files changed

+423
-422
lines changed

11 files changed

+423
-422
lines changed

Documentation/filesystems/Locking

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -556,7 +556,7 @@ till "end_pgoff". ->map_pages() is called with page table locked and must
556556
not block. If it's not possible to reach a page without blocking,
557557
filesystem should skip it. Filesystem should use do_set_pte() to setup
558558
page table entry. Pointer to entry associated with the page is passed in
559-
"pte" field in fault_env structure. Pointers to entries for other offsets
559+
"pte" field in vm_fault structure. Pointers to entries for other offsets
560560
should be calculated relative to "pte".
561561

562562
->page_mkwrite() is called when a previously read-only pte is

fs/userfaultfd.c

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -257,9 +257,9 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
257257
* fatal_signal_pending()s, and the mmap_sem must be released before
258258
* returning it.
259259
*/
260-
int handle_userfault(struct fault_env *fe, unsigned long reason)
260+
int handle_userfault(struct vm_fault *vmf, unsigned long reason)
261261
{
262-
struct mm_struct *mm = fe->vma->vm_mm;
262+
struct mm_struct *mm = vmf->vma->vm_mm;
263263
struct userfaultfd_ctx *ctx;
264264
struct userfaultfd_wait_queue uwq;
265265
int ret;
@@ -268,7 +268,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
268268
BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
269269

270270
ret = VM_FAULT_SIGBUS;
271-
ctx = fe->vma->vm_userfaultfd_ctx.ctx;
271+
ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
272272
if (!ctx)
273273
goto out;
274274

@@ -301,17 +301,18 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
301301
* without first stopping userland access to the memory. For
302302
* VM_UFFD_MISSING userfaults this is enough for now.
303303
*/
304-
if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) {
304+
if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
305305
/*
306306
* Validate the invariant that nowait must allow retry
307307
* to be sure not to return SIGBUS erroneously on
308308
* nowait invocations.
309309
*/
310-
BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT);
310+
BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
311311
#ifdef CONFIG_DEBUG_VM
312312
if (printk_ratelimit()) {
313313
printk(KERN_WARNING
314-
"FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags);
314+
"FAULT_FLAG_ALLOW_RETRY missing %x\n",
315+
vmf->flags);
315316
dump_stack();
316317
}
317318
#endif
@@ -323,19 +324,19 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
323324
* and wait.
324325
*/
325326
ret = VM_FAULT_RETRY;
326-
if (fe->flags & FAULT_FLAG_RETRY_NOWAIT)
327+
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
327328
goto out;
328329

329330
/* take the reference before dropping the mmap_sem */
330331
userfaultfd_ctx_get(ctx);
331332

332333
init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
333334
uwq.wq.private = current;
334-
uwq.msg = userfault_msg(fe->address, fe->flags, reason);
335+
uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
335336
uwq.ctx = ctx;
336337

337338
return_to_userland =
338-
(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
339+
(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
339340
(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
340341

341342
spin_lock(&ctx->fault_pending_wqh.lock);
@@ -353,7 +354,8 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
353354
TASK_KILLABLE);
354355
spin_unlock(&ctx->fault_pending_wqh.lock);
355356

356-
must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
357+
must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
358+
reason);
357359
up_read(&mm->mmap_sem);
358360

359361
if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&

include/linux/huge_mm.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
11
#ifndef _LINUX_HUGE_MM_H
22
#define _LINUX_HUGE_MM_H
33

4-
extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
4+
extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
55
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
66
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
77
struct vm_area_struct *vma);
8-
extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
9-
extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
8+
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
9+
extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
1010
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1111
unsigned long addr,
1212
pmd_t *pmd,
@@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page)
142142
return 1;
143143
}
144144

145-
extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);
145+
extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
146146

147147
extern struct page *huge_zero_page;
148148

@@ -212,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
212212
return NULL;
213213
}
214214

215-
static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
215+
static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
216216
{
217217
return 0;
218218
}

include/linux/mm.h

Lines changed: 11 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -292,10 +292,16 @@ extern pgprot_t protection_map[16];
292292
* pgoff should be used in favour of virtual_address, if possible.
293293
*/
294294
struct vm_fault {
295+
struct vm_area_struct *vma; /* Target VMA */
295296
unsigned int flags; /* FAULT_FLAG_xxx flags */
296297
gfp_t gfp_mask; /* gfp mask to be used for allocations */
297298
pgoff_t pgoff; /* Logical page offset based on vma */
298-
void __user *virtual_address; /* Faulting virtual address */
299+
unsigned long address; /* Faulting virtual address */
300+
void __user *virtual_address; /* Faulting virtual address masked by
301+
* PAGE_MASK */
302+
pmd_t *pmd; /* Pointer to pmd entry matching
303+
* the 'address'
304+
*/
299305

300306
struct page *cow_page; /* Handler may choose to COW */
301307
struct page *page; /* ->fault handlers should return a
@@ -309,19 +315,7 @@ struct vm_fault {
309315
* VM_FAULT_DAX_LOCKED and fill in
310316
* entry here.
311317
*/
312-
};
313-
314-
/*
315-
* Page fault context: passes though page fault handler instead of endless list
316-
* of function arguments.
317-
*/
318-
struct fault_env {
319-
struct vm_area_struct *vma; /* Target VMA */
320-
unsigned long address; /* Faulting virtual address */
321-
unsigned int flags; /* FAULT_FLAG_xxx flags */
322-
pmd_t *pmd; /* Pointer to pmd entry matching
323-
* the 'address'
324-
*/
318+
/* These three entries are valid only while holding ptl lock */
325319
pte_t *pte; /* Pointer to pte entry matching
326320
* the 'address'. NULL if the page
327321
* table hasn't been allocated.
@@ -351,7 +345,7 @@ struct vm_operations_struct {
351345
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
352346
int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
353347
pmd_t *, unsigned int flags);
354-
void (*map_pages)(struct fault_env *fe,
348+
void (*map_pages)(struct vm_fault *vmf,
355349
pgoff_t start_pgoff, pgoff_t end_pgoff);
356350

357351
/* notification that a previously read-only page is about to become
@@ -625,7 +619,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
625619
return pte;
626620
}
627621

628-
int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
622+
int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
629623
struct page *page);
630624
#endif
631625

@@ -2094,7 +2088,7 @@ extern void truncate_inode_pages_final(struct address_space *);
20942088

20952089
/* generic vm_area_ops exported for stackable file systems */
20962090
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
2097-
extern void filemap_map_pages(struct fault_env *fe,
2091+
extern void filemap_map_pages(struct vm_fault *vmf,
20982092
pgoff_t start_pgoff, pgoff_t end_pgoff);
20992093
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
21002094

include/linux/userfaultfd_k.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
2828
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
2929

30-
extern int handle_userfault(struct fault_env *fe, unsigned long reason);
30+
extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
3131

3232
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
3333
unsigned long src_start, unsigned long len);
@@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
5555
#else /* CONFIG_USERFAULTFD */
5656

5757
/* mm helpers */
58-
static inline int handle_userfault(struct fault_env *fe, unsigned long reason)
58+
static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason)
5959
{
6060
return VM_FAULT_SIGBUS;
6161
}

mm/filemap.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2164,12 +2164,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
21642164
}
21652165
EXPORT_SYMBOL(filemap_fault);
21662166

2167-
void filemap_map_pages(struct fault_env *fe,
2167+
void filemap_map_pages(struct vm_fault *vmf,
21682168
pgoff_t start_pgoff, pgoff_t end_pgoff)
21692169
{
21702170
struct radix_tree_iter iter;
21712171
void **slot;
2172-
struct file *file = fe->vma->vm_file;
2172+
struct file *file = vmf->vma->vm_file;
21732173
struct address_space *mapping = file->f_mapping;
21742174
pgoff_t last_pgoff = start_pgoff;
21752175
loff_t size;
@@ -2225,11 +2225,11 @@ void filemap_map_pages(struct fault_env *fe,
22252225
if (file->f_ra.mmap_miss > 0)
22262226
file->f_ra.mmap_miss--;
22272227

2228-
fe->address += (iter.index - last_pgoff) << PAGE_SHIFT;
2229-
if (fe->pte)
2230-
fe->pte += iter.index - last_pgoff;
2228+
vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
2229+
if (vmf->pte)
2230+
vmf->pte += iter.index - last_pgoff;
22312231
last_pgoff = iter.index;
2232-
if (alloc_set_pte(fe, NULL, page))
2232+
if (alloc_set_pte(vmf, NULL, page))
22332233
goto unlock;
22342234
unlock_page(page);
22352235
goto next;
@@ -2239,7 +2239,7 @@ void filemap_map_pages(struct fault_env *fe,
22392239
put_page(page);
22402240
next:
22412241
/* Huge page is mapped? No need to proceed. */
2242-
if (pmd_trans_huge(*fe->pmd))
2242+
if (pmd_trans_huge(*vmf->pmd))
22432243
break;
22442244
if (iter.index == end_pgoff)
22452245
break;

0 commit comments

Comments (0)