Skip to content

Commit 6028372

Browse files
jankara authored and akpm00 committed
readahead: add trace points
Add a couple of trace points to make debugging readahead logic easier. [jack@suse.cz: v2] Link: https://lkml.kernel.org/r/20250909145849.5090-2-jack@suse.cz Link: https://lkml.kernel.org/r/20250908145533.31528-2-jack@suse.cz Signed-off-by: Jan Kara <jack@suse.cz> Tested-by: Pankaj Raghav <p.raghav@samsung.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent e1831e8 commit 6028372

File tree

2 files changed

+140
-0
lines changed

2 files changed

+140
-0
lines changed

include/trace/events/readahead.h

Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
#undef TRACE_SYSTEM
3+
#define TRACE_SYSTEM readahead
4+
5+
#if !defined(_TRACE_FILEMAP_H) || defined(TRACE_HEADER_MULTI_READ)
6+
#define _TRACE_READAHEAD_H
7+
8+
#include <linux/types.h>
9+
#include <linux/tracepoint.h>
10+
#include <linux/mm.h>
11+
#include <linux/fs.h>
12+
#include <linux/pagemap.h>
13+
14+
TRACE_EVENT(page_cache_ra_unbounded,
15+
TP_PROTO(struct inode *inode, pgoff_t index, unsigned long nr_to_read,
16+
unsigned long lookahead_size),
17+
18+
TP_ARGS(inode, index, nr_to_read, lookahead_size),
19+
20+
TP_STRUCT__entry(
21+
__field(unsigned long, i_ino)
22+
__field(dev_t, s_dev)
23+
__field(pgoff_t, index)
24+
__field(unsigned long, nr_to_read)
25+
__field(unsigned long, lookahead_size)
26+
),
27+
28+
TP_fast_assign(
29+
__entry->i_ino = inode->i_ino;
30+
__entry->s_dev = inode->i_sb->s_dev;
31+
__entry->index = index;
32+
__entry->nr_to_read = nr_to_read;
33+
__entry->lookahead_size = lookahead_size;
34+
),
35+
36+
TP_printk(
37+
"dev=%d:%d ino=%lx index=%lu nr_to_read=%lu lookahead_size=%lu",
38+
MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
39+
__entry->index, __entry->nr_to_read, __entry->lookahead_size
40+
)
41+
);
42+
43+
TRACE_EVENT(page_cache_ra_order,
44+
TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra),
45+
46+
TP_ARGS(inode, index, ra),
47+
48+
TP_STRUCT__entry(
49+
__field(unsigned long, i_ino)
50+
__field(dev_t, s_dev)
51+
__field(pgoff_t, index)
52+
__field(unsigned int, order)
53+
__field(unsigned int, size)
54+
__field(unsigned int, async_size)
55+
__field(unsigned int, ra_pages)
56+
),
57+
58+
TP_fast_assign(
59+
__entry->i_ino = inode->i_ino;
60+
__entry->s_dev = inode->i_sb->s_dev;
61+
__entry->index = index;
62+
__entry->order = ra->order;
63+
__entry->size = ra->size;
64+
__entry->async_size = ra->async_size;
65+
__entry->ra_pages = ra->ra_pages;
66+
),
67+
68+
TP_printk(
69+
"dev=%d:%d ino=%lx index=%lu order=%u size=%u async_size=%u ra_pages=%u",
70+
MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
71+
__entry->index, __entry->order, __entry->size,
72+
__entry->async_size, __entry->ra_pages
73+
)
74+
);
75+
76+
DECLARE_EVENT_CLASS(page_cache_ra_op,
77+
TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
78+
unsigned long req_count),
79+
80+
TP_ARGS(inode, index, ra, req_count),
81+
82+
TP_STRUCT__entry(
83+
__field(unsigned long, i_ino)
84+
__field(dev_t, s_dev)
85+
__field(pgoff_t, index)
86+
__field(unsigned int, order)
87+
__field(unsigned int, size)
88+
__field(unsigned int, async_size)
89+
__field(unsigned int, ra_pages)
90+
__field(unsigned int, mmap_miss)
91+
__field(loff_t, prev_pos)
92+
__field(unsigned long, req_count)
93+
),
94+
95+
TP_fast_assign(
96+
__entry->i_ino = inode->i_ino;
97+
__entry->s_dev = inode->i_sb->s_dev;
98+
__entry->index = index;
99+
__entry->order = ra->order;
100+
__entry->size = ra->size;
101+
__entry->async_size = ra->async_size;
102+
__entry->ra_pages = ra->ra_pages;
103+
__entry->mmap_miss = ra->mmap_miss;
104+
__entry->prev_pos = ra->prev_pos;
105+
__entry->req_count = req_count;
106+
),
107+
108+
TP_printk(
109+
"dev=%d:%d ino=%lx index=%lu req_count=%lu order=%u size=%u async_size=%u ra_pages=%u mmap_miss=%u prev_pos=%lld",
110+
MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
111+
__entry->index, __entry->req_count, __entry->order,
112+
__entry->size, __entry->async_size, __entry->ra_pages,
113+
__entry->mmap_miss, __entry->prev_pos
114+
)
115+
);
116+
117+
DEFINE_EVENT(page_cache_ra_op, page_cache_sync_ra,
118+
TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
119+
unsigned long req_count),
120+
TP_ARGS(inode, index, ra, req_count)
121+
);
122+
123+
DEFINE_EVENT(page_cache_ra_op, page_cache_async_ra,
124+
TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
125+
unsigned long req_count),
126+
TP_ARGS(inode, index, ra, req_count)
127+
);
128+
129+
#endif /* _TRACE_FILEMAP_H */
130+
131+
/* This part must be outside protection */
132+
#include <trace/define_trace.h>

mm/readahead.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,9 @@
129129
#include <linux/fadvise.h>
130130
#include <linux/sched/mm.h>
131131

132+
#define CREATE_TRACE_POINTS
133+
#include <trace/events/readahead.h>
134+
132135
#include "internal.h"
133136

134137
/*
@@ -225,6 +228,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
225228
*/
226229
unsigned int nofs = memalloc_nofs_save();
227230

231+
trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read,
232+
lookahead_size);
228233
filemap_invalidate_lock_shared(mapping);
229234
index = mapping_align_index(mapping, index);
230235

@@ -470,6 +475,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
470475
gfp_t gfp = readahead_gfp_mask(mapping);
471476
unsigned int new_order = ra->order;
472477

478+
trace_page_cache_ra_order(mapping->host, start, ra);
473479
if (!mapping_large_folio_support(mapping)) {
474480
ra->order = 0;
475481
goto fallback;
@@ -554,6 +560,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
554560
unsigned long max_pages, contig_count;
555561
pgoff_t prev_index, miss;
556562

563+
trace_page_cache_sync_ra(ractl->mapping->host, index, ra, req_count);
557564
/*
558565
* Even if readahead is disabled, issue this request as readahead
559566
* as we'll need it to satisfy the requested range. The forced
@@ -638,6 +645,7 @@ void page_cache_async_ra(struct readahead_control *ractl,
638645
if (folio_test_writeback(folio))
639646
return;
640647

648+
trace_page_cache_async_ra(ractl->mapping->host, index, ra, req_count);
641649
folio_clear_readahead(folio);
642650

643651
if (blk_cgroup_congested())

0 commit comments

Comments
 (0)