License cleanup: add SPDX GPL-2.0 license identifier to files with no license
include/trace/events/writeback.h
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)                                 \
        __print_flags(state, "|",                               \
                {I_DIRTY_SYNC,          "I_DIRTY_SYNC"},        \
                {I_DIRTY_DATASYNC,      "I_DIRTY_DATASYNC"},    \
                {I_DIRTY_PAGES,         "I_DIRTY_PAGES"},       \
                {I_NEW,                 "I_NEW"},               \
                {I_WILL_FREE,           "I_WILL_FREE"},         \
                {I_FREEING,             "I_FREEING"},           \
                {I_CLEAR,               "I_CLEAR"},             \
                {I_SYNC,                "I_SYNC"},              \
                {I_DIRTY_TIME,          "I_DIRTY_TIME"},        \
                {I_DIRTY_TIME_EXPIRED,  "I_DIRTY_TIME_EXPIRED"}, \
                {I_REFERENCED,          "I_REFERENCED"}         \
        )

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)         TRACE_DEFINE_ENUM(a);
#define EMe(a,b)        TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON                                                  \
        EM( WB_REASON_BACKGROUND,               "background")           \
        EM( WB_REASON_VMSCAN,                   "vmscan")               \
        EM( WB_REASON_SYNC,                     "sync")                 \
        EM( WB_REASON_PERIODIC,                 "periodic")             \
        EM( WB_REASON_LAPTOP_TIMER,             "laptop_timer")         \
        EM( WB_REASON_FREE_MORE_MEM,            "free_more_memory")     \
        EM( WB_REASON_FS_FREE_SPACE,            "fs_free_space")        \
        EMe(WB_REASON_FORKER_THREAD,            "forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)         { a, b },
#define EMe(a,b)        { a, b }

struct wb_writeback_work;

TRACE_EVENT(writeback_dirty_page,

        TP_PROTO(struct page *page, struct address_space *mapping),

        TP_ARGS(page, mapping),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(pgoff_t, index)
        ),

        TP_fast_assign(
                strncpy(__entry->name,
                        mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
                __entry->ino = mapping ? mapping->host->i_ino : 0;
                __entry->index = page->index;
        ),

        TP_printk("bdi %s: ino=%lu index=%lu",
                __entry->name,
                __entry->ino,
                __entry->index
        )
);

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, flags)
        ),

        TP_fast_assign(
                struct backing_dev_info *bdi = inode_to_bdi(inode);

                /* may be called for files on pseudo FSes w/ unregistered bdi */
                strncpy(__entry->name,
                        bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->flags          = flags;
        ),

        TP_printk("bdi %s: ino=%lu state=%s flags=%s",
                __entry->name,
                __entry->ino,
                show_inode_state(__entry->state),
                show_inode_state(__entry->flags)
        )
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

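/*
 * The helpers below resolve a bdi_writeback/writeback_control to the
 * inode number of the owning cgroup's kernfs node (or -1U when cgroup
 * writeback is not in use).  They are only needed by the TP_fast_assign()
 * bodies, so they are guarded by CREATE_TRACE_POINTS and thus compiled
 * only in the translation unit that instantiates these tracepoints.
 */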
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
        return wb->memcg_css->cgroup->kn->id.ino;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
        if (wbc->wb)
                return __trace_wb_assign_cgroup(wbc->wb);
        else
                return -1U;
}
#else   /* CONFIG_CGROUP_WRITEBACK */

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
        return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
        return -1U;
}

#endif  /* CONFIG_CGROUP_WRITEBACK */
#endif  /* CREATE_TRACE_POINTS */

DECLARE_EVENT_CLASS(writeback_write_inode_template,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(int, sync_mode)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strncpy(__entry->name,
                        dev_name(inode_to_bdi(inode)->dev), 32);
                __entry->ino            = inode->i_ino;
                __entry->sync_mode      = wbc->sync_mode;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
                __entry->name,
                __entry->ino,
                __entry->sync_mode,
                __entry->cgroup_ino
        )
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
        TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
        TP_ARGS(wb, work),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(long, nr_pages)
                __field(dev_t, sb_dev)
                __field(int, sync_mode)
                __field(int, for_kupdate)
                __field(int, range_cyclic)
                __field(int, for_background)
                __field(int, reason)
                __field(unsigned int, cgroup_ino)
        ),
        TP_fast_assign(
                strncpy(__entry->name,
                        wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
                __entry->nr_pages = work->nr_pages;
                __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
                __entry->sync_mode = work->sync_mode;
                __entry->for_kupdate = work->for_kupdate;
                __entry->range_cyclic = work->range_cyclic;
                __entry->for_background = work->for_background;
                __entry->reason = work->reason;
                __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
                  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
                  __entry->name,
                  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
                  __entry->nr_pages,
                  __entry->sync_mode,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
                  __print_symbolic(__entry->reason, WB_WORK_REASON),
                  __entry->cgroup_ino
        )
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
        TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
        TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
        TP_PROTO(long pages_written),
        TP_ARGS(pages_written),
        TP_STRUCT__entry(
                __field(long,           pages)
        ),
        TP_fast_assign(
                __entry->pages          = pages_written;
        ),
        TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
        TP_PROTO(struct bdi_writeback *wb),
        TP_ARGS(wb),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned int, cgroup_ino)
        ),
        TP_fast_assign(
                strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
                __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: cgroup_ino=%u",
                  __entry->name,
                  __entry->cgroup_ino
        )
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
        TP_PROTO(struct bdi_writeback *wb), \
        TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
        TP_PROTO(struct backing_dev_info *bdi),
        TP_ARGS(bdi),
        TP_STRUCT__entry(
                __array(char, name, 32)
        ),
        TP_fast_assign(
                strncpy(__entry->name, dev_name(bdi->dev), 32);
        ),
        TP_printk("bdi %s",
                __entry->name
        )
);

DECLARE_EVENT_CLASS(wbc_class,
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
        TP_ARGS(wbc, bdi),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(long, nr_to_write)
                __field(long, pages_skipped)
                __field(int, sync_mode)
                __field(int, for_kupdate)
                __field(int, for_background)
                __field(int, for_reclaim)
                __field(int, range_cyclic)
                __field(long, range_start)
                __field(long, range_end)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strncpy(__entry->name, dev_name(bdi->dev), 32);
                __entry->nr_to_write    = wbc->nr_to_write;
                __entry->pages_skipped  = wbc->pages_skipped;
                __entry->sync_mode      = wbc->sync_mode;
                __entry->for_kupdate    = wbc->for_kupdate;
                __entry->for_background = wbc->for_background;
                __entry->for_reclaim    = wbc->for_reclaim;
                __entry->range_cyclic   = wbc->range_cyclic;
                __entry->range_start    = (long)wbc->range_start;
                __entry->range_end      = (long)wbc->range_end;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
                "bgrd=%d reclm=%d cyclic=%d "
                "start=0x%lx end=0x%lx cgroup_ino=%u",
                __entry->name,
                __entry->nr_to_write,
                __entry->pages_skipped,
                __entry->sync_mode,
                __entry->for_kupdate,
                __entry->for_background,
                __entry->for_reclaim,
                __entry->range_cyclic,
                __entry->range_start,
                __entry->range_end,
                __entry->cgroup_ino
        )
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
        TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
        TP_PROTO(struct bdi_writeback *wb,
                 struct wb_writeback_work *work,
                 int moved),
        TP_ARGS(wb, work, moved),
        TP_STRUCT__entry(
                __array(char,           name, 32)
                __field(unsigned long,  older)
                __field(long,           age)
                __field(int,            moved)
                __field(int,            reason)
                __field(unsigned int,   cgroup_ino)
        ),
        TP_fast_assign(
                unsigned long *older_than_this = work->older_than_this;
                strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
                __entry->older  = older_than_this ?  *older_than_this : 0;
                __entry->age    = older_than_this ?
                                  (jiffies - *older_than_this) * 1000 / HZ : -1;
                __entry->moved  = moved;
                __entry->reason = work->reason;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
                __entry->name,
                __entry->older, /* older_than_this in jiffies */
                __entry->age,   /* older_than_this in relative milliseconds */
                __entry->moved,
                __print_symbolic(__entry->reason, WB_WORK_REASON),
                __entry->cgroup_ino
        )
);

TRACE_EVENT(global_dirty_state,

        TP_PROTO(unsigned long background_thresh,
                 unsigned long dirty_thresh
        ),

        TP_ARGS(background_thresh,
                dirty_thresh
        ),

        TP_STRUCT__entry(
                __field(unsigned long,  nr_dirty)
                __field(unsigned long,  nr_writeback)
                __field(unsigned long,  nr_unstable)
                __field(unsigned long,  background_thresh)
                __field(unsigned long,  dirty_thresh)
                __field(unsigned long,  dirty_limit)
                __field(unsigned long,  nr_dirtied)
                __field(unsigned long,  nr_written)
        ),

        TP_fast_assign(
                __entry->nr_dirty       = global_node_page_state(NR_FILE_DIRTY);
                __entry->nr_writeback   = global_node_page_state(NR_WRITEBACK);
                __entry->nr_unstable    = global_node_page_state(NR_UNSTABLE_NFS);
                __entry->nr_dirtied     = global_node_page_state(NR_DIRTIED);
                __entry->nr_written     = global_node_page_state(NR_WRITTEN);
                __entry->background_thresh = background_thresh;
                __entry->dirty_thresh   = dirty_thresh;
                __entry->dirty_limit    = global_wb_domain.dirty_limit;
        ),

        TP_printk("dirty=%lu writeback=%lu unstable=%lu "
                  "bg_thresh=%lu thresh=%lu limit=%lu "
                  "dirtied=%lu written=%lu",
                  __entry->nr_dirty,
                  __entry->nr_writeback,
                  __entry->nr_unstable,
                  __entry->background_thresh,
                  __entry->dirty_thresh,
                  __entry->dirty_limit,
                  __entry->nr_dirtied,
                  __entry->nr_written
        )
);

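/*
 * Convert a page-based quantity (e.g. pages or pages/sec) to KB or KB/s:
 * a page is 2^PAGE_SHIFT bytes, so shifting left by (PAGE_SHIFT - 10)
 * multiplies by PAGE_SIZE / 1024.
 */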
#define KBps(x)                 ((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

        TP_PROTO(struct bdi_writeback *wb,
                 unsigned long dirty_rate,
                 unsigned long task_ratelimit),

        TP_ARGS(wb, dirty_rate, task_ratelimit),

        TP_STRUCT__entry(
                __array(char,           bdi, 32)
                __field(unsigned long,  write_bw)
                __field(unsigned long,  avg_write_bw)
                __field(unsigned long,  dirty_rate)
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned long,  balanced_dirty_ratelimit)
                __field(unsigned int,   cgroup_ino)
        ),

        TP_fast_assign(
                strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
                __entry->write_bw       = KBps(wb->write_bandwidth);
                __entry->avg_write_bw   = KBps(wb->avg_write_bandwidth);
                __entry->dirty_rate     = KBps(dirty_rate);
                __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->balanced_dirty_ratelimit =
                                        KBps(wb->balanced_dirty_ratelimit);
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),

        TP_printk("bdi %s: "
                  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
                  __entry->bdi,
                  __entry->write_bw,            /* write bandwidth */
                  __entry->avg_write_bw,        /* avg write bandwidth */
                  __entry->dirty_rate,          /* bdi dirty rate */
                  __entry->dirty_ratelimit,     /* base ratelimit */
                  __entry->task_ratelimit, /* ratelimit with position control */
                  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
                  __entry->cgroup_ino
        )
);

TRACE_EVENT(balance_dirty_pages,

        TP_PROTO(struct bdi_writeback *wb,
                 unsigned long thresh,
                 unsigned long bg_thresh,
                 unsigned long dirty,
                 unsigned long bdi_thresh,
                 unsigned long bdi_dirty,
                 unsigned long dirty_ratelimit,
                 unsigned long task_ratelimit,
                 unsigned long dirtied,
                 unsigned long period,
                 long pause,
                 unsigned long start_time),

        TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
                dirty_ratelimit, task_ratelimit,
                dirtied, period, pause, start_time),

        TP_STRUCT__entry(
                __array(         char,  bdi, 32)
                __field(unsigned long,  limit)
                __field(unsigned long,  setpoint)
                __field(unsigned long,  dirty)
                __field(unsigned long,  bdi_setpoint)
                __field(unsigned long,  bdi_dirty)
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned int,   dirtied)
                __field(unsigned int,   dirtied_pause)
                __field(unsigned long,  paused)
                __field(         long,  pause)
                __field(unsigned long,  period)
                __field(         long,  think)
                __field(unsigned int,   cgroup_ino)
        ),

        TP_fast_assign(
                unsigned long freerun = (thresh + bg_thresh) / 2;
                strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);

                __entry->limit          = global_wb_domain.dirty_limit;
                __entry->setpoint       = (global_wb_domain.dirty_limit +
                                                freerun) / 2;
                __entry->dirty          = dirty;
                __entry->bdi_setpoint   = __entry->setpoint *
                                                bdi_thresh / (thresh + 1);
                __entry->bdi_dirty      = bdi_dirty;
                __entry->dirty_ratelimit = KBps(dirty_ratelimit);
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->dirtied        = dirtied;
                __entry->dirtied_pause  = current->nr_dirtied_pause;
                __entry->think          = current->dirty_paused_when == 0 ? 0 :
                         (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
                __entry->period         = period * 1000 / HZ;
                __entry->pause          = pause * 1000 / HZ;
                __entry->paused         = (jiffies - start_time) * 1000 / HZ;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),


        TP_printk("bdi %s: "
                  "limit=%lu setpoint=%lu dirty=%lu "
                  "bdi_setpoint=%lu bdi_dirty=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "dirtied=%u dirtied_pause=%u "
                  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
                  __entry->bdi,
                  __entry->limit,
                  __entry->setpoint,
                  __entry->dirty,
                  __entry->bdi_setpoint,
                  __entry->bdi_dirty,
                  __entry->dirty_ratelimit,
                  __entry->task_ratelimit,
                  __entry->dirtied,
                  __entry->dirtied_pause,
                  __entry->paused,      /* ms */
                  __entry->pause,       /* ms */
                  __entry->period,      /* ms */
                  __entry->think,       /* ms */
                  __entry->cgroup_ino
          )
);

TRACE_EVENT(writeback_sb_inodes_requeue,

        TP_PROTO(struct inode *inode),
        TP_ARGS(inode),

        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strncpy(__entry->name,
                        dev_name(inode_to_bdi(inode)->dev), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(inode_to_wb(inode));
        ),

        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
                  __entry->cgroup_ino
        )
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed),

        TP_STRUCT__entry(
                __field(        unsigned int,   usec_timeout    )
                __field(        unsigned int,   usec_delayed    )
        ),

        TP_fast_assign(
                __entry->usec_timeout   = usec_timeout;
                __entry->usec_delayed   = usec_delayed;
        ),

        TP_printk("usec_timeout=%u usec_delayed=%u",
                        __entry->usec_timeout,
                        __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed)
);

DECLARE_EVENT_CLASS(writeback_single_inode_template,

        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write
        ),

        TP_ARGS(inode, wbc, nr_to_write),

        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
                __field(unsigned long, writeback_index)
                __field(long, nr_to_write)
                __field(unsigned long, wrote)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strncpy(__entry->name,
                        dev_name(inode_to_bdi(inode)->dev), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
                __entry->writeback_index = inode->i_mapping->writeback_index;
                __entry->nr_to_write    = nr_to_write;
                __entry->wrote          = nr_to_write - wbc->nr_to_write;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
                  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
                  __entry->writeback_index,
                  __entry->nr_to_write,
                  __entry->wrote,
                  __entry->cgroup_ino
        )
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write),
        TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write),
        TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_inode_template,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode),

        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(unsigned long,  ino                     )
                __field(unsigned long,  state                   )
                __field(        __u16, mode                     )
                __field(unsigned long, dirtied_when             )
        ),

        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
                __entry->state  = inode->i_state;
                __entry->mode   = inode->i_mode;
                __entry->dirtied_when = inode->dirtied_when;
        ),

        TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino, __entry->dirtied_when,
                  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
        TP_PROTO(struct inode *inode),
        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
        TP_PROTO(struct inode *inode),
        TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
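/*
 * Usage sketch (hypothetical caller shown): exactly one translation unit
 * defines CREATE_TRACE_POINTS before including this header, which makes
 * define_trace.h emit the tracepoint definitions; every other user just
 * includes the header and calls the generated trace_<event>() functions.
 *
 *      #define CREATE_TRACE_POINTS
 *      #include <trace/events/writeback.h>
 *
 *      static void report_pages_written(long nr)
 *      {
 *              trace_writeback_pages_written(nr);
 *      }
 */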