/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLKTRACE_H
#define BLKTRACE_H

#include <linux/blkdev.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>

#if defined(CONFIG_BLK_DEV_IO_TRACE)

#include <linux/sysfs.h>

15 struct blk_trace {
16 int trace_state;
17 struct rchan *rchan;
18 unsigned long __percpu *sequence;
19 unsigned char __percpu *msg_data;
20 u16 act_mask;
21 u64 start_lba;
22 u64 end_lba;
23 u32 pid;
24 u32 dev;
25 struct dentry *dir;
26 struct dentry *dropped_file;
27 struct dentry *msg_file;
28 struct list_head running_list;
29 atomic_t dropped;
30 };
31
struct blkcg;

extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
extern __printf(3, 4)
void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);

/**
 * blk_add_trace_msg - Add a (simple) message to the blktrace stream
 * @q:		queue the io is for
 * @fmt:	format to print message in
 * args...	Variable argument list for format
 *
 * Description:
 *     Records a (simple) message onto the blktrace stream.
 *
 *     NOTE: BLK_TN_MAX_MSG characters are output at most.
 *     NOTE: Can not use 'static inline' due to presence of var args...
 *
 **/
/*
 * Record a formatted message on @q's blktrace stream, attributed to
 * cgroup @cg.  The queue's blk_trace pointer is sampled under RCU; if
 * no trace is attached (the common case, hence unlikely()) this is a
 * cheap no-op.  Must be a macro because of the variadic arguments.
 */
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...)			\
	do {								\
		struct blk_trace *bt;					\
									\
		rcu_read_lock();					\
		bt = rcu_dereference((q)->blk_trace);			\
		if (unlikely(bt))					\
			__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
		rcu_read_unlock();					\
	} while (0)
/* Non-cgroup variant: record a message with no cgroup attribution. */
#define blk_add_trace_msg(q, fmt, ...)					\
	blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
/* Upper bound on the length of a single trace message. */
#define BLK_TN_MAX_MSG		128
65
blk_trace_note_message_enabled(struct request_queue * q)66 static inline bool blk_trace_note_message_enabled(struct request_queue *q)
67 {
68 struct blk_trace *bt;
69 bool ret;
70
71 rcu_read_lock();
72 bt = rcu_dereference(q->blk_trace);
73 ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
74 rcu_read_unlock();
75 return ret;
76 }

extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
				void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			   struct block_device *bdev,
			   char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
extern void blk_trace_remove_sysfs(struct device *dev);
extern int blk_trace_init_sysfs(struct device *dev);

extern struct attribute_group blk_trace_attr_group;

#else /* !CONFIG_BLK_DEV_IO_TRACE */
/* Stubs when blktrace is compiled out: setup paths fail with -ENOTTY,
 * everything else is a no-op so call sites need no #ifdefs. */
# define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
# define blk_trace_shutdown(q)				do { } while (0)
# define blk_add_driver_data(q, rq, data, len)		do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
# define blk_trace_startstop(q, start)			(-ENOTTY)
# define blk_trace_remove(q)				(-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...)	do { } while (0)
# define blk_trace_remove_sysfs(dev)			do { } while (0)
# define blk_trace_note_message_enabled(q)		(false)
/* Stub: nothing to initialize when blktrace is compiled out. */
static inline int blk_trace_init_sysfs(struct device *dev)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_COMPAT

110 struct compat_blk_user_trace_setup {
111 char name[BLKTRACE_BDEV_SIZE];
112 u16 act_mask;
113 u32 buf_size;
114 u32 buf_nr;
115 compat_u64 start_lba;
116 compat_u64 end_lba;
117 u32 pid;
118 };
#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)

#endif

extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);

blk_rq_trace_sector(struct request * rq)125 static inline sector_t blk_rq_trace_sector(struct request *rq)
126 {
127 /*
128 * Tracing should ignore starting sector for passthrough requests and
129 * requests where starting sector didn't get set.
130 */
131 if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
132 return 0;
133 return blk_rq_pos(rq);
134 }
/*
 * Return the sector count of @rq for tracing purposes; passthrough
 * requests report 0, matching blk_rq_trace_sector() above.
 */
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
{
	return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
}

#endif
