1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #undef TRACE_SYSTEM 3 #define TRACE_SYSTEM bcache 4 5 #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ) 6 #define _TRACE_BCACHE_H 7 8 #include <linux/tracepoint.h> 9 10 DECLARE_EVENT_CLASS(bcache_request, 11 TP_PROTO(struct bcache_device *d, struct bio *bio), 12 TP_ARGS(d, bio), 13 14 TP_STRUCT__entry( 15 __field(dev_t, dev ) 16 __field(unsigned int, orig_major ) 17 __field(unsigned int, orig_minor ) 18 __field(sector_t, sector ) 19 __field(dev_t, orig_sector ) 20 __field(unsigned int, nr_sector ) 21 __array(char, rwbs, 6 ) 22 ), 23 24 TP_fast_assign( 25 __entry->dev = bio_dev(bio); 26 __entry->orig_major = d->disk->major; 27 __entry->orig_minor = d->disk->first_minor; 28 __entry->sector = bio->bi_iter.bi_sector; 29 __entry->orig_sector = bio->bi_iter.bi_sector - 16; 30 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 31 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 32 ), 33 34 TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", 35 MAJOR(__entry->dev), MINOR(__entry->dev), 36 __entry->rwbs, (unsigned long long)__entry->sector, 37 __entry->nr_sector, __entry->orig_major, __entry->orig_minor, 38 (unsigned long long)__entry->orig_sector) 39 ); 40 41 DECLARE_EVENT_CLASS(bkey, 42 TP_PROTO(struct bkey *k), 43 TP_ARGS(k), 44 45 TP_STRUCT__entry( 46 __field(u32, size ) 47 __field(u32, inode ) 48 __field(u64, offset ) 49 __field(bool, dirty ) 50 ), 51 52 TP_fast_assign( 53 __entry->inode = KEY_INODE(k); 54 __entry->offset = KEY_OFFSET(k); 55 __entry->size = KEY_SIZE(k); 56 __entry->dirty = KEY_DIRTY(k); 57 ), 58 59 TP_printk("%u:%llu len %u dirty %u", __entry->inode, 60 __entry->offset, __entry->size, __entry->dirty) 61 ); 62 63 DECLARE_EVENT_CLASS(btree_node, 64 TP_PROTO(struct btree *b), 65 TP_ARGS(b), 66 67 TP_STRUCT__entry( 68 __field(size_t, bucket ) 69 ), 70 71 TP_fast_assign( 72 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); 73 ), 74 75 TP_printk("bucket %zu", __entry->bucket) 76 ); 77 78 /* request.c */ 79 80 DEFINE_EVENT(bcache_request, bcache_request_start, 81 TP_PROTO(struct bcache_device *d, struct bio *bio), 82 TP_ARGS(d, bio) 83 ); 84 85 DEFINE_EVENT(bcache_request, bcache_request_end, 86 TP_PROTO(struct bcache_device *d, struct bio *bio), 87 TP_ARGS(d, bio) 88 ); 89 90 DECLARE_EVENT_CLASS(bcache_bio, 91 TP_PROTO(struct bio *bio), 92 TP_ARGS(bio), 93 94 TP_STRUCT__entry( 95 __field(dev_t, dev ) 96 __field(sector_t, sector ) 97 __field(unsigned int, nr_sector ) 98 __array(char, rwbs, 6 ) 99 ), 100 101 TP_fast_assign( 102 __entry->dev = bio_dev(bio); 103 __entry->sector = bio->bi_iter.bi_sector; 104 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 105 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 106 ), 107 108 TP_printk("%d,%d %s %llu + %u", 109 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, 110 (unsigned long long)__entry->sector, __entry->nr_sector) 111 ); 112 113 DEFINE_EVENT(bcache_bio, bcache_bypass_sequential, 114 TP_PROTO(struct bio *bio), 115 TP_ARGS(bio) 116 ); 117 118 DEFINE_EVENT(bcache_bio, bcache_bypass_congested, 119 TP_PROTO(struct bio *bio), 120 TP_ARGS(bio) 121 ); 122 123 TRACE_EVENT(bcache_read, 124 TP_PROTO(struct bio *bio, bool hit, bool bypass), 125 TP_ARGS(bio, hit, bypass), 126 127 TP_STRUCT__entry( 128 __field(dev_t, dev ) 129 __field(sector_t, sector ) 130 __field(unsigned int, nr_sector ) 131 __array(char, rwbs, 6 ) 132 __field(bool, cache_hit ) 133 __field(bool, bypass ) 134 ), 135 136 TP_fast_assign( 137 __entry->dev = bio_dev(bio); 138 __entry->sector = bio->bi_iter.bi_sector; 139 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 140 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 141 __entry->cache_hit = hit; 142 __entry->bypass = bypass; 143 ), 144 145 TP_printk("%d,%d %s %llu + %u hit %u bypass %u", 146 MAJOR(__entry->dev), MINOR(__entry->dev), 147 __entry->rwbs, (unsigned long long)__entry->sector, 148 __entry->nr_sector, __entry->cache_hit, __entry->bypass) 149 ); 150 151 TRACE_EVENT(bcache_write, 152 TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio, 153 bool writeback, bool bypass), 154 TP_ARGS(c, inode, bio, writeback, bypass), 155 156 TP_STRUCT__entry( 157 __array(char, uuid, 16 ) 158 __field(u64, inode ) 159 __field(sector_t, sector ) 160 __field(unsigned int, nr_sector ) 161 __array(char, rwbs, 6 ) 162 __field(bool, writeback ) 163 __field(bool, bypass ) 164 ), 165 166 TP_fast_assign( 167 memcpy(__entry->uuid, c->sb.set_uuid, 16); 168 __entry->inode = inode; 169 __entry->sector = bio->bi_iter.bi_sector; 170 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 171 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 172 __entry->writeback = writeback; 173 __entry->bypass = bypass; 174 ), 175 176 TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u", 177 __entry->uuid, __entry->inode, 178 __entry->rwbs, (unsigned long long)__entry->sector, 179 __entry->nr_sector, __entry->writeback, __entry->bypass) 180 ); 181 182 DEFINE_EVENT(bcache_bio, bcache_read_retry, 183 TP_PROTO(struct bio *bio), 184 TP_ARGS(bio) 185 ); 186 187 DEFINE_EVENT(bkey, bcache_cache_insert, 188 TP_PROTO(struct bkey *k), 189 TP_ARGS(k) 190 ); 191 192 /* Journal */ 193 194 DECLARE_EVENT_CLASS(cache_set, 195 TP_PROTO(struct cache_set *c), 196 TP_ARGS(c), 197 198 TP_STRUCT__entry( 199 __array(char, uuid, 16 ) 200 ), 201 202 TP_fast_assign( 203 memcpy(__entry->uuid, c->sb.set_uuid, 16); 204 ), 205 206 TP_printk("%pU", __entry->uuid) 207 ); 208 209 DEFINE_EVENT(bkey, bcache_journal_replay_key, 210 TP_PROTO(struct bkey *k), 211 TP_ARGS(k) 212 ); 213 214 DEFINE_EVENT(cache_set, bcache_journal_full, 215 TP_PROTO(struct cache_set *c), 216 TP_ARGS(c) 217 ); 218 219 DEFINE_EVENT(cache_set, bcache_journal_entry_full, 220 TP_PROTO(struct cache_set *c), 221 TP_ARGS(c) 222 ); 223 224 DEFINE_EVENT(bcache_bio, bcache_journal_write, 225 TP_PROTO(struct bio *bio), 226 TP_ARGS(bio) 227 ); 228 229 /* Btree */ 230 231 DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize, 232 TP_PROTO(struct cache_set *c), 233 TP_ARGS(c) 234 ); 235 236 DEFINE_EVENT(btree_node, bcache_btree_read, 237 TP_PROTO(struct btree *b), 238 TP_ARGS(b) 239 ); 240 241 TRACE_EVENT(bcache_btree_write, 242 TP_PROTO(struct btree *b), 243 TP_ARGS(b), 244 245 TP_STRUCT__entry( 246 __field(size_t, bucket ) 247 __field(unsigned, block ) 248 __field(unsigned, keys ) 249 ), 250 251 TP_fast_assign( 252 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); 253 __entry->block = b->written; 254 __entry->keys = b->keys.set[b->keys.nsets].data->keys; 255 ), 256 257 TP_printk("bucket %zu", __entry->bucket) 258 ); 259 260 DEFINE_EVENT(btree_node, bcache_btree_node_alloc, 261 TP_PROTO(struct btree *b), 262 TP_ARGS(b) 263 ); 264 265 DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail, 266 TP_PROTO(struct cache_set *c), 267 TP_ARGS(c) 268 ); 269 270 DEFINE_EVENT(btree_node, bcache_btree_node_free, 271 TP_PROTO(struct btree *b), 272 TP_ARGS(b) 273 ); 274 275 TRACE_EVENT(bcache_btree_gc_coalesce, 276 TP_PROTO(unsigned nodes), 277 TP_ARGS(nodes), 278 279 TP_STRUCT__entry( 280 __field(unsigned, nodes ) 281 ), 282 283 TP_fast_assign( 284 __entry->nodes = nodes; 285 ), 286 287 TP_printk("coalesced %u nodes", __entry->nodes) 288 ); 289 290 DEFINE_EVENT(cache_set, bcache_gc_start, 291 TP_PROTO(struct cache_set *c), 292 TP_ARGS(c) 293 ); 294 295 DEFINE_EVENT(cache_set, bcache_gc_end, 296 TP_PROTO(struct cache_set *c), 297 TP_ARGS(c) 298 ); 299 300 DEFINE_EVENT(bkey, bcache_gc_copy, 301 TP_PROTO(struct bkey *k), 302 TP_ARGS(k) 303 ); 304 305 DEFINE_EVENT(bkey, bcache_gc_copy_collision, 306 TP_PROTO(struct bkey *k), 307 TP_ARGS(k) 308 ); 309 310 TRACE_EVENT(bcache_btree_insert_key, 311 TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status), 312 TP_ARGS(b, k, op, status), 313 314 TP_STRUCT__entry( 315 __field(u64, btree_node ) 316 __field(u32, btree_level ) 317 __field(u32, inode ) 318 __field(u64, offset ) 319 __field(u32, size ) 320 __field(u8, dirty ) 321 __field(u8, op ) 322 __field(u8, status ) 323 ), 324 325 TP_fast_assign( 326 __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0); 327 __entry->btree_level = b->level; 328 __entry->inode = KEY_INODE(k); 329 __entry->offset = KEY_OFFSET(k); 330 __entry->size = KEY_SIZE(k); 331 __entry->dirty = KEY_DIRTY(k); 332 __entry->op = op; 333 __entry->status = status; 334 ), 335 336 TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u", 337 __entry->status, __entry->op, 338 __entry->btree_node, __entry->btree_level, 339 __entry->inode, __entry->offset, 340 __entry->size, __entry->dirty) 341 ); 342 343 DECLARE_EVENT_CLASS(btree_split, 344 TP_PROTO(struct btree *b, unsigned keys), 345 TP_ARGS(b, keys), 346 347 TP_STRUCT__entry( 348 __field(size_t, bucket ) 349 __field(unsigned, keys ) 350 ), 351 352 TP_fast_assign( 353 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); 354 __entry->keys = keys; 355 ), 356 357 TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys) 358 ); 359 360 DEFINE_EVENT(btree_split, bcache_btree_node_split, 361 TP_PROTO(struct btree *b, unsigned keys), 362 TP_ARGS(b, keys) 363 ); 364 365 DEFINE_EVENT(btree_split, bcache_btree_node_compact, 366 TP_PROTO(struct btree *b, unsigned keys), 367 TP_ARGS(b, keys) 368 ); 369 370 DEFINE_EVENT(btree_node, bcache_btree_set_root, 371 TP_PROTO(struct btree *b), 372 TP_ARGS(b) 373 ); 374 375 TRACE_EVENT(bcache_keyscan, 376 TP_PROTO(unsigned nr_found, 377 unsigned start_inode, uint64_t start_offset, 378 unsigned end_inode, uint64_t end_offset), 379 TP_ARGS(nr_found, 380 start_inode, start_offset, 381 end_inode, end_offset), 382 383 TP_STRUCT__entry( 384 __field(__u32, nr_found ) 385 __field(__u32, start_inode ) 386 __field(__u64, start_offset ) 387 __field(__u32, end_inode ) 388 __field(__u64, end_offset ) 389 ), 390 391 TP_fast_assign( 392 __entry->nr_found = nr_found; 393 __entry->start_inode = start_inode; 394 __entry->start_offset = start_offset; 395 __entry->end_inode = end_inode; 396 __entry->end_offset = end_offset; 397 ), 398 399 TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found, 400 __entry->start_inode, __entry->start_offset, 401 __entry->end_inode, __entry->end_offset) 402 ); 403 404 /* Allocator */ 405 406 TRACE_EVENT(bcache_invalidate, 407 TP_PROTO(struct cache *ca, size_t bucket), 408 TP_ARGS(ca, bucket), 409 410 TP_STRUCT__entry( 411 __field(unsigned, sectors ) 412 __field(dev_t, dev ) 413 __field(__u64, offset ) 414 ), 415 416 TP_fast_assign( 417 __entry->dev = ca->bdev->bd_dev; 418 __entry->offset = bucket << ca->set->bucket_bits; 419 __entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]); 420 ), 421 422 TP_printk("invalidated %u sectors at %d,%d sector=%llu", 423 __entry->sectors, MAJOR(__entry->dev), 424 MINOR(__entry->dev), __entry->offset) 425 ); 426 427 TRACE_EVENT(bcache_alloc, 428 TP_PROTO(struct cache *ca, size_t bucket), 429 TP_ARGS(ca, bucket), 430 431 TP_STRUCT__entry( 432 __field(dev_t, dev ) 433 __field(__u64, offset ) 434 ), 435 436 TP_fast_assign( 437 __entry->dev = ca->bdev->bd_dev; 438 __entry->offset = bucket << ca->set->bucket_bits; 439 ), 440 441 TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev), 442 MINOR(__entry->dev), __entry->offset) 443 ); 444 445 TRACE_EVENT(bcache_alloc_fail, 446 TP_PROTO(struct cache *ca, unsigned reserve), 447 TP_ARGS(ca, reserve), 448 449 TP_STRUCT__entry( 450 __field(dev_t, dev ) 451 __field(unsigned, free ) 452 __field(unsigned, free_inc ) 453 __field(unsigned, blocked ) 454 ), 455 456 TP_fast_assign( 457 __entry->dev = ca->bdev->bd_dev; 458 __entry->free = fifo_used(&ca->free[reserve]); 459 __entry->free_inc = fifo_used(&ca->free_inc); 460 __entry->blocked = atomic_read(&ca->set->prio_blocked); 461 ), 462 463 TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u", 464 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free, 465 __entry->free_inc, __entry->blocked) 466 ); 467 468 /* Background writeback */ 469 470 DEFINE_EVENT(bkey, bcache_writeback, 471 TP_PROTO(struct bkey *k), 472 TP_ARGS(k) 473 ); 474 475 DEFINE_EVENT(bkey, bcache_writeback_collision, 476 TP_PROTO(struct bkey *k), 477 TP_ARGS(k) 478 ); 479 480 #endif /* _TRACE_BCACHE_H */ 481 482 /* This part must be outside protection */ 483 #include <trace/define_trace.h> 484