/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_MQ_REQUEST_BASED = 3,
	DM_TYPE_DAX_BIO_BASED	 = 4,
	DM_TYPE_NVME_BIO_BASED	 = 5,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);
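
/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * map function for a hypothetical linear-style bio-based target. It assumes
 * ti->private points to a context the ctr filled in; all "example_" names
 * are made up for illustration:
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		bio->bi_iter.bi_sector = ec->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * Returning DM_MAPIO_REMAPPED (1, defined below) tells dm core the remap is
 * complete and the bio should be submitted to the new device.
 */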

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
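
/*
 * Example (illustrative sketch): a callout that can be combined with
 * iterate_devices to ask "does any underlying device lack a volatile
 * write cache?". The function name is hypothetical:
 *
 *	static int example_device_no_wc(struct dm_target *ti,
 *					struct dm_dev *dev,
 *					sector_t start, sector_t len,
 *					void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return !q || !test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 *	}
 *
 * ti->type->iterate_devices(ti, example_device_no_wc, NULL) then returns
 * non-zero iff at least one underlying device lacks a write cache, because
 * the first non-zero callout return stops the iteration.
 */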

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
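
/*
 * Example (illustrative sketch): a ctr/dtr pair for the hypothetical
 * single-device target sketched above, pairing dm_get_device with
 * dm_put_device and reporting failures through ti->error:
 *
 *	static int example_ctr(struct dm_target *ti,
 *			       unsigned int argc, char **argv)
 *	{
 *		struct example_ctx *ec;
 *		unsigned long long start;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (sscanf(argv[1], "%llu", &start) != 1 ||
 *		    dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *
 *		ec->start = (sector_t)start;
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */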

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates that a target supports host-managed zoned block devices.
 */
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length flush bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires that discard bios be split
	 * on a max_io_len boundary.
	 */
	bool split_discard_bios:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
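
/*
 * Example (illustrative sketch): a target that sets ti->per_io_data_size in
 * its ctr gets that many private bytes attached to every bio it maps,
 * retrievable via dm_per_bio_data with the same size. Names are made up:
 *
 *	struct example_io {
 *		unsigned long start_jiffies;
 *	};
 *
 *	In the ctr:
 *		ti->per_io_data_size = sizeof(struct example_io);
 *
 *	In the map function:
 *		struct example_io *io =
 *			dm_per_bio_data(bio, sizeof(struct example_io));
 *		io->start_jiffies = jiffies;
 */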

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
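
/*
 * Example (illustrative sketch): tying the hypothetical example_* hooks from
 * the sketches above into a target_type and registering it from module
 * init/exit:
 *
 *	static struct target_type example_target = {
 *		.name	 = "example",
 *		.version = {1, 0, 0},
 *		.module	 = THIS_MODULE,
 *		.ctr	 = example_ctr,
 *		.dtr	 = example_dtr,
 *		.map	 = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */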

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
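
/*
 * Example (illustrative sketch): parsing a trailing
 * "<#feature args> [<feature>...]" group with the helpers above; the bounds
 * and messages are made up:
 *
 *	static int example_parse_features(struct dm_target *ti,
 *					  struct dm_arg_set *as)
 *	{
 *		static const struct dm_arg _args[] = {
 *			{0, 4, "Invalid number of feature arguments"},
 *		};
 *		unsigned num_features;
 *		int r;
 *
 *		r = dm_read_arg_group(_args, as, &num_features, &ti->error);
 *		if (r)
 *			return r;
 *
 *		while (num_features--) {
 *			const char *arg = dm_shift_arg(as);
 *
 *			if (!strcasecmp(arg, "example_feature"))
 *				continue;
 *			ti->error = "Unrecognised feature requested";
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */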

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
			  sector_t start);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * A target's ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * A target can use this to set the table's type.
 * It may only be called from a target's ctr.
 * Useful for "hybrid" targets that support both bio-based
 * and request-based operation.
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
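
/*
 * Example (illustrative sketch of the create/add/complete/destroy sequence
 * above, roughly as dm core's ioctl path strings it together; md, len and
 * params stand in for real values):
 *
 *	struct dm_table *t;
 *
 *	if (dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md))
 *		return -ENOMEM;
 *
 *	if (dm_table_add_target(t, "linear", 0, len, params) ||
 *	    dm_table_complete(t)) {
 *		dm_table_destroy(t);
 *		return -EINVAL;
 *	}
 */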

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DM_RATELIMIT(pr_func, fmt, ...)					\
do {									\
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
									\
	if (__ratelimit(&rs))						\
		pr_func(DM_FMT(fmt), ##__VA_ARGS__);			\
} while (0)

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
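
/*
 * Example (illustrative): each target source file defines DM_MSG_PREFIX
 * before using these macros, conventionally near the top of the file, so
 * messages identify the target. For a hypothetical "example" target:
 *
 *	#define DM_MSG_PREFIX "example"
 *
 *	DMERR("failed to open %s", path);
 *
 * prints "device-mapper: example: failed to open <path>" at KERN_ERR, and
 * the *_LIMIT variants add per-callsite ratelimiting via DM_RATELIMIT.
 */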

#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
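
/*
 * Example (illustrative sketch): DMEMIT relies on local variables named sz,
 * result and maxlen, so a status function typically looks like this (again
 * using the hypothetical example_ctx from the sketches above):
 *
 *	static void example_status(struct dm_target *ti,
 *				   status_type_t type, unsigned status_flags,
 *				   char *result, unsigned maxlen)
 *	{
 *		struct example_ctx *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *	}
 */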

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
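
/*
 * E.g. dm_div_up(1000, 512) == 2, so dm_round_up(1000, 512) == 1024.
 */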

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */