/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		= 0,
	DM_TYPE_BIO_BASED	= 1,
	DM_TYPE_REQUEST_BASED	= 2,
	DM_TYPE_DAX_BIO_BASED	= 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
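
/*
 * Example (illustrative sketch only, not part of this interface): a
 * minimal bio-based map function that remaps each bio onto a single
 * underlying device, in the style of dm-linear. The names "my_target",
 * "mt->dev" and "mt->start" are hypothetical; dm_target_offset() and
 * DM_MAPIO_REMAPPED are defined later in this header.
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct my_target *mt = ti->private;
 *
 *		bio_set_dev(bio, mt->dev->bdev);
 *		bio->bi_iter.bi_sector = mt->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */
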
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
			      char *result, unsigned int maxlen);

/*
 * Called with *forward == true. If it remains true, the ioctl should be
 * forwarded to bdev. If it is reset to false, the target already fully handled
 * the ioctl and the return value is the return value for the whole ioctl.
 */
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev,
				    unsigned int cmd, unsigned long arg,
				    bool *forward);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can set it to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target would need
 * awkward #ifdefs in its target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device. State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
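
/*
 * Example (illustrative sketch only): a callout that reports whether an
 * underlying device cannot do WRITE ZEROES, so ->iterate_devices can
 * combine the property across all devices. The helper name is
 * hypothetical.
 *
 *	static int device_not_write_zeroes_capable(struct dm_target *ti,
 *			struct dm_dev *dev, sector_t start, sector_t len,
 *			void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return !q->limits.max_write_zeroes_sectors;
 *	}
 */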

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode node, void **kaddr,
		unsigned long *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 *    0 : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct file *bdev_file;
	struct dax_device *dax_dev;
	blk_mode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
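
/*
 * Example (illustrative sketch only): the typical open/close pattern in
 * a target's constructor and destructor. "struct my_target" and its
 * fields are hypothetical.
 *
 *	static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		struct my_target *mt;
 *		int r;
 *
 *		mt = kzalloc(sizeof(*mt), GFP_KERNEL);
 *		if (!mt) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *		r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &mt->dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			kfree(mt);
 *			return r;
 *		}
 *		ti->private = mt;
 *		return 0;
 *	}
 *
 *	static void my_dtr(struct dm_target *ti)
 *	{
 *		struct my_target *mt = ti->private;
 *
 *		dm_put_device(ti, mt->dev);
 *		kfree(mt);
 *	}
 */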

/*
 * Helper function for getting devices
 */
int dm_devt_from_path(const char *path, dev_t *dev_p);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned int version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	/* For internal device-mapper use. */
	struct list_head list;
};
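
/*
 * Example (illustrative sketch only): a minimal bio-based target_type
 * wiring up the hypothetical my_ctr/my_dtr/my_map callbacks sketched
 * above. Hooks that are not implemented are simply left NULL.
 *
 *	static struct target_type my_target = {
 *		.name    = "my-target",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = my_ctr,
 *		.dtr     = my_dtr,
 *		.map     = my_map,
 *	};
 */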

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type)	((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type)	((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

#define DM_TARGET_ATOMIC_WRITES		0x00000400
#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length flush bios that will be submitted
	 * to the target for the purpose of flushing the cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned int num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned int per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Automatically set by dm-core if this target supports
	 * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
	 * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
	 */
	bool zone_reset_all_supported:1;

	/*
	 * Set if this target requires that discards be split on
	 * 'max_discard_sectors' boundaries.
	 */
	bool max_discard_granularity:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;

	/*
	 * Set if the target supports flush optimization. If all the targets in
	 * a table have flush_bypasses_map set, the dm core will not send
	 * flushes to the targets via a ->map method. It will iterate over
	 * dm_table->devices and send flushes to the devices directly. This
	 * optimization reduces the number of flushes being sent when multiple
	 * targets in a table use the same underlying device.
	 *
	 * This optimization may be enabled on targets that just pass the
	 * flushes to the underlying devices without performing any other
	 * actions on the flush request. Currently, dm-linear and dm-stripe
	 * support it.
	 */
	bool flush_bypasses_map:1;

	/*
	 * Set if the target calls bio_integrity_alloc on bios received
	 * in the map method.
	 */
	bool mempool_needs_integrity:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
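
/*
 * Example (illustrative sketch only): with ti->per_io_data_size set to
 * sizeof(struct my_io) in the constructor, per-bio context is available
 * throughout the bio's lifetime. "struct my_io" is hypothetical;
 * DM_ENDIO_DONE is defined later in this header.
 *
 *	struct my_io {
 *		sector_t orig_sector;
 *	};
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		struct my_io *io = dm_per_bio_data(bio, sizeof(struct my_io));
 *
 *		DMDEBUG("completed io at sector %llu",
 *			(unsigned long long)io->orig_sector);
 *		return DM_ENDIO_DONE;
 *	}
 */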

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned int argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned int min;
	unsigned int max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
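
/*
 * Example (illustrative sketch only): validating a numeric argument
 * with dm_read_arg() inside a constructor. The bounds and error string
 * are hypothetical; on failure dm_read_arg() sets *error, so passing
 * &ti->error reports the message directly.
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 1024, "invalid queue depth"},
 *	};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned int depth;
 *
 *	if (dm_read_arg(_args, &as, &depth, &ti->error))
 *		return -EINVAL;
 */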

/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * A target can use this to set the table's type.
 * It can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (supporting both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
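
/*
 * Example (illustrative sketch only): the usual table lifecycle for an
 * in-kernel user, assuming an existing mapped_device *md and a
 * hypothetical length "len" in sectors:
 *
 *	struct dm_table *t;
 *	int r;
 *
 *	r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *	if (r)
 *		return r;
 *	r = dm_table_add_target(t, "linear", 0, len, "/dev/sda 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */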

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
	DMEMIT("target_name=%s,target_version=%u.%u.%u", \
	       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
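
/*
 * Example (illustrative sketch only): a status callback emitting into
 * the caller-supplied buffer via DMEMIT, which expects local "sz",
 * "result" and "maxlen" variables. The context field names are
 * hypothetical.
 *
 *	static void my_status(struct dm_target *ti, status_type_t type,
 *			      unsigned int status_flags, char *result,
 *			      unsigned int maxlen)
 *	{
 *		struct my_target *mt = ti->private;
 *		unsigned int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", mt->dev->name,
 *			       (unsigned long long)mt->start);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */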

/**
 * module_dm() - Helper macro for DM targets that don't do anything
 * special in their module_init and module_exit.
 * Each module may only use this macro once, and calling it replaces
 * module_init() and module_exit().
 *
 * @name: DM target's name
 */
#define module_dm(name) \
static int __init dm_##name##_init(void) \
{ \
	return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
	dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)
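
/*
 * Example (illustrative sketch only): registering the hypothetical
 * my_target defined earlier. module_dm(my) expands to module init/exit
 * handlers that register and unregister "my_target":
 *
 *	module_dm(my);
 *	MODULE_DESCRIPTION(DM_NAME " example target");
 *	MODULE_LICENSE("GPL");
 */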

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
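
/*
 * Worked example: with n = 1000 and sz = 512,
 * dm_div_up(1000, 512) == (1000 + 511) / 512 == 2, and
 * dm_round_up(1000, 512) == 2 * 512 == 1024.
 */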

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif /* _LINUX_DEVICE_MAPPER_H */