/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
	unsigned long long ll;
	unsigned target_request_nr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
			  union map_info *map_context);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (e.g.,
 *       the multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error,
			    union map_info *map_context);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			     unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);
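/*
 * Example: a minimal bio-based map function in the style of a simple
 * remapping target.  This is an illustrative sketch only; the context
 * structure (struct example_ctx) and its fields (dev, start) are
 * hypothetical, not part of this interface.  It remaps the bio onto the
 * underlying device and returns DM_MAPIO_REMAPPED (defined below) so
 * that dm core submits the remapped bio itself:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		struct example_ctx *ex = ti->private;
 *
 *		bio->bi_bdev = ex->dev->bdev;
 *		bio->bi_sector = ex->start +
 *				 dm_target_offset(ti, bio->bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;
 *	}
 */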
/*
 * Combine device limits.
 */
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length barrier requests that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The request number will be placed in union map_info->target_request_nr.
	 * It is the responsibility of the target driver to remap these requests
	 * to the real underlying devices.
	 */
	unsigned num_flush_requests;

	/*
	 * The number of discard requests that will be submitted to the
	 * target.  map_info->target_request_nr is used just like
	 * num_flush_requests.
	 */
	unsigned num_discard_requests;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires that discard requests be split
	 * on max_io_len boundaries.
	 */
	bool split_discard_requests:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	bool discard_zeroes_data_unsupported:1;
};
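/*
 * Example: a constructor/destructor pair for the hypothetical example
 * target sketched above.  Illustrative only; the argument layout
 * (<dev_path> <offset>) and the context structure are assumptions.
 * Note the pairing of dm_get_device() in the ctr with dm_put_device()
 * in the dtr, and the use of ti->error to report failures:
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_ctx *ex;
 *		unsigned long long start;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ex = kmalloc(sizeof(*ex), GFP_KERNEL);
 *		if (!ex) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (sscanf(argv[1], "%llu", &start) != 1 ||
 *		    dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ex->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ex);
 *			return -EINVAL;
 *		}
 *
 *		ex->start = (sector_t)start;
 *		ti->private = ex;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ex = ti->private;
 *
 *		dm_put_device(ti, ex->dev);
 *		kfree(ex);
 *	}
 */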
/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
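/*
 * Example: parsing an optional feature-argument group with the helpers
 * above, in the style used by several targets.  Illustrative sketch;
 * the feature name and the context flag are assumptions:
 *
 *	static struct dm_arg _args[] = {
 *		{0, 1, "Invalid number of feature arguments"},
 *	};
 *
 *	static int example_parse_features(struct dm_arg_set *as,
 *					  struct dm_target *ti)
 *	{
 *		struct example_ctx *ex = ti->private;
 *		const char *arg_name;
 *		unsigned argc;
 *		int r;
 *
 *		r = dm_read_arg_group(_args, as, &argc, &ti->error);
 *		if (r)
 *			return r;
 *
 *		while (argc) {
 *			arg_name = dm_shift_arg(as);
 *			argc--;
 *
 *			if (!strcasecmp(arg_name, "ignore_discard")) {
 *				ex->ignore_discard = true;
 *				continue;
 *			}
 *
 *			ti->error = "Unrecognised feature requested";
 *			return -EINVAL;
 *		}
 *
 *		return 0;
 *	}
 */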
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);


/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
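/*
 * Example: the table construction sequence described above, for an
 * existing mapped device md.  Illustrative sketch; the target name,
 * length and parameter string are placeholders and error handling is
 * abbreviated:
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sdb 0";
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, 204800, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *
 * A completed table is then installed with dm_swap_table() while md is
 * suspended, and the previous table returned by dm_swap_table() must be
 * destroyed by the caller.
 */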
/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit()) \
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit()) \
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit()) \
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
# define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
# define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit()) \
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */
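/*
 * Example: a status method built on DMEMIT.  DMEMIT assumes local
 * variables named sz, result and maxlen, matching the dm_status_fn
 * prototype.  Illustrative sketch; the context structure and the
 * fields reported are assumptions:
 *
 *	static int example_status(struct dm_target *ti, status_type_t type,
 *				  unsigned status_flags, char *result,
 *				  unsigned maxlen)
 *	{
 *		struct example_ctx *ex = ti->private;
 *		int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ex->dev->name,
 *			       (unsigned long long)ex->start);
 *			break;
 *		}
 *
 *		return 0;
 *	}
 */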