Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/md/dm.h at v4.3-rc6 (243 lines, 7.4 kB)
/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * Type of table and mapped_device's mempool
 */
#define DM_TYPE_NONE			0
#define DM_TYPE_BIO_BASED		1
#define DM_TYPE_REQUEST_BASED		2
#define DM_TYPE_MQ_REQUEST_BASED	3

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	atomic_t count;
	struct dm_dev *dm_dev;
};

struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_mq_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md);

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)

/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
				    ((t)->type->clone_and_map_rq != NULL))

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))

/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

int dm_split_args(int *argc, char ***argvp, char *input);

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces;
 * ioctl or filesystem, depending which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject helper
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
struct dm_stats *dm_get_stats(struct mapped_device *md);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

bool dm_use_blk_mq(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);

/*
 * Helpers that are used by DM core
 */
unsigned dm_get_reserved_bio_based_ios(void);
unsigned dm_get_reserved_rq_based_ios(void);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count);

#endif
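
The suspend feature flags near the top of the header are independent single-bit flags, so callers combine them with bitwise OR and test them with bitwise AND. A minimal sketch, not part of dm.h, assuming the definitions above are in scope (wants_lockfs() is a hypothetical name used only for illustration):

#include "dm.h"	/* assumed to be the header above, providing DM_SUSPEND_*_FLAG */

/* Hypothetical helper: does this suspend request ask for a filesystem lock? */
static inline int wants_lockfs(unsigned suspend_flags)
{
	return (suspend_flags & DM_SUSPEND_LOCKFS_FLAG) != 0;
}

A caller that wants both behaviours would pass DM_SUSPEND_LOCKFS_FLAG | DM_SUSPEND_NOFLUSH_FLAG as its suspend_flags argument.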
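
dm_target_bio_based(), dm_target_request_based() and dm_target_hybrid() classify a target by which map hooks its target_type fills in: map for bio-based, map_rq or clone_and_map_rq for request-based, and both for a hybrid. A hedged sketch of how a caller might use them, not part of the kernel (dm_target_kind() is a hypothetical helper for logging):

#include "dm.h"	/* assumed to be the header above, providing the macros */

/* Hypothetical helper: name the kind of a target, e.g. for a debug message. */
static const char *dm_target_kind(struct dm_target *t)
{
	if (dm_target_hybrid(t))
		return "hybrid";	/* provides both bio- and request-based hooks */
	if (dm_target_request_based(t))
		return "request-based";	/* map_rq or clone_and_map_rq is set */
	if (dm_target_bio_based(t))
		return "bio-based";	/* map is set */
	return "unknown";
}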
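
dm_get_completion_from_kobject() relies on the standard container_of() pattern: struct dm_kobject_holder embeds a struct kobject, so the address of the enclosing holder (and of any sibling member such as the completion) can be recovered from the embedded kobject's address. An equivalent sketch that returns the holder itself, shown only to illustrate the pattern (dm_holder_from_kobject() is a hypothetical name, not a kernel symbol):

#include <linux/kobject.h>
#include "dm.h"	/* assumed to be the header above, for struct dm_kobject_holder */

/* Hypothetical helper: recover the holder that embeds this kobject. */
static inline struct dm_kobject_holder *dm_holder_from_kobject(struct kobject *kobj)
{
	return container_of(kobj, struct dm_kobject_holder, kobj);
}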
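
dm_message_test_buffer_overflow() returns true when a NUL-terminated result buffer of size maxlen has no room left, or when maxlen is zero, i.e. when any further output would be truncated. A hedged usage sketch, not part of dm.h (emit_word() is a hypothetical helper):

#include <linux/kernel.h>	/* scnprintf() */
#include <linux/string.h>	/* strlen() */
#include "dm.h"			/* assumed to be the header above */

/* Hypothetical helper: append a word and report whether the buffer is now full. */
static bool emit_word(char *result, unsigned maxlen, const char *word)
{
	unsigned sz = strlen(result);

	if (dm_message_test_buffer_overflow(result, maxlen))
		return true;	/* already full; nothing more fits */

	/* scnprintf() truncates safely and NUL-terminates within maxlen - sz bytes. */
	scnprintf(result + sz, maxlen - sz, "%s ", word);
	return dm_message_test_buffer_overflow(result, maxlen);
}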