Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: uevent generate events

This patch adds support for the dm_path_uevent and dm_send_uevents functions,
which create and send udev events.

Signed-off-by: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>

Authored by Mike Anderson and committed by Alasdair G Kergon.
7a8c3d3b 51e5b2bd

+295
+97
Documentation/device-mapper/dm-uevent.txt
··· 1 + The device-mapper uevent code adds the capability to device-mapper to create 2 + and send kobject uevents (uevents). Previously device-mapper events were only 3 + available through the ioctl interface. The advantage of the uevents interface 4 + is the event contains environment attributes providing increased context for 5 + the event avoiding the need to query the state of the device-mapper device after 6 + the event is received. 7 + 8 + There are two functions currently for device-mapper events. The first function 9 + listed creates the event and the second function sends the event(s). 10 + 11 + void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti, 12 + const char *path, unsigned nr_valid_paths) 13 + 14 + void dm_send_uevents(struct list_head *events, struct kobject *kobj) 15 + 16 + 17 + The variables added to the uevent environment are: 18 + 19 + Variable Name: DM_TARGET 20 + Uevent Action(s): KOBJ_CHANGE 21 + Type: string 22 + Description: 23 + Value: Name of device-mapper target that generated the event. 24 + 25 + Variable Name: DM_ACTION 26 + Uevent Action(s): KOBJ_CHANGE 27 + Type: string 28 + Description: 29 + Value: Device-mapper specific action that caused the uevent action. 30 + PATH_FAILED - A path has failed. 31 + PATH_REINSTATED - A path has been reinstated. 32 + 33 + Variable Name: DM_SEQNUM 34 + Uevent Action(s): KOBJ_CHANGE 35 + Type: unsigned integer 36 + Description: A sequence number for this specific device-mapper device. 37 + Value: Valid unsigned integer range. 38 + 39 + Variable Name: DM_PATH 40 + Uevent Action(s): KOBJ_CHANGE 41 + Type: string 42 + Description: Major and minor number of the path device pertaining to this 43 + event. 44 + Value: Path name in the form of "Major:Minor" 45 + 46 + Variable Name: DM_NR_VALID_PATHS 47 + Uevent Action(s): KOBJ_CHANGE 48 + Type: unsigned integer 49 + Description: 50 + Value: Valid unsigned integer range. 
51 + 52 + Variable Name: DM_NAME 53 + Uevent Action(s): KOBJ_CHANGE 54 + Type: string 55 + Description: Name of the device-mapper device. 56 + Value: Name 57 + 58 + Variable Name: DM_UUID 59 + Uevent Action(s): KOBJ_CHANGE 60 + Type: string 61 + Description: UUID of the device-mapper device. 62 + Value: UUID. (Empty string if there isn't one.) 63 + 64 + An example of the uevents generated as captured by udevmonitor is shown 65 + below. 66 + 67 + 1.) Path failure. 68 + UEVENT[1192521009.711215] change@/block/dm-3 69 + ACTION=change 70 + DEVPATH=/block/dm-3 71 + SUBSYSTEM=block 72 + DM_TARGET=multipath 73 + DM_ACTION=PATH_FAILED 74 + DM_SEQNUM=1 75 + DM_PATH=8:32 76 + DM_NR_VALID_PATHS=0 77 + DM_NAME=mpath2 78 + DM_UUID=mpath-35333333000002328 79 + MINOR=3 80 + MAJOR=253 81 + SEQNUM=1130 82 + 83 + 2.) Path reinstate. 84 + UEVENT[1192521132.989927] change@/block/dm-3 85 + ACTION=change 86 + DEVPATH=/block/dm-3 87 + SUBSYSTEM=block 88 + DM_TARGET=multipath 89 + DM_ACTION=PATH_REINSTATED 90 + DM_SEQNUM=2 91 + DM_PATH=8:32 92 + DM_NR_VALID_PATHS=1 93 + DM_NAME=mpath2 94 + DM_UUID=mpath-35333333000002328 95 + MINOR=3 96 + MAJOR=253 97 + SEQNUM=1131
+150
drivers/md/dm-uevent.c
··· 21 21 #include <linux/list.h> 22 22 #include <linux/slab.h> 23 23 #include <linux/kobject.h> 24 + #include <linux/dm-ioctl.h> 24 25 25 26 #include "dm.h" 26 27 #include "dm-uevent.h" 27 28 28 29 #define DM_MSG_PREFIX "uevent" 30 + 31 + static const struct { 32 + enum dm_uevent_type type; 33 + enum kobject_action action; 34 + char *name; 35 + } _dm_uevent_type_names[] = { 36 + {DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"}, 37 + {DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"}, 38 + }; 29 39 30 40 static struct kmem_cache *_dm_event_cache; 31 41 ··· 44 34 enum kobject_action action; 45 35 struct kobj_uevent_env ku_env; 46 36 struct list_head elist; 37 + char name[DM_NAME_LEN]; 38 + char uuid[DM_UUID_LEN]; 47 39 }; 48 40 49 41 static void dm_uevent_free(struct dm_uevent *event) ··· 66 54 67 55 return event; 68 56 } 57 + 58 + static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md, 59 + struct dm_target *ti, 60 + enum kobject_action action, 61 + const char *dm_action, 62 + const char *path, 63 + unsigned nr_valid_paths) 64 + { 65 + struct dm_uevent *event; 66 + 67 + event = dm_uevent_alloc(md); 68 + if (!event) { 69 + DMERR("%s: dm_uevent_alloc() failed", __FUNCTION__); 70 + goto err_nomem; 71 + } 72 + 73 + event->action = action; 74 + 75 + if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) { 76 + DMERR("%s: add_uevent_var() for DM_TARGET failed", 77 + __FUNCTION__); 78 + goto err_add; 79 + } 80 + 81 + if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) { 82 + DMERR("%s: add_uevent_var() for DM_ACTION failed", 83 + __FUNCTION__); 84 + goto err_add; 85 + } 86 + 87 + if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u", 88 + dm_next_uevent_seq(md))) { 89 + DMERR("%s: add_uevent_var() for DM_SEQNUM failed", 90 + __FUNCTION__); 91 + goto err_add; 92 + } 93 + 94 + if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) { 95 + DMERR("%s: add_uevent_var() for DM_PATH failed", __FUNCTION__); 96 + goto err_add; 97 + } 
98 + 99 + if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d", 100 + nr_valid_paths)) { 101 + DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed", 102 + __FUNCTION__); 103 + goto err_add; 104 + } 105 + 106 + return event; 107 + 108 + err_add: 109 + dm_uevent_free(event); 110 + err_nomem: 111 + return ERR_PTR(-ENOMEM); 112 + } 113 + 114 + /** 115 + * dm_send_uevents - send uevents for given list 116 + * 117 + * @events: list of events to send 118 + * @kobj: kobject generating event 119 + * 120 + */ 121 + void dm_send_uevents(struct list_head *events, struct kobject *kobj) 122 + { 123 + int r; 124 + struct dm_uevent *event, *next; 125 + 126 + list_for_each_entry_safe(event, next, events, elist) { 127 + list_del_init(&event->elist); 128 + 129 + /* 130 + * Need to call dm_copy_name_and_uuid from here for now. 131 + * Context of previous var adds and locking used for 132 + * hash_cell not compatable. 133 + */ 134 + if (dm_copy_name_and_uuid(event->md, event->name, 135 + event->uuid)) { 136 + DMERR("%s: dm_copy_name_and_uuid() failed", 137 + __FUNCTION__); 138 + goto uevent_free; 139 + } 140 + 141 + if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) { 142 + DMERR("%s: add_uevent_var() for DM_NAME failed", 143 + __FUNCTION__); 144 + goto uevent_free; 145 + } 146 + 147 + if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) { 148 + DMERR("%s: add_uevent_var() for DM_UUID failed", 149 + __FUNCTION__); 150 + goto uevent_free; 151 + } 152 + 153 + r = kobject_uevent_env(kobj, event->action, event->ku_env.envp); 154 + if (r) 155 + DMERR("%s: kobject_uevent_env failed", __FUNCTION__); 156 + uevent_free: 157 + dm_uevent_free(event); 158 + } 159 + } 160 + EXPORT_SYMBOL_GPL(dm_send_uevents); 161 + 162 + /** 163 + * dm_path_uevent - called to create a new path event and queue it 164 + * 165 + * @event_type: path event type enum 166 + * @ti: pointer to a dm_target 167 + * @path: string containing pathname 168 + * @nr_valid_paths: number of valid paths 
remaining 169 + * 170 + */ 171 + void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti, 172 + const char *path, unsigned nr_valid_paths) 173 + { 174 + struct mapped_device *md = dm_table_get_md(ti->table); 175 + struct dm_uevent *event; 176 + 177 + if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) { 178 + DMERR("%s: Invalid event_type %d", __FUNCTION__, event_type); 179 + goto out; 180 + } 181 + 182 + event = dm_build_path_uevent(md, ti, 183 + _dm_uevent_type_names[event_type].action, 184 + _dm_uevent_type_names[event_type].name, 185 + path, nr_valid_paths); 186 + if (IS_ERR(event)) 187 + goto out; 188 + 189 + dm_uevent_add(md, &event->elist); 190 + 191 + out: 192 + dm_put(md); 193 + } 194 + EXPORT_SYMBOL_GPL(dm_path_uevent); 69 195 70 196 int dm_uevent_init(void) 71 197 {
+18
drivers/md/dm-uevent.h
··· 21 21 #ifndef DM_UEVENT_H 22 22 #define DM_UEVENT_H 23 23 24 + enum dm_uevent_type { 25 + DM_UEVENT_PATH_FAILED, 26 + DM_UEVENT_PATH_REINSTATED, 27 + }; 28 + 24 29 #ifdef CONFIG_DM_UEVENT 25 30 26 31 extern int dm_uevent_init(void); 27 32 extern void dm_uevent_exit(void); 33 + extern void dm_send_uevents(struct list_head *events, struct kobject *kobj); 34 + extern void dm_path_uevent(enum dm_uevent_type event_type, 35 + struct dm_target *ti, const char *path, 36 + unsigned nr_valid_paths); 28 37 29 38 #else 30 39 ··· 42 33 return 0; 43 34 } 44 35 static inline void dm_uevent_exit(void) 36 + { 37 + } 38 + static inline void dm_send_uevents(struct list_head *events, 39 + struct kobject *kobj) 40 + { 41 + } 42 + static inline void dm_path_uevent(enum dm_uevent_type event_type, 43 + struct dm_target *ti, const char *path, 44 + unsigned nr_valid_paths) 45 45 { 46 46 } 47 47
+28
drivers/md/dm.c
··· 113 113 */ 114 114 atomic_t event_nr; 115 115 wait_queue_head_t eventq; 116 + atomic_t uevent_seq; 117 + struct list_head uevent_list; 118 + spinlock_t uevent_lock; /* Protect access to uevent_list */ 116 119 117 120 /* 118 121 * freeze/thaw support require holding onto a super block ··· 988 985 atomic_set(&md->holders, 1); 989 986 atomic_set(&md->open_count, 0); 990 987 atomic_set(&md->event_nr, 0); 988 + atomic_set(&md->uevent_seq, 0); 989 + INIT_LIST_HEAD(&md->uevent_list); 990 + spin_lock_init(&md->uevent_lock); 991 991 992 992 md->queue = blk_alloc_queue(GFP_KERNEL); 993 993 if (!md->queue) ··· 1089 1083 */ 1090 1084 static void event_callback(void *context) 1091 1085 { 1086 + unsigned long flags; 1087 + LIST_HEAD(uevents); 1092 1088 struct mapped_device *md = (struct mapped_device *) context; 1089 + 1090 + spin_lock_irqsave(&md->uevent_lock, flags); 1091 + list_splice_init(&md->uevent_list, &uevents); 1092 + spin_unlock_irqrestore(&md->uevent_lock, flags); 1093 + 1094 + dm_send_uevents(&uevents, &md->disk->kobj); 1093 1095 1094 1096 atomic_inc(&md->event_nr); 1095 1097 wake_up(&md->eventq); ··· 1516 1502 /*----------------------------------------------------------------- 1517 1503 * Event notification. 
1518 1504 *---------------------------------------------------------------*/ 1505 + uint32_t dm_next_uevent_seq(struct mapped_device *md) 1506 + { 1507 + return atomic_add_return(1, &md->uevent_seq); 1508 + } 1509 + 1519 1510 uint32_t dm_get_event_nr(struct mapped_device *md) 1520 1511 { 1521 1512 return atomic_read(&md->event_nr); ··· 1530 1511 { 1531 1512 return wait_event_interruptible(md->eventq, 1532 1513 (event_nr != atomic_read(&md->event_nr))); 1514 + } 1515 + 1516 + void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 1517 + { 1518 + unsigned long flags; 1519 + 1520 + spin_lock_irqsave(&md->uevent_lock, flags); 1521 + list_add(elist, &md->uevent_list); 1522 + spin_unlock_irqrestore(&md->uevent_lock, flags); 1533 1523 } 1534 1524 1535 1525 /*
+2
include/linux/device-mapper.h
··· 183 183 */ 184 184 uint32_t dm_get_event_nr(struct mapped_device *md); 185 185 int dm_wait_event(struct mapped_device *md, int event_nr); 186 + uint32_t dm_next_uevent_seq(struct mapped_device *md); 187 + void dm_uevent_add(struct mapped_device *md, struct list_head *elist); 186 188 187 189 /* 188 190 * Info functions.