connector: convert to synchronous netlink message processing

Commits 01a16b21 (netlink: kill eff_cap from struct netlink_skb_parms)
and c53fa1ed (netlink: kill loginuid/sessionid/sid members from struct
netlink_skb_parms) removed some members from struct netlink_skb_parms
that depend on the current context; all netlink users are therefore now
required to do synchronous message processing.

connector, however, queues received messages and processes them in a
work queue, which is no longer valid. This patch converts connector to
synchronous message processing by invoking the registered callback
handler directly from the netlink receive function.
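
Assembled from the connector.c hunk below, the receive-path dispatch
after this patch is a short lookup under the queue lock followed by a
direct invocation (comments added here for orientation):

static int cn_call_callback(struct sk_buff *skb)
{
        struct cn_callback_entry *i, *cbq = NULL;
        struct cn_dev *dev = &cdev;
        struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
        struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
        int err = -ENODEV;

        /* Only the list walk runs under the lock; a matching entry is
         * pinned by its reference count before the lock is dropped. */
        spin_lock_bh(&dev->cbdev->queue_lock);
        list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
                if (cn_cb_equal(&i->id.id, &msg->id)) {
                        atomic_inc(&i->refcnt);
                        cbq = i;
                        break;
                }
        }
        spin_unlock_bh(&dev->cbdev->queue_lock);

        /* Invoke the handler synchronously, with no connector locks held. */
        if (cbq != NULL) {
                cbq->callback(msg, nsp);
                kfree_skb(skb);
                cn_queue_release_callback(cbq);
                err = 0;
        }

        return err;
}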

In order to avoid invoking the callback with connector locks held, a
reference count is added to struct cn_callback_entry; the reference is
taken when a matching callback entry is found on the device's queue_list
and released after the callback handler has been invoked.
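
Concretely, the lifetime rule assembled from the cn_queue.c hunks below:
an entry is created with a count of one (the queue_list's reference),
lookups take a second reference while queue_lock is held, and whichever
put drops the count to zero frees the entry (comments added here):

        /* cn_queue_alloc_callback_entry(): the list's initial reference */
        atomic_set(&cbq->refcnt, 1);

        /* cn_call_callback(): pin the matching entry under queue_lock */
        atomic_inc(&i->refcnt);

/* Drop one reference; the final put frees the entry and releases
 * the reference it held on the queue device. */
void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
        if (!atomic_dec_and_test(&cbq->refcnt))
                return;

        atomic_dec(&cbq->pdev->refcnt);
        kfree(cbq);
}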

Signed-off-by: Patrick McHardy <kaber@trash.net>
Acked-by: Evgeniy Polyakov <zbr@ioremap.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

3 files changed, 32 insertions(+), 89 deletions(-)
drivers/connector/cn_queue.c (+17 -41)
···
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(struct work_struct *work)
-{
-        struct cn_callback_entry *cbq =
-                container_of(work, struct cn_callback_entry, work);
-        struct cn_callback_data *d = &cbq->data;
-        struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
-        struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
-
-        d->callback(msg, nsp);
-
-        kfree_skb(d->skb);
-        d->skb = NULL;
-
-        kfree(d->free);
-}
-
 static struct cn_callback_entry *
-cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
+cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
+                              struct cb_id *id,
                               void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
         struct cn_callback_entry *cbq;
···
                 return NULL;
         }
 
+        atomic_set(&cbq->refcnt, 1);
+
+        atomic_inc(&dev->refcnt);
+        cbq->pdev = dev;
+
         snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
         memcpy(&cbq->id.id, id, sizeof(struct cb_id));
-        cbq->data.callback = callback;
-
-        INIT_WORK(&cbq->work, &cn_queue_wrapper);
+        cbq->callback = callback;
         return cbq;
 }
 
-static void cn_queue_free_callback(struct cn_callback_entry *cbq)
+void cn_queue_release_callback(struct cn_callback_entry *cbq)
 {
-        flush_workqueue(cbq->pdev->cn_queue);
+        if (!atomic_dec_and_test(&cbq->refcnt))
+                return;
+
+        atomic_dec(&cbq->pdev->refcnt);
         kfree(cbq);
 }
···
         struct cn_callback_entry *cbq, *__cbq;
         int found = 0;
 
-        cbq = cn_queue_alloc_callback_entry(name, id, callback);
+        cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
         if (!cbq)
                 return -ENOMEM;
-
-        atomic_inc(&dev->refcnt);
-        cbq->pdev = dev;
 
         spin_lock_bh(&dev->queue_lock);
         list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
···
         spin_unlock_bh(&dev->queue_lock);
 
         if (found) {
-                cn_queue_free_callback(cbq);
-                atomic_dec(&dev->refcnt);
+                cn_queue_release_callback(cbq);
                 return -EINVAL;
         }
···
         }
         spin_unlock_bh(&dev->queue_lock);
 
-        if (found) {
-                cn_queue_free_callback(cbq);
-                atomic_dec(&dev->refcnt);
-        }
+        if (found)
+                cn_queue_release_callback(cbq);
 }
 
 struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
···
         dev->nls = nls;
 
-        dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
-        if (!dev->cn_queue) {
-                kfree(dev);
-                return NULL;
-        }
-
         return dev;
 }
 
 void cn_queue_free_dev(struct cn_queue_dev *dev)
 {
         struct cn_callback_entry *cbq, *n;
-
-        flush_workqueue(dev->cn_queue);
-        destroy_workqueue(dev->cn_queue);
 
         spin_lock_bh(&dev->queue_lock);
         list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
drivers/connector/connector.c (+12 -35)
···
  */
 static int cn_call_callback(struct sk_buff *skb)
 {
-        struct cn_callback_entry *__cbq, *__new_cbq;
+        struct cn_callback_entry *i, *cbq = NULL;
         struct cn_dev *dev = &cdev;
         struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
+        struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
         int err = -ENODEV;
 
         spin_lock_bh(&dev->cbdev->queue_lock);
-        list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
-                if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-                        if (likely(!work_pending(&__cbq->work) &&
-                                   __cbq->data.skb == NULL)) {
-                                __cbq->data.skb = skb;
-
-                                if (queue_work(dev->cbdev->cn_queue,
-                                               &__cbq->work))
-                                        err = 0;
-                                else
-                                        err = -EINVAL;
-                        } else {
-                                struct cn_callback_data *d;
-
-                                err = -ENOMEM;
-                                __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
-                                if (__new_cbq) {
-                                        d = &__new_cbq->data;
-                                        d->skb = skb;
-                                        d->callback = __cbq->data.callback;
-                                        d->free = __new_cbq;
-
-                                        INIT_WORK(&__new_cbq->work,
-                                                  &cn_queue_wrapper);
-
-                                        if (queue_work(dev->cbdev->cn_queue,
-                                                       &__new_cbq->work))
-                                                err = 0;
-                                        else {
-                                                kfree(__new_cbq);
-                                                err = -EINVAL;
-                                        }
-                                }
-                        }
+        list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
+                if (cn_cb_equal(&i->id.id, &msg->id)) {
+                        atomic_inc(&i->refcnt);
+                        cbq = i;
                         break;
                 }
         }
         spin_unlock_bh(&dev->cbdev->queue_lock);
+
+        if (cbq != NULL) {
+                cbq->callback(msg, nsp);
+                kfree_skb(skb);
+                cn_queue_release_callback(cbq);
+                err = 0;
+        }
 
         return err;
 }
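
Since handlers now run straight out of the netlink receive path, a
consumer simply registers a callback and can rely on the sender's
netlink_skb_parms being current when it runs. A minimal, hypothetical
module sketch follows; the cb_id values are invented, and the exact
cn_add_callback()/cn_del_callback() prototypes should be checked against
include/linux/connector.h in your tree:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/connector.h>

/* Hypothetical id; idx/val must not collide with existing users. */
static struct cb_id example_id = { .idx = CN_NETLINK_USERS + 3, .val = 0x1 };

static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
        /* Runs synchronously from the netlink receive path, so the
         * per-sender data in *nsp refers to the current sender. */
        pr_info("connector example: seq %u from pid %u\n", msg->seq, nsp->pid);
}

static int __init example_init(void)
{
        return cn_add_callback(&example_id, "example", example_callback);
}

static void __exit example_exit(void)
{
        cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
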
include/linux/connector.h (+3 -13)
···
         atomic_t refcnt;
         unsigned char name[CN_CBQ_NAMELEN];
 
-        struct workqueue_struct *cn_queue;
-
         struct list_head queue_list;
         spinlock_t queue_lock;
···
         struct cb_id id;
 };
 
-struct cn_callback_data {
-        struct sk_buff *skb;
-        void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
-
-        void *free;
-};
-
 struct cn_callback_entry {
         struct list_head callback_entry;
-        struct work_struct work;
+        atomic_t refcnt;
         struct cn_queue_dev *pdev;
 
         struct cn_callback_id id;
-        struct cn_callback_data data;
+        void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
 
         u32 seq, group;
 };
···
                           struct cb_id *id,
                           void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+void cn_queue_release_callback(struct cn_callback_entry *);
 
 struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
 void cn_queue_free_dev(struct cn_queue_dev *dev);
 
 int cn_cb_equal(struct cb_id *, struct cb_id *);
-
-void cn_queue_wrapper(struct work_struct *work);
 
 #endif /* __KERNEL__ */
 #endif /* __CONNECTOR_H */
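
For readers outside the kernel, the same take-a-reference-under-the-lock,
call-outside-the-lock pattern can be sketched in self-contained C11, with
a pthread mutex standing in for spin_lock_bh() and atomic_int for
atomic_t; this is an illustrative analogue, not connector code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        atomic_int refcnt;
        void (*callback)(int msg);
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *registered;        /* stand-in for dev->queue_list */

/* Drop one reference; the last put frees the entry. */
static void entry_release(struct entry *e)
{
        if (atomic_fetch_sub(&e->refcnt, 1) == 1)
                free(e);
}

/* Mirrors cn_call_callback(): pin under the lock, call outside it. */
static void deliver(int msg)
{
        struct entry *e = NULL;

        pthread_mutex_lock(&queue_lock);
        if (registered) {
                atomic_fetch_add(&registered->refcnt, 1);
                e = registered;
        }
        pthread_mutex_unlock(&queue_lock);

        if (e) {
                e->callback(msg);       /* no locks held here */
                entry_release(e);
        }
}

static void print_cb(int msg)
{
        printf("got message %d\n", msg);
}

int main(void)
{
        struct entry *e = malloc(sizeof(*e));

        atomic_init(&e->refcnt, 1);     /* the list's own reference */
        e->callback = print_cb;
        registered = e;

        deliver(42);

        /* Mirrors cn_queue_del_callback(): unlink, then drop the ref. */
        pthread_mutex_lock(&queue_lock);
        registered = NULL;
        pthread_mutex_unlock(&queue_lock);
        entry_release(e);

        return 0;
}

The point of the pattern shows in deliver(): the lock protects only the
lookup, so a slow callback never blocks registration or removal, while
the extra reference keeps the entry alive even if it is unregistered
concurrently.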