Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.24 476 lines 11 kB view raw
/*
 * 	connector.c
 *
 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/mutex.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");

/* Main connector device identity; overridable at module load time. */
static u32 cn_idx = CN_IDX_CONNECTOR;
static u32 cn_val = CN_VAL_CONNECTOR;

module_param(cn_idx, uint, 0);
module_param(cn_val, uint, 0);
MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
MODULE_PARM_DESC(cn_val, "Connector's main device val.");

/* notify_lock protects notify_list, the list of registered
 * notification requests (struct cn_ctl_entry) added by cn_callback(). */
static DEFINE_MUTEX(notify_lock);
static LIST_HEAD(notify_list);

/* The single, global connector device instance. */
static struct cn_dev cdev;

/* Set to 1 once cn_init() has finished; cn_add_callback() refuses
 * registrations (-EAGAIN) until then. */
int cn_already_initialized = 0;

/*
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends message it puts there locally unique sequence
 * and random acknowledge numbers. Sequence number may be copied into
 * nlmsghdr->nlmsg_seq too.
 *
 * Sequence number is incremented with each message to be sent.
 *
 * If we expect reply to our message then the sequence number in
 * received message MUST be the same as in original message, and
 * acknowledge number MUST be the same + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting then it is a new message.
 *
 * If we receive a message and its sequence number is the same as one
 * we are expecting but its acknowledgement number is not equal to
 * the acknowledgement number in the original message + 1, then it is
 * a new message.
 *
 */
/*
 * Broadcast @msg to netlink group @__group.  If @__group is zero, the
 * destination group is looked up from the registered callback whose
 * cb_id matches msg->id.  Returns 0 on success, -ENODEV if no matching
 * callback exists, -ESRCH if nobody listens on the group, -ENOMEM on
 * allocation failure, -EINVAL if the message does not fit the skb.
 */
int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (!__group) {
		/* No explicit group: resolve it via the callback registry.
		 * The loop scans the whole list; the last match wins. */
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	} else {
		group = __group;
	}

	/* Avoid allocating an skb nobody will receive. */
	if (!netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = NLMSG_SPACE(sizeof(*msg) + msg->len);

	skb = alloc_skb(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* NLMSG_PUT() hides a "goto nlmsg_failure" on insufficient room. */
	nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh));

	data = NLMSG_DATA(nlh);

	/* Copy header plus msg->len bytes of payload. */
	memcpy(data, msg, sizeof(*data) + msg->len);

	NETLINK_CB(skb).dst_group = group;

	/* netlink_broadcast() consumes the skb in all cases. */
	return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);

nlmsg_failure:
	kfree_skb(skb);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cn_netlink_send);

/*
 * Callback helper - queues work and setup destructor for given data.
 */
/*
 * Dispatch @msg to the callback registered for msg->id by queueing its
 * work item.  @destruct_data(@data) is recorded so the work wrapper can
 * free @data (the skb) after the callback runs.  If the registered work
 * item is busy, a temporary cn_callback_entry is allocated (GFP_ATOMIC,
 * we hold a BH spinlock) and queued instead; d->free marks it for
 * kfree() by the wrapper.  Returns 0 on success, -ENODEV if no callback
 * matches, -ENOMEM/-EINVAL on allocation/queueing failure.
 */
static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data)
{
	struct cn_callback_entry *__cbq, *__new_cbq;
	struct cn_dev *dev = &cdev;
	int err = -ENODEV;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
			if (likely(!work_pending(&__cbq->work) &&
					__cbq->data.ddata == NULL)) {
				/* Fast path: reuse the registered entry. */
				__cbq->data.callback_priv = msg;

				__cbq->data.ddata = data;
				__cbq->data.destruct_data = destruct_data;

				if (queue_work(dev->cbdev->cn_queue,
					       &__cbq->work))
					err = 0;
				else
					err = -EINVAL;
			} else {
				/* Slow path: previous message still in
				 * flight; queue a one-shot copy instead. */
				struct cn_callback_data *d;
				
				err = -ENOMEM;
				__new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
				if (__new_cbq) {
					d = &__new_cbq->data;
					d->callback_priv = msg;
					d->callback = __cbq->data.callback;
					d->ddata = data;
					d->destruct_data = destruct_data;
					/* Tell the wrapper to kfree() this
					 * temporary entry when done. */
					d->free = __new_cbq;

					INIT_WORK(&__new_cbq->work,
						  &cn_queue_wrapper);

					if (queue_work(dev->cbdev->cn_queue,
						       &__new_cbq->work))
						err = 0;
					else {
						kfree(__new_cbq);
						err = -EINVAL;
					}
				}
			}
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	return err;
}

/*
 * Skb receive helper - checks skb and msg size and calls callback
 * helper.
 */
static int __cn_rx_skb(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	u32 pid, uid, seq, group;
	struct cn_msg *msg;

	/* NOTE(review): pid/uid/seq/group are extracted but never used
	 * below — vestigial; kept as-is. */
	pid = NETLINK_CREDS(skb)->pid;
	uid = NETLINK_CREDS(skb)->uid;
	seq = nlh->nlmsg_seq;
	group = NETLINK_CB((skb)).dst_group;
	msg = NLMSG_DATA(nlh);

	/* kfree_skb becomes the destructor: the skb is freed only after
	 * the queued callback has consumed msg (which points into it). */
	return cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
}

/*
 * Main netlink receiving function.
 *
 * It checks skb and netlink header sizes and calls the skb receive
 * helper with a shared skb.
206 */ 207static void cn_rx_skb(struct sk_buff *__skb) 208{ 209 struct nlmsghdr *nlh; 210 u32 len; 211 int err; 212 struct sk_buff *skb; 213 214 skb = skb_get(__skb); 215 216 if (skb->len >= NLMSG_SPACE(0)) { 217 nlh = nlmsg_hdr(skb); 218 219 if (nlh->nlmsg_len < sizeof(struct cn_msg) || 220 skb->len < nlh->nlmsg_len || 221 nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) { 222 kfree_skb(skb); 223 return; 224 } 225 226 len = NLMSG_ALIGN(nlh->nlmsg_len); 227 if (len > skb->len) 228 len = skb->len; 229 230 err = __cn_rx_skb(skb, nlh); 231 if (err < 0) 232 kfree_skb(skb); 233 } 234} 235 236/* 237 * Notification routing. 238 * 239 * Gets id and checks if there are notification request for it's idx 240 * and val. If there are such requests notify the listeners with the 241 * given notify event. 242 * 243 */ 244static void cn_notify(struct cb_id *id, u32 notify_event) 245{ 246 struct cn_ctl_entry *ent; 247 248 mutex_lock(&notify_lock); 249 list_for_each_entry(ent, &notify_list, notify_entry) { 250 int i; 251 struct cn_notify_req *req; 252 struct cn_ctl_msg *ctl = ent->msg; 253 int idx_found, val_found; 254 255 idx_found = val_found = 0; 256 257 req = (struct cn_notify_req *)ctl->data; 258 for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { 259 if (id->idx >= req->first && 260 id->idx < req->first + req->range) { 261 idx_found = 1; 262 break; 263 } 264 } 265 266 for (i = 0; i < ctl->val_notify_num; ++i, ++req) { 267 if (id->val >= req->first && 268 id->val < req->first + req->range) { 269 val_found = 1; 270 break; 271 } 272 } 273 274 if (idx_found && val_found) { 275 struct cn_msg m = { .ack = notify_event, }; 276 277 memcpy(&m.id, id, sizeof(m.id)); 278 cn_netlink_send(&m, ctl->group, GFP_KERNEL); 279 } 280 } 281 mutex_unlock(&notify_lock); 282} 283 284/* 285 * Callback add routing - adds callback with given ID and name. 286 * If there is registered callback with the same ID it will not be added. 287 * 288 * May sleep. 
 */
/*
 * Register @callback under @id/@name.  Returns -EAGAIN before the
 * connector core is initialized, the cn_queue_add_callback() error on
 * failure, 0 on success.  Listeners registered for this id range are
 * notified with event 0 ("added").
 */
int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
{
	int err;
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
	if (err)
		return err;

	/* Event 0: callback added. */
	cn_notify(id, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);

/*
 * Callback remove routing - removes callback
 * with given ID.
 * If there is no registered callback with given
 * ID nothing happens.
 *
 * May sleep while waiting for reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
	/* Event 1: callback removed. */
	cn_notify(id, 1);
}
EXPORT_SYMBOL_GPL(cn_del_callback);

/*
 * Checks two connector's control messages to be the same.
 * Returns 1 if they are the same or if the first one is corrupted.
 */
static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
{
	int i;
	struct cn_notify_req *req1, *req2;

	if (m1->idx_notify_num != m2->idx_notify_num)
		return 0;

	if (m1->val_notify_num != m2->val_notify_num)
		return 0;

	if (m1->len != m2->len)
		return 0;

	/* Deliberate: a length inconsistent with the request counts means
	 * m1 is corrupted, and corrupted entries compare as "equal" so
	 * that the caller (cn_callback) removes them from notify_list. */
	if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
	    m1->len)
		return 1;

	req1 = (struct cn_notify_req *)m1->data;
	req2 = (struct cn_notify_req *)m2->data;

	/* Compare idx ranges pairwise... */
	for (i = 0; i < m1->idx_notify_num; ++i) {
		if (req1->first != req2->first || req1->range != req2->range)
			return 0;
		req1++;
		req2++;
	}

	/* ...then the val ranges that immediately follow them. */
	for (i = 0; i < m1->val_notify_num; ++i) {
		if (req1->first != req2->first || req1->range != req2->range)
			return 0;
		req1++;
		req2++;
	}

	return 1;
}

/*
 * Main connector device's callback.
 *
 * Used for notification of a request's processing.
 */
/*
 * Callback registered for the connector's own cb_id.  @data is a
 * struct cn_msg carrying a struct cn_ctl_msg: group == 0 requests
 * removal of matching notification entries, any other group adds a
 * new notification entry for that group to notify_list.
 */
static void cn_callback(void *data)
{
	struct cn_msg *msg = data;
	struct cn_ctl_msg *ctl;
	struct cn_ctl_entry *ent;
	u32 size;

	if (msg->len < sizeof(*ctl))
		return;

	ctl = (struct cn_ctl_msg *)msg->data;

	/* Expected payload: control header plus one cn_notify_req per
	 * advertised idx/val range. */
	size = (sizeof(*ctl) + ((ctl->idx_notify_num +
				 ctl->val_notify_num) *
				sizeof(struct cn_notify_req)));

	if (msg->len != size)
		return;

	/* Cross-check the embedded length field too. */
	if (ctl->len + sizeof(*ctl) != msg->len)
		return;

	/*
	 * Remove notification.
	 */
	if (ctl->group == 0) {
		struct cn_ctl_entry *n;

		mutex_lock(&notify_lock);
		list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
			if (cn_ctl_msg_equals(ent->msg, ctl)) {
				list_del(&ent->notify_entry);
				kfree(ent);
			}
		}
		mutex_unlock(&notify_lock);

		return;
	}

	/* Single allocation: entry header followed by a copy of the
	 * control message it points to. */
	size += sizeof(*ent);

	ent = kzalloc(size, GFP_KERNEL);
	if (!ent)
		return;

	ent->msg = (struct cn_ctl_msg *)(ent + 1);

	memcpy(ent->msg, ctl, size - sizeof(*ent));

	mutex_lock(&notify_lock);
	list_add(&ent->notify_entry, &notify_list);
	mutex_unlock(&notify_lock);
}

/*
 * Module init: create the NETLINK_CONNECTOR kernel socket, allocate
 * the callback queue device, and register the connector's own control
 * callback.  Errors unwind everything already set up.
 */
static int __devinit cn_init(void)
{
	struct cn_dev *dev = &cdev;
	int err;

	dev->input = cn_rx_skb;
	dev->id.idx = cn_idx;
	dev->id.val = cn_val;

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
					 CN_NETLINK_USERS + 0xf,
					 dev->input, NULL, THIS_MODULE);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		if (dev->nls->sk_socket)
			sock_release(dev->nls->sk_socket);
		return -EINVAL;
	}

	/* Must be set before cn_add_callback(), which rejects
	 * registrations while the core is uninitialized. */
	cn_already_initialized = 1;

	err = cn_add_callback(&dev->id, "connector", &cn_callback);
	if (err) {
		cn_already_initialized = 0;
		cn_queue_free_dev(dev->cbdev);
		if (dev->nls->sk_socket)
			sock_release(dev->nls->sk_socket);
		return -EINVAL;
	}

	return 0;
}

/*
 * Module exit: mirror of cn_init() in reverse order.
 */
static void __devexit cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	cn_del_callback(&dev->id);
	cn_queue_free_dev(dev->cbdev);
	if (dev->nls->sk_socket)
		sock_release(dev->nls->sk_socket);
}

/* subsys_initcall: the connector must be up before drivers that use it. */
subsys_initcall(cn_init);
module_exit(cn_fini);