Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

w1: process w1 netlink commands in w1_process thread

Netlink is a socket interface and is expected to be asynchronous.
Clients can now make w1 requests without blocking, by making use of the
w1_master thread — previously used only for the automatic bus search —
to process netlink commands.

Signed-off-by: David Fries <David@Fries.net>
Acked-by: Evgeniy Polyakov <zbr@ioremap.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by David Fries and committed by Greg Kroah-Hartman
9fcbbac5 70b34d2e

+296 -91
+124 -48
drivers/w1/w1.c
··· 79 79 { 80 80 struct w1_slave *sl = dev_to_w1_slave(dev); 81 81 82 - dev_dbg(dev, "%s: Releasing %s.\n", __func__, sl->name); 83 - 84 - while (atomic_read(&sl->refcnt)) { 85 - dev_dbg(dev, "Waiting for %s to become free: refcnt=%d.\n", 86 - sl->name, atomic_read(&sl->refcnt)); 87 - if (msleep_interruptible(1000)) 88 - flush_signals(current); 89 - } 82 + dev_dbg(dev, "%s: Releasing %s [%p]\n", __func__, sl->name, sl); 90 83 91 84 w1_family_put(sl->family); 92 85 sl->master->slave_count--; 93 - 94 - complete(&sl->released); 95 86 } 96 87 97 88 static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) ··· 268 277 mutex_lock(&md->mutex); 269 278 md->enable_pullup = tmp; 270 279 mutex_unlock(&md->mutex); 271 - wake_up_process(md->thread); 272 280 273 281 return count; 274 282 } ··· 360 370 { 361 371 struct w1_master *md = dev_to_w1_master(dev); 362 372 int c = PAGE_SIZE; 373 + struct list_head *ent, *n; 374 + struct w1_slave *sl = NULL; 363 375 364 - mutex_lock(&md->mutex); 376 + mutex_lock(&md->list_mutex); 365 377 366 - if (md->slave_count == 0) 367 - c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n"); 368 - else { 369 - struct list_head *ent, *n; 370 - struct w1_slave *sl; 378 + list_for_each_safe(ent, n, &md->slist) { 379 + sl = list_entry(ent, struct w1_slave, w1_slave_entry); 371 380 372 - list_for_each_safe(ent, n, &md->slist) { 373 - sl = list_entry(ent, struct w1_slave, w1_slave_entry); 374 - 375 - c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name); 376 - } 381 + c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name); 377 382 } 383 + if (!sl) 384 + c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n"); 378 385 379 - mutex_unlock(&md->mutex); 386 + mutex_unlock(&md->list_mutex); 380 387 381 388 return PAGE_SIZE - c; 382 389 } ··· 427 440 } 428 441 429 442 /* Searches the slaves in the w1_master and returns a pointer or NULL. 
430 - * Note: must hold the mutex 443 + * Note: must not hold list_mutex 431 444 */ 432 445 struct w1_slave *w1_slave_search_device(struct w1_master *dev, 433 446 struct w1_reg_num *rn) 434 447 { 435 448 struct w1_slave *sl; 449 + mutex_lock(&dev->list_mutex); 436 450 list_for_each_entry(sl, &dev->slist, w1_slave_entry) { 437 451 if (sl->reg_num.family == rn->family && 438 452 sl->reg_num.id == rn->id && 439 453 sl->reg_num.crc == rn->crc) { 454 + mutex_unlock(&dev->list_mutex); 440 455 return sl; 441 456 } 442 457 } 458 + mutex_unlock(&dev->list_mutex); 443 459 return NULL; 444 460 } 445 461 ··· 499 509 mutex_lock(&md->mutex); 500 510 sl = w1_slave_search_device(md, &rn); 501 511 if (sl) { 502 - w1_slave_detach(sl); 512 + result = w1_slave_detach(sl); 513 + /* refcnt 0 means it was detached in the call */ 514 + if (result == 0) 515 + result = count; 503 516 } else { 504 517 dev_info(dev, "Device %02x-%012llx doesn't exists\n", rn.family, 505 518 (unsigned long long)rn.id); ··· 697 704 dev_set_uevent_suppress(&sl->dev, false); 698 705 kobject_uevent(&sl->dev.kobj, KOBJ_ADD); 699 706 707 + mutex_lock(&sl->master->list_mutex); 700 708 list_add_tail(&sl->w1_slave_entry, &sl->master->slist); 709 + mutex_unlock(&sl->master->list_mutex); 701 710 702 711 return 0; 703 712 } ··· 726 731 727 732 memset(&msg, 0, sizeof(msg)); 728 733 memcpy(&sl->reg_num, rn, sizeof(sl->reg_num)); 729 - atomic_set(&sl->refcnt, 0); 730 - init_completion(&sl->released); 734 + atomic_set(&sl->refcnt, 1); 735 + atomic_inc(&sl->master->refcnt); 731 736 732 737 /* slave modules need to be loaded in a context with unlocked mutex */ 733 738 mutex_unlock(&dev->mutex); ··· 767 772 return 0; 768 773 } 769 774 770 - void w1_slave_detach(struct w1_slave *sl) 775 + int w1_unref_slave(struct w1_slave *sl) 771 776 { 772 - struct w1_netlink_msg msg; 777 + struct w1_master *dev = sl->master; 778 + int refcnt; 779 + mutex_lock(&dev->list_mutex); 780 + refcnt = atomic_sub_return(1, &sl->refcnt); 781 + if (refcnt 
== 0) { 782 + struct w1_netlink_msg msg; 773 783 774 - dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__, sl->name, sl); 784 + dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__, 785 + sl->name, sl); 775 786 776 - list_del(&sl->w1_slave_entry); 787 + list_del(&sl->w1_slave_entry); 777 788 778 - memset(&msg, 0, sizeof(msg)); 779 - memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id)); 780 - msg.type = W1_SLAVE_REMOVE; 781 - w1_netlink_send(sl->master, &msg); 789 + memset(&msg, 0, sizeof(msg)); 790 + memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id)); 791 + msg.type = W1_SLAVE_REMOVE; 792 + w1_netlink_send(sl->master, &msg); 782 793 783 - device_unregister(&sl->dev); 794 + device_unregister(&sl->dev); 795 + #ifdef DEBUG 796 + memset(sl, 0, sizeof(*sl)); 797 + #endif 798 + kfree(sl); 799 + } 800 + atomic_dec(&dev->refcnt); 801 + mutex_unlock(&dev->list_mutex); 802 + return refcnt; 803 + } 784 804 785 - wait_for_completion(&sl->released); 786 - kfree(sl); 805 + int w1_slave_detach(struct w1_slave *sl) 806 + { 807 + /* Only detach a slave once as it decreases the refcnt each time. */ 808 + int destroy_now; 809 + mutex_lock(&sl->master->list_mutex); 810 + destroy_now = !test_bit(W1_SLAVE_DETACH, &sl->flags); 811 + set_bit(W1_SLAVE_DETACH, &sl->flags); 812 + mutex_unlock(&sl->master->list_mutex); 813 + 814 + if (destroy_now) 815 + destroy_now = !w1_unref_slave(sl); 816 + return destroy_now ? 
0 : -EBUSY; 787 817 } 788 818 789 819 struct w1_master *w1_search_master_id(u32 id) ··· 837 817 838 818 mutex_lock(&w1_mlock); 839 819 list_for_each_entry(dev, &w1_masters, w1_master_entry) { 840 - mutex_lock(&dev->mutex); 820 + mutex_lock(&dev->list_mutex); 841 821 list_for_each_entry(sl, &dev->slist, w1_slave_entry) { 842 822 if (sl->reg_num.family == id->family && 843 823 sl->reg_num.id == id->id && ··· 848 828 break; 849 829 } 850 830 } 851 - mutex_unlock(&dev->mutex); 831 + mutex_unlock(&dev->list_mutex); 852 832 853 833 if (found) 854 834 break; ··· 868 848 dev_dbg(&dev->dev, "Reconnecting slaves in device %s " 869 849 "for family %02x.\n", dev->name, f->fid); 870 850 mutex_lock(&dev->mutex); 851 + mutex_lock(&dev->list_mutex); 871 852 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 872 853 /* If it is a new family, slaves with the default 873 854 * family driver and are that family will be ··· 880 859 (!attach && sl->family->fid == f->fid)) { 881 860 struct w1_reg_num rn; 882 861 862 + mutex_unlock(&dev->list_mutex); 883 863 memcpy(&rn, &sl->reg_num, sizeof(rn)); 884 - w1_slave_detach(sl); 885 - 886 - w1_attach_slave_device(dev, &rn); 864 + /* If it was already in use let the automatic 865 + * scan pick it up again later. 
866 + */ 867 + if (!w1_slave_detach(sl)) 868 + w1_attach_slave_device(dev, &rn); 869 + mutex_lock(&dev->list_mutex); 887 870 } 888 871 } 889 872 dev_dbg(&dev->dev, "Reconnecting slaves in device %s " 890 873 "has been finished.\n", dev->name); 874 + mutex_unlock(&dev->list_mutex); 891 875 mutex_unlock(&dev->mutex); 892 876 } 893 877 mutex_unlock(&w1_mlock); ··· 1046 1020 { 1047 1021 struct w1_slave *sl, *sln; 1048 1022 1023 + mutex_lock(&dev->list_mutex); 1049 1024 list_for_each_entry(sl, &dev->slist, w1_slave_entry) 1050 1025 clear_bit(W1_SLAVE_ACTIVE, &sl->flags); 1026 + mutex_unlock(&dev->list_mutex); 1051 1027 1052 1028 w1_search_devices(dev, search_type, cb); 1053 1029 1030 + mutex_lock(&dev->list_mutex); 1054 1031 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 1055 - if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) 1032 + if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) { 1033 + mutex_unlock(&dev->list_mutex); 1056 1034 w1_slave_detach(sl); 1035 + mutex_lock(&dev->list_mutex); 1036 + } 1057 1037 else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags)) 1058 1038 sl->ttl = dev->slave_ttl; 1059 1039 } 1040 + mutex_unlock(&dev->list_mutex); 1060 1041 1061 1042 if (dev->search_count > 0) 1062 1043 dev->search_count--; ··· 1074 1041 w1_search_process_cb(dev, search_type, w1_slave_found); 1075 1042 } 1076 1043 1044 + int w1_process_callbacks(struct w1_master *dev) 1045 + { 1046 + int ret = 0; 1047 + struct w1_async_cmd *async_cmd, *async_n; 1048 + 1049 + /* The list can be added to in another thread, loop until it is empty */ 1050 + while (!list_empty(&dev->async_list)) { 1051 + list_for_each_entry_safe(async_cmd, async_n, &dev->async_list, 1052 + async_entry) { 1053 + /* drop the lock, if it is a search it can take a long 1054 + * time */ 1055 + mutex_unlock(&dev->list_mutex); 1056 + async_cmd->cb(dev, async_cmd); 1057 + ret = 1; 1058 + mutex_lock(&dev->list_mutex); 1059 + } 1060 + } 1061 + return ret; 1062 + } 1063 + 1077 1064 int 
w1_process(void *data) 1078 1065 { 1079 1066 struct w1_master *dev = (struct w1_master *) data; ··· 1101 1048 * time can be calculated in jiffies once. 1102 1049 */ 1103 1050 const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000); 1051 + /* remainder if it woke up early */ 1052 + unsigned long jremain = 0; 1104 1053 1105 - while (!kthread_should_stop()) { 1106 - if (dev->search_count) { 1054 + for (;;) { 1055 + 1056 + if (!jremain && dev->search_count) { 1107 1057 mutex_lock(&dev->mutex); 1108 1058 w1_search_process(dev, W1_SEARCH); 1109 1059 mutex_unlock(&dev->mutex); 1110 1060 } 1111 1061 1062 + mutex_lock(&dev->list_mutex); 1063 + /* Note, w1_process_callback drops the lock while processing, 1064 + * but locks it again before returning. 1065 + */ 1066 + if (!w1_process_callbacks(dev) && jremain) { 1067 + /* a wake up is either to stop the thread, process 1068 + * callbacks, or search, it isn't process callbacks, so 1069 + * schedule a search. 1070 + */ 1071 + jremain = 1; 1072 + } 1073 + 1112 1074 try_to_freeze(); 1113 1075 __set_current_state(TASK_INTERRUPTIBLE); 1076 + 1077 + /* hold list_mutex until after interruptible to prevent loosing 1078 + * the wakeup signal when async_cmd is added. 1079 + */ 1080 + mutex_unlock(&dev->list_mutex); 1114 1081 1115 1082 if (kthread_should_stop()) 1116 1083 break; 1117 1084 1118 1085 /* Only sleep when the search is active. */ 1119 - if (dev->search_count) 1120 - schedule_timeout(jtime); 1086 + if (dev->search_count) { 1087 + if (!jremain) 1088 + jremain = jtime; 1089 + jremain = schedule_timeout(jremain); 1090 + } 1121 1091 else 1122 1092 schedule(); 1123 1093 }
+30 -2
drivers/w1/w1.h
··· 58 58 #define W1_RESUME_CMD 0xA5 59 59 60 60 #define W1_SLAVE_ACTIVE 0 61 + #define W1_SLAVE_DETACH 1 61 62 62 63 struct w1_slave 63 64 { ··· 75 74 struct w1_family *family; 76 75 void *family_data; 77 76 struct device dev; 78 - struct completion released; 79 77 }; 80 78 81 79 typedef void (*w1_slave_found_callback)(struct w1_master *, u64); ··· 171 171 struct list_head w1_master_entry; 172 172 struct module *owner; 173 173 unsigned char name[W1_MAXNAMELEN]; 174 + /* list_mutex protects just slist and async_list so slaves can be 175 + * searched for and async commands added while the master has 176 + * w1_master.mutex locked and is operating on the bus. 177 + * lock order w1_mlock, w1_master.mutex, w1_master_list_mutex 178 + */ 179 + struct mutex list_mutex; 174 180 struct list_head slist; 181 + struct list_head async_list; 175 182 int max_slave_count, slave_count; 176 183 unsigned long attempts; 177 184 int slave_ttl; ··· 212 205 u32 seq; 213 206 }; 214 207 208 + /** 209 + * struct w1_async_cmd - execute callback from the w1_process kthread 210 + * @async_entry: link entry 211 + * @cb: callback function, must list_del and destroy this list before 212 + * returning 213 + * 214 + * When inserted into the w1_master async_list, w1_process will execute 215 + * the callback. Embed this into the structure with the command details. 
216 + */ 217 + struct w1_async_cmd { 218 + struct list_head async_entry; 219 + void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd); 220 + }; 221 + 215 222 int w1_create_master_attributes(struct w1_master *); 216 223 void w1_destroy_master_attributes(struct w1_master *master); 217 224 void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 218 225 void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 226 + /* call w1_unref_slave to release the reference counts w1_search_slave added */ 219 227 struct w1_slave *w1_search_slave(struct w1_reg_num *id); 228 + /* decrements the reference on sl->master and sl, and cleans up if zero 229 + * returns the reference count after it has been decremented */ 230 + int w1_unref_slave(struct w1_slave *sl); 220 231 void w1_slave_found(struct w1_master *dev, u64 rn); 221 232 void w1_search_process_cb(struct w1_master *dev, u8 search_type, 222 233 w1_slave_found_callback cb); ··· 249 224 */ 250 225 void w1_reconnect_slaves(struct w1_family *f, int attach); 251 226 int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn); 252 - void w1_slave_detach(struct w1_slave *sl); 227 + /* 0 success, otherwise EBUSY */ 228 + int w1_slave_detach(struct w1_slave *sl); 253 229 254 230 u8 w1_triplet(struct w1_master *dev, int bdir); 255 231 void w1_write_8(struct w1_master *, u8); ··· 286 260 extern struct list_head w1_masters; 287 261 extern struct mutex w1_mlock; 288 262 263 + /* returns 1 if there were commands to executed 0 otherwise */ 264 + extern int w1_process_callbacks(struct w1_master *dev); 289 265 extern int w1_process(void *); 290 266 291 267 #endif /* __KERNEL__ */
+13 -4
drivers/w1/w1_int.c
··· 75 75 atomic_set(&dev->refcnt, 2); 76 76 77 77 INIT_LIST_HEAD(&dev->slist); 78 + INIT_LIST_HEAD(&dev->async_list); 78 79 mutex_init(&dev->mutex); 79 80 mutex_init(&dev->bus_mutex); 81 + mutex_init(&dev->list_mutex); 80 82 81 83 memcpy(&dev->dev, device, sizeof(struct device)); 82 84 dev_set_name(&dev->dev, "w1_bus_master%u", dev->id); ··· 190 188 struct w1_netlink_msg msg; 191 189 struct w1_slave *sl, *sln; 192 190 193 - set_bit(W1_ABORT_SEARCH, &dev->flags); 194 - kthread_stop(dev->thread); 195 - 196 191 mutex_lock(&w1_mlock); 197 192 list_del(&dev->w1_master_entry); 198 193 mutex_unlock(&w1_mlock); 199 194 195 + set_bit(W1_ABORT_SEARCH, &dev->flags); 196 + kthread_stop(dev->thread); 197 + 200 198 mutex_lock(&dev->mutex); 201 - list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) 199 + mutex_lock(&dev->list_mutex); 200 + list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 201 + mutex_unlock(&dev->list_mutex); 202 202 w1_slave_detach(sl); 203 + mutex_lock(&dev->list_mutex); 204 + } 203 205 w1_destroy_master_attributes(dev); 206 + mutex_unlock(&dev->list_mutex); 204 207 mutex_unlock(&dev->mutex); 205 208 atomic_dec(&dev->refcnt); 206 209 ··· 215 208 216 209 if (msleep_interruptible(1000)) 217 210 flush_signals(current); 211 + w1_process_callbacks(dev); 218 212 } 213 + w1_process_callbacks(dev); 219 214 220 215 memset(&msg, 0, sizeof(msg)); 221 216 msg.id.mst.id = dev->id;
+129 -37
drivers/w1/w1_netlink.c
··· 119 119 120 120 if (req_cmd->cmd == W1_CMD_LIST_SLAVES) { 121 121 __u64 rn; 122 + mutex_lock(&dev->list_mutex); 122 123 list_for_each_entry(sl, &dev->slist, w1_slave_entry) { 123 124 memcpy(&rn, &sl->reg_num, sizeof(rn)); 124 125 w1_send_slave(dev, rn); 125 126 } 127 + mutex_unlock(&dev->list_mutex); 126 128 } else { 127 129 w1_search_process_cb(dev, cmd->cmd == W1_CMD_ALARM_SEARCH ? 128 130 W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave); ··· 370 368 return error; 371 369 } 372 370 371 + /* Bundle together a reference count, the full message, and broken out 372 + * commands to be executed on each w1 master kthread in one memory allocation. 373 + */ 374 + struct w1_cb_block { 375 + atomic_t refcnt; 376 + struct cn_msg msg; 377 + /* cn_msg data */ 378 + /* one or more variable length struct w1_cb_node */ 379 + }; 380 + struct w1_cb_node { 381 + struct w1_async_cmd async; 382 + /* pointers within w1_cb_block and msg data */ 383 + struct w1_cb_block *block; 384 + struct w1_netlink_msg *m; 385 + struct w1_slave *sl; 386 + struct w1_master *dev; 387 + }; 388 + 389 + static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd) 390 + { 391 + struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node, 392 + async); 393 + u16 mlen = node->m->len; 394 + u8 *cmd_data = node->m->data; 395 + int err = 0; 396 + struct w1_slave *sl = node->sl; 397 + struct w1_netlink_cmd *cmd = NULL; 398 + 399 + mutex_lock(&dev->mutex); 400 + if (sl && w1_reset_select_slave(sl)) 401 + err = -ENODEV; 402 + 403 + while (mlen && !err) { 404 + cmd = (struct w1_netlink_cmd *)cmd_data; 405 + 406 + if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) { 407 + err = -E2BIG; 408 + break; 409 + } 410 + 411 + if (sl) 412 + err = w1_process_command_slave(sl, &node->block->msg, 413 + node->m, cmd); 414 + else 415 + err = w1_process_command_master(dev, &node->block->msg, 416 + node->m, cmd); 417 + 418 + w1_netlink_send_error(&node->block->msg, node->m, cmd, err); 419 + err = 
0; 420 + 421 + cmd_data += cmd->len + sizeof(struct w1_netlink_cmd); 422 + mlen -= cmd->len + sizeof(struct w1_netlink_cmd); 423 + } 424 + 425 + if (!cmd || err) 426 + w1_netlink_send_error(&node->block->msg, node->m, cmd, err); 427 + 428 + if (sl) 429 + w1_unref_slave(sl); 430 + else 431 + atomic_dec(&dev->refcnt); 432 + mutex_unlock(&dev->mutex); 433 + 434 + mutex_lock(&dev->list_mutex); 435 + list_del(&async_cmd->async_entry); 436 + mutex_unlock(&dev->list_mutex); 437 + 438 + if (atomic_sub_return(1, &node->block->refcnt) == 0) 439 + kfree(node->block); 440 + } 441 + 373 442 static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) 374 443 { 375 444 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1); 376 - struct w1_netlink_cmd *cmd; 377 445 struct w1_slave *sl; 378 446 struct w1_master *dev; 447 + u16 msg_len; 379 448 int err = 0; 449 + struct w1_cb_block *block = NULL; 450 + struct w1_cb_node *node = NULL; 451 + int node_count = 0; 380 452 381 - while (msg->len && !err) { 453 + /* Count the number of master or slave commands there are to allocate 454 + * space for one cb_node each. 
455 + */ 456 + msg_len = msg->len; 457 + while (msg_len && !err) { 458 + if (m->len + sizeof(struct w1_netlink_msg) > msg_len) { 459 + err = -E2BIG; 460 + break; 461 + } 462 + 463 + if (m->type == W1_MASTER_CMD || m->type == W1_SLAVE_CMD) 464 + ++node_count; 465 + 466 + msg_len -= sizeof(struct w1_netlink_msg) + m->len; 467 + m = (struct w1_netlink_msg *)(((u8 *)m) + 468 + sizeof(struct w1_netlink_msg) + m->len); 469 + } 470 + m = (struct w1_netlink_msg *)(msg + 1); 471 + if (node_count) { 472 + /* msg->len doesn't include itself */ 473 + long size = sizeof(struct w1_cb_block) + msg->len + 474 + node_count*sizeof(struct w1_cb_node); 475 + block = kmalloc(size, GFP_KERNEL); 476 + if (!block) { 477 + w1_netlink_send_error(msg, m, NULL, -ENOMEM); 478 + return; 479 + } 480 + atomic_set(&block->refcnt, 1); 481 + memcpy(&block->msg, msg, sizeof(*msg) + msg->len); 482 + node = (struct w1_cb_node *)((u8 *)block->msg.data + msg->len); 483 + } 484 + 485 + msg_len = msg->len; 486 + while (msg_len && !err) { 382 487 struct w1_reg_num id; 383 488 u16 mlen = m->len; 384 - u8 *cmd_data = m->data; 385 489 386 490 dev = NULL; 387 491 sl = NULL; 388 - cmd = NULL; 389 492 390 493 memcpy(&id, m->id.id, sizeof(id)); 391 494 #if 0 392 495 printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n", 393 496 __func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len); 394 497 #endif 395 - if (m->len + sizeof(struct w1_netlink_msg) > msg->len) { 498 + if (m->len + sizeof(struct w1_netlink_msg) > msg_len) { 396 499 err = -E2BIG; 397 500 break; 398 501 } ··· 522 415 if (!mlen) 523 416 goto out_cont; 524 417 525 - mutex_lock(&dev->mutex); 418 + atomic_inc(&block->refcnt); 419 + node->async.cb = w1_process_cb; 420 + node->block = block; 421 + node->m = (struct w1_netlink_msg *)((u8 *)&block->msg + 422 + (size_t)((u8 *)m - (u8 *)msg)); 423 + node->sl = sl; 424 + node->dev = dev; 526 425 527 - if (sl && w1_reset_select_slave(sl)) { 528 - err = -ENODEV; 529 - goto out_up; 530 - } 426 + 
mutex_lock(&dev->list_mutex); 427 + list_add_tail(&node->async.async_entry, &dev->async_list); 428 + wake_up_process(dev->thread); 429 + mutex_unlock(&dev->list_mutex); 430 + ++node; 531 431 532 - while (mlen) { 533 - cmd = (struct w1_netlink_cmd *)cmd_data; 534 - 535 - if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) { 536 - err = -E2BIG; 537 - break; 538 - } 539 - 540 - if (sl) 541 - err = w1_process_command_slave(sl, msg, m, cmd); 542 - else 543 - err = w1_process_command_master(dev, msg, m, cmd); 544 - 545 - w1_netlink_send_error(msg, m, cmd, err); 546 - err = 0; 547 - 548 - cmd_data += cmd->len + sizeof(struct w1_netlink_cmd); 549 - mlen -= cmd->len + sizeof(struct w1_netlink_cmd); 550 - } 551 - out_up: 552 - atomic_dec(&dev->refcnt); 553 - if (sl) 554 - atomic_dec(&sl->refcnt); 555 - mutex_unlock(&dev->mutex); 556 432 out_cont: 557 - if (!cmd || err) 558 - w1_netlink_send_error(msg, m, cmd, err); 559 - msg->len -= sizeof(struct w1_netlink_msg) + m->len; 433 + if (err) 434 + w1_netlink_send_error(msg, m, NULL, err); 435 + msg_len -= sizeof(struct w1_netlink_msg) + m->len; 560 436 m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); 561 437 562 438 /* ··· 548 458 if (err == -ENODEV) 549 459 err = 0; 550 460 } 461 + if (block && atomic_sub_return(1, &block->refcnt) == 0) 462 + kfree(block); 551 463 } 552 464 553 465 int w1_init_netlink(void)