Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
net/9p/mux.c at v2.6.24-rc1 (1058 lines, 24 kB)

/*
 * net/9p/mux.c
 *
 * Protocol Multiplexer
 *
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/transport.h>
#include <net/9p/conn.h>

#define ERREQFLUSH      1
#define SCHED_TIMEOUT   10
#define MAXPOLLWADDR    2

/* scheduling/readiness flag bits kept in p9_conn->wsched */
enum {
        Rworksched = 1,         /* read work scheduled or running */
        Rpending = 2,           /* can read */
        Wworksched = 4,         /* write work scheduled or running */
        Wpending = 8,           /* can write */
};

/* per-request flush state */
enum {
        None,
        Flushing,
        Flushed,
};

struct p9_mux_poll_task;

struct p9_req {
        spinlock_t lock;        /* protect request structure */
        int tag;                /* 9P tag identifying this request */
        struct p9_fcall *tcall; /* request fcall */
        struct p9_fcall *rcall; /* response fcall, once received */
        int err;                /* error code, if any */
        p9_conn_req_callback cb;        /* called when the response arrives */
        void *cba;              /* argument passed to cb */
        int flush;              /* None, Flushing or Flushed */
        struct list_head req_list;      /* link in (un)sent request lists */
};

struct p9_conn {
        spinlock_t lock;        /* protect conn structure */
        struct list_head mux_list;      /* link in the poll task's mux list */
        struct p9_mux_poll_task *poll_task;     /* poll task servicing us */
        int msize;              /* maximum message size */
        unsigned char *extended;        /* 9P2000.u extension flag */
        struct p9_trans *trans; /* underlying transport */
        struct p9_idpool *tagpool;      /* pool of free request tags */
        int err;                /* connection-wide error, if any */
        wait_queue_head_t equeue;       /* woken when a request completes */
        struct list_head req_list;      /* requests sent, awaiting response */
        struct list_head unsent_req_list;       /* requests not yet written */
        struct p9_fcall *rcall; /* fcall being assembled from reads */
        int rpos;               /* bytes read into rbuf so far */
        char *rbuf;             /* read buffer (follows rcall) */
        int wpos;               /* bytes of wbuf already written */
        int wsize;              /* total bytes to write, 0 when idle */
        char *wbuf;             /* write buffer (current tcall data) */
        wait_queue_t poll_wait[MAXPOLLWADDR];
        wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;  /* read work item */
        struct work_struct wq;  /* write work item */
        unsigned long wsched;   /* Rworksched/Rpending/... flag bits */
};

struct p9_mux_poll_task {
        struct task_struct *task;
        struct list_head mux_list;      /* connections served by this task */
        int muxnum;             /* number of connections on mux_list */
};

struct p9_mux_rpc {
        struct p9_conn *m;
        int err;
        struct p9_fcall *tcall;
        struct p9_fcall *rcall;
        wait_queue_head_t wqueue;
};

static int p9_poll_proc(void *);
static void p9_read_work(struct work_struct *work);
static void p9_write_work(struct work_struct *work);
static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
                        poll_table *p);
static u16 p9_mux_get_tag(struct p9_conn *);
static void p9_mux_put_tag(struct p9_conn *, u16);

static DEFINE_MUTEX(p9_mux_task_lock);
static struct workqueue_struct *p9_mux_wq;

static int p9_mux_num;
static int p9_mux_poll_task_num;
static struct p9_mux_poll_task p9_mux_poll_tasks[100];

int p9_mux_global_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
                p9_mux_poll_tasks[i].task = NULL;

        p9_mux_wq = create_workqueue("v9fs");
        if (!p9_mux_wq) {
                printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
                return -ENOMEM;
        }

        return 0;
}
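
/*
 * Every connection's read and write handlers (p9_read_work() and
 * p9_write_work() below) run on the single "v9fs" workqueue created
 * above; the poll tasks only check transport readiness and queue work,
 * the actual reads and writes happen in the work items.
 */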

void p9_mux_global_exit(void)
{
        destroy_workqueue(p9_mux_wq);
}

/**
 * p9_mux_calc_poll_procs - calculate the target number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * Returns ceil(muxnum / p9_mux_poll_task_num), capped at the size of
 * the poll task table. Because the caller spawns a new poll task
 * whenever this exceeds the current task count, the number of tasks
 * grows roughly as the square root of the number of mounts.
 */
static int p9_mux_calc_poll_procs(int muxnum)
{
        int n;

        if (p9_mux_poll_task_num)
                n = muxnum / p9_mux_poll_task_num +
                    (muxnum % p9_mux_poll_task_num ? 1 : 0);
        else
                n = 1;

        if (n > ARRAY_SIZE(p9_mux_poll_tasks))
                n = ARRAY_SIZE(p9_mux_poll_tasks);

        return n;
}
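
/*
 * Worked example of the calculation above: with p9_mux_poll_task_num = 3
 * tasks running, the 10th mount gives ceil(10 / 3) = 4 > 3, so
 * p9_mux_poll_start() spawns a fourth task. A new task is needed
 * roughly whenever muxnum exceeds the square of the task count, which
 * is why the task count tracks sqrt(number of mounts).
 */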

static int p9_mux_poll_start(struct p9_conn *m)
{
        int i, n;
        struct p9_mux_poll_task *vpt, *vptlast;
        struct task_struct *pproc;

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num,
                p9_mux_poll_task_num);
        mutex_lock(&p9_mux_task_lock);

        n = p9_mux_calc_poll_procs(p9_mux_num + 1);
        if (n > p9_mux_poll_task_num) {
                for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
                        if (p9_mux_poll_tasks[i].task == NULL) {
                                vpt = &p9_mux_poll_tasks[i];
                                P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n",
                                                                        vpt);
                                pproc = kthread_create(p9_poll_proc, vpt,
                                                                "v9fs-poll");

                                if (!IS_ERR(pproc)) {
                                        vpt->task = pproc;
                                        INIT_LIST_HEAD(&vpt->mux_list);
                                        vpt->muxnum = 0;
                                        p9_mux_poll_task_num++;
                                        wake_up_process(vpt->task);
                                }
                                break;
                        }
                }

                if (i >= ARRAY_SIZE(p9_mux_poll_tasks))
                        P9_DPRINTK(P9_DEBUG_ERROR,
                                        "warning: no free poll slots\n");
        }

        n = (p9_mux_num + 1) / p9_mux_poll_task_num +
            ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0);

        vptlast = NULL;
        for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
                vpt = &p9_mux_poll_tasks[i];
                if (vpt->task != NULL) {
                        vptlast = vpt;
                        if (vpt->muxnum < n) {
                                P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
                                list_add(&m->mux_list, &vpt->mux_list);
                                vpt->muxnum++;
                                m->poll_task = vpt;
                                memset(&m->poll_waddr, 0,
                                                        sizeof(m->poll_waddr));
                                init_poll_funcptr(&m->pt, p9_pollwait);
                                break;
                        }
                }
        }

        if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
                if (vptlast == NULL) {
                        /* must drop the mutex on this error path too */
                        mutex_unlock(&p9_mux_task_lock);
                        return -ENOMEM;
                }

                P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
                list_add(&m->mux_list, &vptlast->mux_list);
                vptlast->muxnum++;
                m->poll_task = vptlast;
                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                init_poll_funcptr(&m->pt, p9_pollwait);
        }

        p9_mux_num++;
        mutex_unlock(&p9_mux_task_lock);

        return 0;
}

static void p9_mux_poll_stop(struct p9_conn *m)
{
        int i;
        struct p9_mux_poll_task *vpt;

        mutex_lock(&p9_mux_task_lock);
        vpt = m->poll_task;
        list_del(&m->mux_list);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (m->poll_waddr[i] != NULL) {
                        remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
                        m->poll_waddr[i] = NULL;
                }
        }
        vpt->muxnum--;
        if (!vpt->muxnum) {
                P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt);
                kthread_stop(vpt->task);
                vpt->task = NULL;
                p9_mux_poll_task_num--;
        }
        p9_mux_num--;
        mutex_unlock(&p9_mux_task_lock);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data.
 * Creates the polling task if this is the first session.
 *
 * @trans: transport structure
 * @msize: maximum message size
 * @extended: pointer to the extended flag
 */
struct p9_conn *p9_conn_create(struct p9_trans *trans, int msize,
                                unsigned char *extended)
{
        int i, n;
        struct p9_conn *m, *mtmp;

        P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, msize);
        m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&m->lock);
        INIT_LIST_HEAD(&m->mux_list);
        m->msize = msize;
        m->extended = extended;
        m->trans = trans;
        m->tagpool = p9_idpool_create();
        if (IS_ERR(m->tagpool)) {
                mtmp = ERR_PTR(-ENOMEM);
                kfree(m);
                return mtmp;
        }

        m->err = 0;
        init_waitqueue_head(&m->equeue);
        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        m->rcall = NULL;
        m->rpos = 0;
        m->rbuf = NULL;
        m->wpos = m->wsize = 0;
        m->wbuf = NULL;
        INIT_WORK(&m->rq, p9_read_work);
        INIT_WORK(&m->wq, p9_write_work);
        m->wsched = 0;
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        m->poll_task = NULL;
        n = p9_mux_poll_start(m);
        if (n) {
                kfree(m);
                return ERR_PTR(n);
        }

        n = trans->poll(trans, &m->pt);
        if (n & POLLIN) {
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (IS_ERR(m->poll_waddr[i])) {
                        p9_mux_poll_stop(m);
                        mtmp = (void *)m->poll_waddr;   /* the error code */
                        kfree(m);
                        m = mtmp;
                        break;
                }
        }

        return m;
}
EXPORT_SYMBOL(p9_conn_create);
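
/*
 * Illustrative sketch (variable names hypothetical, not part of the
 * original file): a transport module hands its p9_trans to the mux
 * like this and then issues requests with p9_conn_rpc():
 *
 *      struct p9_conn *conn;
 *
 *      conn = p9_conn_create(trans, msize, &extended);
 *      if (IS_ERR(conn))
 *              return PTR_ERR(conn);
 */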

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 */
void p9_conn_destroy(struct p9_conn *m)
{
        P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
                m->mux_list.prev, m->mux_list.next);
        p9_conn_cancel(m, -ECONNRESET);

        if (!list_empty(&m->req_list)) {
                /* wait until all processes waiting on this session exit */
                P9_DPRINTK(P9_DEBUG_MUX,
                        "mux %p waiting for empty request queue\n", m);
                wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
                        list_empty(&m->req_list));
        }

        p9_mux_poll_stop(m);
        m->trans = NULL;
        p9_idpool_destroy(m->tagpool);
        kfree(m);
}
EXPORT_SYMBOL(p9_conn_destroy);

/**
 * p9_pollwait - called by the file's poll operation to add the
 * v9fs-poll task to the file's wait queue
 */
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
        int i;
        struct p9_conn *m;

        m = container_of(p, struct p9_conn, pt);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
                if (m->poll_waddr[i] == NULL)
                        break;

        if (i >= ARRAY_SIZE(m->poll_waddr)) {
                P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        m->poll_waddr[i] = wait_address;

        if (!wait_address) {
                P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
                m->poll_waddr[i] = ERR_PTR(-EIO);
                return;
        }

        init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
        add_wait_queue(wait_address, &m->poll_wait[i]);
}

/**
 * p9_poll_mux - polls a mux and schedules read or write work if necessary
 */
static void p9_poll_mux(struct p9_conn *m)
{
        int n;

        if (m->err < 0)
                return;

        n = m->trans->poll(m->trans, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                p9_conn_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
                        queue_work(p9_mux_wq, &m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list))
                    && !test_and_set_bit(Wworksched, &m->wsched)) {
                        P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
                        queue_work(p9_mux_wq, &m->wq);
                }
        }
}

/**
 * p9_poll_proc - polls all v9fs transports for new events and queues
 * the appropriate work to the work queue
 */
static int p9_poll_proc(void *a)
{
        struct p9_conn *m, *mtmp;
        struct p9_mux_poll_task *vpt;

        vpt = a;
        P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt);
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
                        p9_poll_mux(m);
                }

                P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
                schedule_timeout(SCHED_TIMEOUT * HZ);
        }

        __set_current_state(TASK_RUNNING);
        P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
        return 0;
}

/**
 * p9_write_work - called when a transport can send some data
 */
static void p9_write_work(struct work_struct *work)
{
        int n, err;
        struct p9_conn *m;
        struct p9_req *req;

        m = container_of(work, struct p9_conn, wq);

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
                        return;
                }

                spin_lock(&m->lock);
again:
                req = list_entry(m->unsent_req_list.next, struct p9_req,
                               req_list);
                list_move_tail(&req->req_list, &m->req_list);
                if (req->err == ERREQFLUSH)
                        goto again;

                m->wbuf = req->tcall->sdata;
                m->wsize = req->tcall->size;
                m->wpos = 0;
                spin_unlock(&m->lock);
        }

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
                m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
        P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (err < 0)
                goto error;
        else if (err == 0) {
                err = -EREMOTEIO;
                goto error;
        }

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLOUT) {
                        P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
                        queue_work(p9_mux_wq, &m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
        } else
                clear_bit(Wworksched, &m->wsched);

        return;

error:
        p9_conn_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

static void process_request(struct p9_conn *m, struct p9_req *req)
{
        int ecode;
        struct p9_str *ename;

        if (!req->err && req->rcall->id == P9_RERROR) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;

                P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
                        ename->str);

                if (*m->extended)
                        req->err = -ecode;

                if (!req->err) {
                        req->err = p9_errstr2errno(ename->str, ename->len);

                        if (!req->err) {        /* string match failed */
                                PRINT_FCALL_ERROR("unknown error", req->rcall);
                        }

                        if (!req->err)
                                req->err = -ESERVERFAULT;
                }
        } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                        "fcall mismatch: expected %d, got %d\n",
                        req->tcall->id + 1, req->rcall->id);
                if (!req->err)
                        req->err = -EIO;
        }
}
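
/*
 * The id check above relies on the 9P numbering convention: each reply
 * type is its request type plus one (for example, P9_TVERSION + 1 ==
 * P9_RVERSION), so any other id arriving under a matched tag indicates
 * a misbehaving server.
 */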

/**
 * p9_read_work - called when there is some data to be read from a transport
 */
static void p9_read_work(struct work_struct *work)
{
        int n, err;
        struct p9_conn *m;
        struct p9_req *req, *rptr, *rreq;
        struct p9_fcall *rcall;
        char *rbuf;

        m = container_of(work, struct p9_conn, rq);

        if (m->err < 0)
                return;

        rcall = NULL;
        P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rcall) {
                m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize,
                                   GFP_KERNEL);
                if (!m->rcall) {
                        err = -ENOMEM;
                        goto error;
                }

                m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
                m->rpos = 0;
        }

        clear_bit(Rpending, &m->wsched);
        err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
        P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;
        while (m->rpos > 4) {
                n = le32_to_cpu(*(__le32 *) m->rbuf);
                if (n >= m->msize) {
                        P9_DPRINTK(P9_DEBUG_ERROR,
                                "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                if (m->rpos < n)
                        break;

                err = p9_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
                if (err < 0) {
                        goto error;
                }

#ifdef CONFIG_NET_9P_DEBUG
                if ((p9_debug_level & P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
                        char buf[150];

                        p9_printfcall(buf, sizeof(buf), m->rcall,
                                *m->extended);
                        printk(KERN_NOTICE ">>> %p %s\n", m, buf);
                }
#endif

                rcall = m->rcall;
                rbuf = m->rbuf;
                if (m->rpos > n) {
                        m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize,
                                           GFP_KERNEL);
                        if (!m->rcall) {
                                err = -ENOMEM;
                                goto error;
                        }

                        m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
                        memmove(m->rbuf, rbuf + n, m->rpos - n);
                        m->rpos -= n;
                } else {
                        m->rcall = NULL;
                        m->rbuf = NULL;
                        m->rpos = 0;
                }

                P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
                        rcall->id, rcall->tag);

                req = NULL;
                spin_lock(&m->lock);
                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                req = rreq;
                                if (req->flush != Flushing)
                                        list_del(&req->req_list);
                                break;
                        }
                }
                spin_unlock(&m->lock);

                if (req) {
                        req->rcall = rcall;
                        process_request(m, req);

                        if (req->flush != Flushing) {
                                if (req->cb)
                                        (*req->cb) (req, req->cba);
                                else
                                        kfree(req->rcall);

                                wake_up(&m->equeue);
                        }
                } else {
                        if (err >= 0 && rcall->id != P9_RFLUSH)
                                P9_DPRINTK(P9_DEBUG_ERROR,
                                        "unexpected response mux %p id %d tag %d\n",
                                        m, rcall->id, rcall->tag);
                        kfree(rcall);
                }
        }

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLIN) {
                        P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
                        queue_work(p9_mux_wq, &m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
        } else
                clear_bit(Rworksched, &m->wsched);

        return;

error:
        p9_conn_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}
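
/*
 * Framing note for the loop above: every 9P message starts with size[4]
 * in little-endian byte order, where size counts the four size bytes
 * themselves, followed by type[1] and tag[2]. A 19-byte Tversion, for
 * instance, begins 0x13 0x00 0x00 0x00. This prefix is all p9_read_work()
 * needs to slice the byte stream back into individual fcalls.
 */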

/**
 * p9_send_request - send a 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. A return from this function does not
 * guarantee that the request was sent successfully: errors are returned
 * as ERR_PTR values and can be retrieved with the PTR_ERR macro.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when the response is received
 * @cba: parameter to pass to the callback function
 */
static struct p9_req *p9_send_request(struct p9_conn *m,
                                      struct p9_fcall *tc,
                                      p9_conn_req_callback cb, void *cba)
{
        int n;
        struct p9_req *req;

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
                tc, tc->id);
        if (m->err < 0)
                return ERR_PTR(m->err);

        req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        if (tc->id == P9_TVERSION)
                n = P9_NOTAG;
        else
                n = p9_mux_get_tag(m);

        if (n < 0)
                return ERR_PTR(-ENOMEM);

        p9_set_tag(tc, n);

#ifdef CONFIG_NET_9P_DEBUG
        if ((p9_debug_level & P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
                char buf[150];

                p9_printfcall(buf, sizeof(buf), tc, *m->extended);
                printk(KERN_NOTICE "<<< %p %s\n", m, buf);
        }
#endif

        spin_lock_init(&req->lock);
        req->tag = n;
        req->tcall = tc;
        req->rcall = NULL;
        req->err = 0;
        req->cb = cb;
        req->cba = cba;
        req->flush = None;

        spin_lock(&m->lock);
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = m->trans->poll(m->trans, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                queue_work(p9_mux_wq, &m->wq);

        return req;
}

static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
{
        p9_mux_put_tag(m, req->tag);
        kfree(req);
}

static void p9_mux_flush_cb(struct p9_req *freq, void *a)
{
        p9_conn_req_callback cb;
        int tag;
        struct p9_conn *m;
        struct p9_req *req, *rreq, *rptr;

        m = a;
        P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
                freq->tcall, freq->rcall, freq->err,
                freq->tcall->params.tflush.oldtag);

        spin_lock(&m->lock);
        cb = NULL;
        tag = freq->tcall->params.tflush.oldtag;
        req = NULL;
        list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                if (rreq->tag == tag) {
                        req = rreq;
                        list_del(&req->req_list);
                        break;
                }
        }
        spin_unlock(&m->lock);

        if (req) {
                spin_lock(&req->lock);
                req->flush = Flushed;
                spin_unlock(&req->lock);

                if (req->cb)
                        (*req->cb) (req, req->cba);
                else
                        kfree(req->rcall);

                wake_up(&m->equeue);
        }

        kfree(freq->tcall);
        kfree(freq->rcall);
        p9_mux_free_request(m, freq);
}
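
/*
 * Per the 9P flush protocol, Tflush names the tag it aborts in its
 * oldtag field, and once Rflush arrives the server will send no further
 * reply for oldtag. That guarantee is what lets the callback above
 * safely complete and free the flushed request.
 */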

static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
{
        struct p9_fcall *fc;
        struct p9_req *rreq, *rptr;

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

        /* if a response was received for a request, do nothing */
        spin_lock(&req->lock);
        if (req->rcall || req->err) {
                spin_unlock(&req->lock);
                P9_DPRINTK(P9_DEBUG_MUX,
                        "mux %p req %p response already received\n", m, req);
                return 0;
        }

        req->flush = Flushing;
        spin_unlock(&req->lock);

        spin_lock(&m->lock);
        /* if the request is not sent yet, just remove it from the list */
        list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
                if (rreq->tag == req->tag) {
                        P9_DPRINTK(P9_DEBUG_MUX,
                           "mux %p req %p request is not sent yet\n", m, req);
                        list_del(&rreq->req_list);
                        req->flush = Flushed;
                        spin_unlock(&m->lock);
                        if (req->cb)
                                (*req->cb) (req, req->cba);
                        return 0;
                }
        }
        spin_unlock(&m->lock);

        clear_thread_flag(TIF_SIGPENDING);
        fc = p9_create_tflush(req->tag);
        p9_send_request(m, fc, p9_mux_flush_cb, m);
        return 1;
}

static void
p9_conn_rpc_cb(struct p9_req *req, void *a)
{
        struct p9_mux_rpc *r;

        P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a);
        r = a;
        r->rcall = req->rcall;
        r->err = req->err;

        if (req->flush != None && !req->err)
                r->err = -ERESTARTSYS;

        wake_up(&r->wqueue);
}

/**
 * p9_conn_rpc - sends a 9P request and waits until a response is available.
 * The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
p9_conn_rpc(struct p9_conn *m, struct p9_fcall *tc,
             struct p9_fcall **rc)
{
        int err, sigpending;
        unsigned long flags;
        struct p9_req *req;
        struct p9_mux_rpc r;

        r.err = 0;
        r.tcall = tc;
        r.rcall = NULL;
        r.m = m;
        init_waitqueue_head(&r.wqueue);

        if (rc)
                *rc = NULL;

        sigpending = 0;
        if (signal_pending(current)) {
                sigpending = 1;
                clear_thread_flag(TIF_SIGPENDING);
        }

        req = p9_send_request(m, tc, p9_conn_rpc_cb, &r);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
                return err;
        }

        err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
        if (r.err < 0)
                err = r.err;

        if (err == -ERESTARTSYS && m->trans->status == Connected
            && m->err == 0) {
                if (p9_mux_flush_request(m, req)) {
                        /* wait until we get the response to the flush message */
                        do {
                                clear_thread_flag(TIF_SIGPENDING);
                                err = wait_event_interruptible(r.wqueue,
                                        r.rcall || r.err);
                        } while (!r.rcall && !r.err && err == -ERESTARTSYS &&
                                m->trans->status == Connected && !m->err);

                        err = -ERESTARTSYS;
                }
                sigpending = 1;
        }

        if (sigpending) {
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        if (rc)
                *rc = r.rcall;
        else
                kfree(r.rcall);

        p9_mux_free_request(m, req);
        if (err > 0)
                err = -EIO;

        return err;
}
EXPORT_SYMBOL(p9_conn_rpc);
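
/*
 * Illustrative synchronous call through the mux (sketch only; assumes
 * the p9_create_tversion() helper from net/9p and abbreviates error
 * handling). The response buffer is owned by the caller and must be
 * freed:
 *
 *      struct p9_fcall *tc, *rc = NULL;
 *      int err;
 *
 *      tc = p9_create_tversion(msize, "9P2000");
 *      if (!IS_ERR(tc)) {
 *              err = p9_conn_rpc(conn, tc, &rc);
 *              kfree(tc);
 *              kfree(rc);
 *      }
 */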

#ifdef P9_NONBLOCK
/**
 * p9_conn_rpcnb - sends a 9P request without waiting for a response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when the response arrives
 * @a: value to pass to the callback function
 */
int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
                   p9_conn_req_callback cb, void *a)
{
        int err;
        struct p9_req *req;

        req = p9_send_request(m, tc, cb, a);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
                return PTR_ERR(req);
        }

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
        return 0;
}
EXPORT_SYMBOL(p9_conn_rpcnb);
#endif /* P9_NONBLOCK */

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void p9_conn_cancel(struct p9_conn *m, int err)
{
        struct p9_req *req, *rtmp;
        LIST_HEAD(cancel_list);

        P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
        m->err = err;
        spin_lock(&m->lock);
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock(&m->lock);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                list_del(&req->req_list);
                if (!req->err)
                        req->err = err;

                if (req->cb)
                        (*req->cb) (req, req->cba);
                else
                        kfree(req->rcall);
        }

        wake_up(&m->equeue);
}
EXPORT_SYMBOL(p9_conn_cancel);

static u16 p9_mux_get_tag(struct p9_conn *m)
{
        int tag;

        tag = p9_idpool_get(m->tagpool);
        if (tag < 0)
                return P9_NOTAG;
        else
                return (u16) tag;
}

static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
{
        if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
                p9_idpool_put(tag, m->tagpool);
}
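
/*
 * Lifecycle summary: a session is created with p9_conn_create(),
 * serviced by the shared poll tasks and the "v9fs" workqueue while
 * requests flow through p9_conn_rpc() (or p9_conn_rpcnb() when
 * P9_NONBLOCK is defined), and torn down with p9_conn_destroy().
 * If the transport dies first, p9_conn_cancel() fails every
 * outstanding request with the transport error.
 */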