svc: Move the xprt independent code to the svc_xprt.c file

This functionally trivial patch moves all of the transport-independent
functions from svcsock.c to the transport-independent svc_xprt.c file.

In addition, the following formatting changes were made:
- Whitespace cleanup
- Function signatures were placed on a single line
- The inline directive was removed
- Lines over 80 columns were reformatted
- The term 'socket' was changed to 'transport' in comments
- The SMP comment was moved and updated

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>

Authored by Tom Tucker and committed by J. Bruce Fields (0f0257ea 18d19f94)

+804 -810
+27
include/linux/sunrpc/svc_xprt.h
··· 72 72 void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *, 73 73 struct svc_serv *); 74 74 int svc_create_xprt(struct svc_serv *, char *, unsigned short, int); 75 + void svc_xprt_enqueue(struct svc_xprt *xprt); 75 76 void svc_xprt_received(struct svc_xprt *); 76 77 void svc_xprt_put(struct svc_xprt *xprt); 77 78 void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); 79 + void svc_close_xprt(struct svc_xprt *xprt); 80 + void svc_delete_xprt(struct svc_xprt *xprt); 81 + int svc_port_is_privileged(struct sockaddr *sin); 82 + 78 83 static inline void svc_xprt_get(struct svc_xprt *xprt) 79 84 { 80 85 kref_get(&xprt->xpt_ref); ··· 131 126 return svc_addr_port((struct sockaddr *)&xprt->xpt_remote); 132 127 } 133 128 129 + static inline char *__svc_print_addr(struct sockaddr *addr, 130 + char *buf, size_t len) 131 + { 132 + switch (addr->sa_family) { 133 + case AF_INET: 134 + snprintf(buf, len, "%u.%u.%u.%u, port=%u", 135 + NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), 136 + ntohs(((struct sockaddr_in *) addr)->sin_port)); 137 + break; 138 + 139 + case AF_INET6: 140 + snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", 141 + NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), 142 + ntohs(((struct sockaddr_in6 *) addr)->sin6_port)); 143 + break; 144 + 145 + default: 146 + snprintf(buf, len, "unknown address type: %d", addr->sa_family); 147 + break; 148 + } 149 + return buf; 150 + } 134 151 #endif /* SUNRPC_SVC_XPRT_H */
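With svc_xprt_enqueue(), svc_close_xprt() and friends now exported from the generic header, a transport provider drives the queueing machinery through these calls instead of svcsock internals. Below is a minimal sketch of that calling convention, not code from this patch: my_transport, my_data_ready() and pull_request_data() are invented for illustration, while the XPT_* flags and svc_xprt_* functions are the interfaces this patch touches.

/*
 * Sketch only: the provider-side convention described by the SMP
 * comment in svc_xprt.c. Hypothetical names are marked; XPT_BUSY
 * serialization is handled inside svc_xprt_enqueue()/svc_xprt_received().
 */
static void my_data_ready(struct my_transport *mt)        /* hypothetical */
{
        struct svc_xprt *xprt = &mt->xprt;

        /* Flag pending data, then ask sunrpc to schedule the
         * transport onto an idle service thread. */
        set_bit(XPT_DATA, &xprt->xpt_flags);
        svc_xprt_enqueue(xprt);
}

static int my_recvfrom(struct svc_rqst *rqstp)            /* hypothetical */
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        int len;

        len = pull_request_data(rqstp);                   /* hypothetical */

        /* Allow other threads to service this transport again. */
        svc_xprt_received(xprt);
        return len;
}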
+753
net/sunrpc/svc_xprt.c
··· 35 35 36 36 #define RPCDBG_FACILITY RPCDBG_SVCXPRT 37 37 38 + static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); 39 + static int svc_deferred_recv(struct svc_rqst *rqstp); 40 + static struct cache_deferred_req *svc_defer(struct cache_req *req); 41 + static void svc_age_temp_xprts(unsigned long closure); 42 + 43 + /* apparently the "standard" is that clients close 44 + * idle connections after 5 minutes, servers after 45 + * 6 minutes 46 + * http://www.connectathon.org/talks96/nfstcp.pdf 47 + */ 48 + static int svc_conn_age_period = 6*60; 49 + 38 50 /* List of registered transport classes */ 39 51 static DEFINE_SPINLOCK(svc_xprt_class_lock); 40 52 static LIST_HEAD(svc_xprt_class_list); 53 + 54 + /* SMP locking strategy: 55 + * 56 + * svc_pool->sp_lock protects most of the fields of that pool. 57 + * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. 58 + * when both need to be taken (rare), svc_serv->sv_lock is first. 59 + * BKL protects svc_serv->sv_nrthread. 60 + * svc_sock->sk_lock protects the svc_sock->sk_deferred list 61 + * and the ->sk_info_authunix cache. 62 + * 63 + * The XPT_BUSY bit in xprt->xpt_flags prevents a transport being 64 + * enqueued multiply. During normal transport processing this bit 65 + * is set by svc_xprt_enqueue and cleared by svc_xprt_received. 66 + * Providers should not manipulate this bit directly. 67 + * 68 + * Some flags can be set to certain values at any time 69 + * providing that certain rules are followed: 70 + * 71 + * XPT_CONN, XPT_DATA: 72 + * - Can be set or cleared at any time. 73 + * - After a set, svc_xprt_enqueue must be called to enqueue 74 + * the transport for processing. 75 + * - After a clear, the transport must be read/accepted. 76 + * If this succeeds, it must be set again. 77 + * XPT_CLOSE: 78 + * - Can set at any time. It is never cleared. 79 + * XPT_DEAD: 80 + * - Can only be set while XPT_BUSY is held which ensures 81 + * that no other thread will be using the transport or will 82 + * try to set XPT_DEAD. 83 + */ 41 84 42 85 int svc_reg_xprt_class(struct svc_xprt_class *xcl) 43 86 { ··· 221 178 } 222 179 EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs); 223 180 181 + /** 182 + * svc_print_addr - Format rq_addr field for printing 183 + * @rqstp: svc_rqst struct containing address to print 184 + * @buf: target buffer for formatted address 185 + * @len: length of target buffer 186 + * 187 + */ 188 + char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len) 189 + { 190 + return __svc_print_addr(svc_addr(rqstp), buf, len); 191 + } 192 + EXPORT_SYMBOL_GPL(svc_print_addr); 193 + 194 + /* 195 + * Queue up an idle server thread. Must have pool->sp_lock held. 196 + * Note: this is really a stack rather than a queue, so that we only 197 + * use as many different threads as we need, and the rest don't pollute 198 + * the cache. 199 + */ 200 + static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp) 201 + { 202 + list_add(&rqstp->rq_list, &pool->sp_threads); 203 + } 204 + 205 + /* 206 + * Dequeue an nfsd thread. Must have pool->sp_lock held. 207 + */ 208 + static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) 209 + { 210 + list_del(&rqstp->rq_list); 211 + } 212 + 213 + /* 214 + * Queue up a transport with data pending. If there are idle nfsd 215 + * processes, wake 'em up. 
216 + * 217 + */ 218 + void svc_xprt_enqueue(struct svc_xprt *xprt) 219 + { 220 + struct svc_serv *serv = xprt->xpt_server; 221 + struct svc_pool *pool; 222 + struct svc_rqst *rqstp; 223 + int cpu; 224 + 225 + if (!(xprt->xpt_flags & 226 + ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) 227 + return; 228 + if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 229 + return; 230 + 231 + cpu = get_cpu(); 232 + pool = svc_pool_for_cpu(xprt->xpt_server, cpu); 233 + put_cpu(); 234 + 235 + spin_lock_bh(&pool->sp_lock); 236 + 237 + if (!list_empty(&pool->sp_threads) && 238 + !list_empty(&pool->sp_sockets)) 239 + printk(KERN_ERR 240 + "svc_xprt_enqueue: " 241 + "threads and transports both waiting??\n"); 242 + 243 + if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { 244 + /* Don't enqueue dead transports */ 245 + dprintk("svc: transport %p is dead, not enqueued\n", xprt); 246 + goto out_unlock; 247 + } 248 + 249 + /* Mark transport as busy. It will remain in this state until 250 + * the provider calls svc_xprt_received. We update XPT_BUSY 251 + * atomically because it also guards against trying to enqueue 252 + * the transport twice. 253 + */ 254 + if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) { 255 + /* Don't enqueue transport while already enqueued */ 256 + dprintk("svc: transport %p busy, not enqueued\n", xprt); 257 + goto out_unlock; 258 + } 259 + BUG_ON(xprt->xpt_pool != NULL); 260 + xprt->xpt_pool = pool; 261 + 262 + /* Handle pending connection */ 263 + if (test_bit(XPT_CONN, &xprt->xpt_flags)) 264 + goto process; 265 + 266 + /* Handle close in-progress */ 267 + if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) 268 + goto process; 269 + 270 + /* Check if we have space to reply to a request */ 271 + if (!xprt->xpt_ops->xpo_has_wspace(xprt)) { 272 + /* Don't enqueue while not enough space for reply */ 273 + dprintk("svc: no write space, transport %p not enqueued\n", 274 + xprt); 275 + xprt->xpt_pool = NULL; 276 + clear_bit(XPT_BUSY, &xprt->xpt_flags); 277 + goto out_unlock; 278 + } 279 + 280 + process: 281 + if (!list_empty(&pool->sp_threads)) { 282 + rqstp = list_entry(pool->sp_threads.next, 283 + struct svc_rqst, 284 + rq_list); 285 + dprintk("svc: transport %p served by daemon %p\n", 286 + xprt, rqstp); 287 + svc_thread_dequeue(pool, rqstp); 288 + if (rqstp->rq_xprt) 289 + printk(KERN_ERR 290 + "svc_xprt_enqueue: server %p, rq_xprt=%p!\n", 291 + rqstp, rqstp->rq_xprt); 292 + rqstp->rq_xprt = xprt; 293 + svc_xprt_get(xprt); 294 + rqstp->rq_reserved = serv->sv_max_mesg; 295 + atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); 296 + BUG_ON(xprt->xpt_pool != pool); 297 + wake_up(&rqstp->rq_wait); 298 + } else { 299 + dprintk("svc: transport %p put into queue\n", xprt); 300 + list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); 301 + BUG_ON(xprt->xpt_pool != pool); 302 + } 303 + 304 + out_unlock: 305 + spin_unlock_bh(&pool->sp_lock); 306 + } 307 + EXPORT_SYMBOL_GPL(svc_xprt_enqueue); 308 + 309 + /* 310 + * Dequeue the first transport. Must be called with the pool->sp_lock held. 
311 + */ 312 + static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) 313 + { 314 + struct svc_xprt *xprt; 315 + 316 + if (list_empty(&pool->sp_sockets)) 317 + return NULL; 318 + 319 + xprt = list_entry(pool->sp_sockets.next, 320 + struct svc_xprt, xpt_ready); 321 + list_del_init(&xprt->xpt_ready); 322 + 323 + dprintk("svc: transport %p dequeued, inuse=%d\n", 324 + xprt, atomic_read(&xprt->xpt_ref.refcount)); 325 + 326 + return xprt; 327 + } 328 + 329 + /* 330 + * svc_xprt_received conditionally queues the transport for processing 331 + * by another thread. The caller must hold the XPT_BUSY bit and must 332 + * not thereafter touch transport data. 333 + * 334 + * Note: XPT_DATA only gets cleared when a read-attempt finds no (or 335 + * insufficient) data. 336 + */ 337 + void svc_xprt_received(struct svc_xprt *xprt) 338 + { 339 + BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); 340 + xprt->xpt_pool = NULL; 341 + clear_bit(XPT_BUSY, &xprt->xpt_flags); 342 + svc_xprt_enqueue(xprt); 343 + } 344 + EXPORT_SYMBOL_GPL(svc_xprt_received); 345 + 346 + /** 347 + * svc_reserve - change the space reserved for the reply to a request. 348 + * @rqstp: The request in question 349 + * @space: new max space to reserve 350 + * 351 + * Each request reserves some space on the output queue of the transport 352 + * to make sure the reply fits. This function reduces that reserved 353 + * space to be the amount of space used already, plus @space. 354 + * 355 + */ 356 + void svc_reserve(struct svc_rqst *rqstp, int space) 357 + { 358 + space += rqstp->rq_res.head[0].iov_len; 359 + 360 + if (space < rqstp->rq_reserved) { 361 + struct svc_xprt *xprt = rqstp->rq_xprt; 362 + atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); 363 + rqstp->rq_reserved = space; 364 + 365 + svc_xprt_enqueue(xprt); 366 + } 367 + } 368 + 369 + static void svc_xprt_release(struct svc_rqst *rqstp) 370 + { 371 + struct svc_xprt *xprt = rqstp->rq_xprt; 372 + 373 + rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); 374 + 375 + svc_free_res_pages(rqstp); 376 + rqstp->rq_res.page_len = 0; 377 + rqstp->rq_res.page_base = 0; 378 + 379 + /* Reset response buffer and release 380 + * the reservation. 381 + * But first, check that enough space was reserved 382 + * for the reply, otherwise we have a bug! 383 + */ 384 + if ((rqstp->rq_res.len) > rqstp->rq_reserved) 385 + printk(KERN_ERR "RPC request reserved %d but used %d\n", 386 + rqstp->rq_reserved, 387 + rqstp->rq_res.len); 388 + 389 + rqstp->rq_res.head[0].iov_len = 0; 390 + svc_reserve(rqstp, 0); 391 + rqstp->rq_xprt = NULL; 392 + 393 + svc_xprt_put(xprt); 394 + } 395 + 396 + /* 397 + * External function to wake up a server waiting for data 398 + * This really only makes sense for services like lockd 399 + * which have exactly one thread anyway. 
400 + */ 401 + void svc_wake_up(struct svc_serv *serv) 402 + { 403 + struct svc_rqst *rqstp; 404 + unsigned int i; 405 + struct svc_pool *pool; 406 + 407 + for (i = 0; i < serv->sv_nrpools; i++) { 408 + pool = &serv->sv_pools[i]; 409 + 410 + spin_lock_bh(&pool->sp_lock); 411 + if (!list_empty(&pool->sp_threads)) { 412 + rqstp = list_entry(pool->sp_threads.next, 413 + struct svc_rqst, 414 + rq_list); 415 + dprintk("svc: daemon %p woken up.\n", rqstp); 416 + /* 417 + svc_thread_dequeue(pool, rqstp); 418 + rqstp->rq_xprt = NULL; 419 + */ 420 + wake_up(&rqstp->rq_wait); 421 + } 422 + spin_unlock_bh(&pool->sp_lock); 423 + } 424 + } 425 + 426 + int svc_port_is_privileged(struct sockaddr *sin) 427 + { 428 + switch (sin->sa_family) { 429 + case AF_INET: 430 + return ntohs(((struct sockaddr_in *)sin)->sin_port) 431 + < PROT_SOCK; 432 + case AF_INET6: 433 + return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) 434 + < PROT_SOCK; 435 + default: 436 + return 0; 437 + } 438 + } 439 + 440 + /* 441 + * Make sure that we don't have too many active connections. If we 442 + * have, something must be dropped. 443 + * 444 + * There's no point in trying to do random drop here for DoS 445 + * prevention. The NFS clients does 1 reconnect in 15 seconds. An 446 + * attacker can easily beat that. 447 + * 448 + * The only somewhat efficient mechanism would be if drop old 449 + * connections from the same IP first. But right now we don't even 450 + * record the client IP in svc_sock. 451 + */ 452 + static void svc_check_conn_limits(struct svc_serv *serv) 453 + { 454 + if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) { 455 + struct svc_xprt *xprt = NULL; 456 + spin_lock_bh(&serv->sv_lock); 457 + if (!list_empty(&serv->sv_tempsocks)) { 458 + if (net_ratelimit()) { 459 + /* Try to help the admin */ 460 + printk(KERN_NOTICE "%s: too many open " 461 + "connections, consider increasing the " 462 + "number of nfsd threads\n", 463 + serv->sv_name); 464 + } 465 + /* 466 + * Always select the oldest connection. It's not fair, 467 + * but so is life 468 + */ 469 + xprt = list_entry(serv->sv_tempsocks.prev, 470 + struct svc_xprt, 471 + xpt_list); 472 + set_bit(XPT_CLOSE, &xprt->xpt_flags); 473 + svc_xprt_get(xprt); 474 + } 475 + spin_unlock_bh(&serv->sv_lock); 476 + 477 + if (xprt) { 478 + svc_xprt_enqueue(xprt); 479 + svc_xprt_put(xprt); 480 + } 481 + } 482 + } 483 + 484 + /* 485 + * Receive the next request on any transport. This code is carefully 486 + * organised not to touch any cachelines in the shared svc_serv 487 + * structure, only cachelines in the local svc_pool. 488 + */ 489 + int svc_recv(struct svc_rqst *rqstp, long timeout) 490 + { 491 + struct svc_xprt *xprt = NULL; 492 + struct svc_serv *serv = rqstp->rq_server; 493 + struct svc_pool *pool = rqstp->rq_pool; 494 + int len, i; 495 + int pages; 496 + struct xdr_buf *arg; 497 + DECLARE_WAITQUEUE(wait, current); 498 + 499 + dprintk("svc: server %p waiting for data (to = %ld)\n", 500 + rqstp, timeout); 501 + 502 + if (rqstp->rq_xprt) 503 + printk(KERN_ERR 504 + "svc_recv: service %p, transport not NULL!\n", 505 + rqstp); 506 + if (waitqueue_active(&rqstp->rq_wait)) 507 + printk(KERN_ERR 508 + "svc_recv: service %p, wait queue active!\n", 509 + rqstp); 510 + 511 + /* now allocate needed pages. 
If we get a failure, sleep briefly */ 512 + pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; 513 + for (i = 0; i < pages ; i++) 514 + while (rqstp->rq_pages[i] == NULL) { 515 + struct page *p = alloc_page(GFP_KERNEL); 516 + if (!p) { 517 + int j = msecs_to_jiffies(500); 518 + schedule_timeout_uninterruptible(j); 519 + } 520 + rqstp->rq_pages[i] = p; 521 + } 522 + rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */ 523 + BUG_ON(pages >= RPCSVC_MAXPAGES); 524 + 525 + /* Make arg->head point to first page and arg->pages point to rest */ 526 + arg = &rqstp->rq_arg; 527 + arg->head[0].iov_base = page_address(rqstp->rq_pages[0]); 528 + arg->head[0].iov_len = PAGE_SIZE; 529 + arg->pages = rqstp->rq_pages + 1; 530 + arg->page_base = 0; 531 + /* save at least one page for response */ 532 + arg->page_len = (pages-2)*PAGE_SIZE; 533 + arg->len = (pages-1)*PAGE_SIZE; 534 + arg->tail[0].iov_len = 0; 535 + 536 + try_to_freeze(); 537 + cond_resched(); 538 + if (signalled()) 539 + return -EINTR; 540 + 541 + spin_lock_bh(&pool->sp_lock); 542 + xprt = svc_xprt_dequeue(pool); 543 + if (xprt) { 544 + rqstp->rq_xprt = xprt; 545 + svc_xprt_get(xprt); 546 + rqstp->rq_reserved = serv->sv_max_mesg; 547 + atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); 548 + } else { 549 + /* No data pending. Go to sleep */ 550 + svc_thread_enqueue(pool, rqstp); 551 + 552 + /* 553 + * We have to be able to interrupt this wait 554 + * to bring down the daemons ... 555 + */ 556 + set_current_state(TASK_INTERRUPTIBLE); 557 + add_wait_queue(&rqstp->rq_wait, &wait); 558 + spin_unlock_bh(&pool->sp_lock); 559 + 560 + schedule_timeout(timeout); 561 + 562 + try_to_freeze(); 563 + 564 + spin_lock_bh(&pool->sp_lock); 565 + remove_wait_queue(&rqstp->rq_wait, &wait); 566 + 567 + xprt = rqstp->rq_xprt; 568 + if (!xprt) { 569 + svc_thread_dequeue(pool, rqstp); 570 + spin_unlock_bh(&pool->sp_lock); 571 + dprintk("svc: server %p, no data yet\n", rqstp); 572 + return signalled()? 
-EINTR : -EAGAIN; 573 + } 574 + } 575 + spin_unlock_bh(&pool->sp_lock); 576 + 577 + len = 0; 578 + if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { 579 + dprintk("svc_recv: found XPT_CLOSE\n"); 580 + svc_delete_xprt(xprt); 581 + } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { 582 + struct svc_xprt *newxpt; 583 + newxpt = xprt->xpt_ops->xpo_accept(xprt); 584 + if (newxpt) { 585 + /* 586 + * We know this module_get will succeed because the 587 + * listener holds a reference too 588 + */ 589 + __module_get(newxpt->xpt_class->xcl_owner); 590 + svc_check_conn_limits(xprt->xpt_server); 591 + spin_lock_bh(&serv->sv_lock); 592 + set_bit(XPT_TEMP, &newxpt->xpt_flags); 593 + list_add(&newxpt->xpt_list, &serv->sv_tempsocks); 594 + serv->sv_tmpcnt++; 595 + if (serv->sv_temptimer.function == NULL) { 596 + /* setup timer to age temp transports */ 597 + setup_timer(&serv->sv_temptimer, 598 + svc_age_temp_xprts, 599 + (unsigned long)serv); 600 + mod_timer(&serv->sv_temptimer, 601 + jiffies + svc_conn_age_period * HZ); 602 + } 603 + spin_unlock_bh(&serv->sv_lock); 604 + svc_xprt_received(newxpt); 605 + } 606 + svc_xprt_received(xprt); 607 + } else { 608 + dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", 609 + rqstp, pool->sp_id, xprt, 610 + atomic_read(&xprt->xpt_ref.refcount)); 611 + rqstp->rq_deferred = svc_deferred_dequeue(xprt); 612 + if (rqstp->rq_deferred) { 613 + svc_xprt_received(xprt); 614 + len = svc_deferred_recv(rqstp); 615 + } else 616 + len = xprt->xpt_ops->xpo_recvfrom(rqstp); 617 + dprintk("svc: got len=%d\n", len); 618 + } 619 + 620 + /* No data, incomplete (TCP) read, or accept() */ 621 + if (len == 0 || len == -EAGAIN) { 622 + rqstp->rq_res.len = 0; 623 + svc_xprt_release(rqstp); 624 + return -EAGAIN; 625 + } 626 + clear_bit(XPT_OLD, &xprt->xpt_flags); 627 + 628 + rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); 629 + rqstp->rq_chandle.defer = svc_defer; 630 + 631 + if (serv->sv_stats) 632 + serv->sv_stats->netcnt++; 633 + return len; 634 + } 635 + 636 + /* 637 + * Drop request 638 + */ 639 + void svc_drop(struct svc_rqst *rqstp) 640 + { 641 + dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); 642 + svc_xprt_release(rqstp); 643 + } 644 + 645 + /* 646 + * Return reply to client. 647 + */ 648 + int svc_send(struct svc_rqst *rqstp) 649 + { 650 + struct svc_xprt *xprt; 651 + int len; 652 + struct xdr_buf *xb; 653 + 654 + xprt = rqstp->rq_xprt; 655 + if (!xprt) 656 + return -EFAULT; 657 + 658 + /* release the receive skb before sending the reply */ 659 + rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); 660 + 661 + /* calculate over-all length */ 662 + xb = &rqstp->rq_res; 663 + xb->len = xb->head[0].iov_len + 664 + xb->page_len + 665 + xb->tail[0].iov_len; 666 + 667 + /* Grab mutex to serialize outgoing data. */ 668 + mutex_lock(&xprt->xpt_mutex); 669 + if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 670 + len = -ENOTCONN; 671 + else 672 + len = xprt->xpt_ops->xpo_sendto(rqstp); 673 + mutex_unlock(&xprt->xpt_mutex); 674 + svc_xprt_release(rqstp); 675 + 676 + if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) 677 + return 0; 678 + return len; 679 + } 680 + 681 + /* 682 + * Timer function to close old temporary transports, using 683 + * a mark-and-sweep algorithm. 
684 + */ 685 + static void svc_age_temp_xprts(unsigned long closure) 686 + { 687 + struct svc_serv *serv = (struct svc_serv *)closure; 688 + struct svc_xprt *xprt; 689 + struct list_head *le, *next; 690 + LIST_HEAD(to_be_aged); 691 + 692 + dprintk("svc_age_temp_xprts\n"); 693 + 694 + if (!spin_trylock_bh(&serv->sv_lock)) { 695 + /* busy, try again 1 sec later */ 696 + dprintk("svc_age_temp_xprts: busy\n"); 697 + mod_timer(&serv->sv_temptimer, jiffies + HZ); 698 + return; 699 + } 700 + 701 + list_for_each_safe(le, next, &serv->sv_tempsocks) { 702 + xprt = list_entry(le, struct svc_xprt, xpt_list); 703 + 704 + /* First time through, just mark it OLD. Second time 705 + * through, close it. */ 706 + if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags)) 707 + continue; 708 + if (atomic_read(&xprt->xpt_ref.refcount) > 1 709 + || test_bit(XPT_BUSY, &xprt->xpt_flags)) 710 + continue; 711 + svc_xprt_get(xprt); 712 + list_move(le, &to_be_aged); 713 + set_bit(XPT_CLOSE, &xprt->xpt_flags); 714 + set_bit(XPT_DETACHED, &xprt->xpt_flags); 715 + } 716 + spin_unlock_bh(&serv->sv_lock); 717 + 718 + while (!list_empty(&to_be_aged)) { 719 + le = to_be_aged.next; 720 + /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */ 721 + list_del_init(le); 722 + xprt = list_entry(le, struct svc_xprt, xpt_list); 723 + 724 + dprintk("queuing xprt %p for closing\n", xprt); 725 + 726 + /* a thread will dequeue and close it soon */ 727 + svc_xprt_enqueue(xprt); 728 + svc_xprt_put(xprt); 729 + } 730 + 731 + mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); 732 + } 733 + 734 + /* 735 + * Remove a dead transport 736 + */ 737 + void svc_delete_xprt(struct svc_xprt *xprt) 738 + { 739 + struct svc_serv *serv = xprt->xpt_server; 740 + 741 + dprintk("svc: svc_delete_xprt(%p)\n", xprt); 742 + xprt->xpt_ops->xpo_detach(xprt); 743 + 744 + spin_lock_bh(&serv->sv_lock); 745 + if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) 746 + list_del_init(&xprt->xpt_list); 747 + /* 748 + * We used to delete the transport from whichever list 749 + * it's sk_xprt.xpt_ready node was on, but we don't actually 750 + * need to. This is because the only time we're called 751 + * while still attached to a queue, the queue itself 752 + * is about to be destroyed (in svc_destroy). 
753 + */ 754 + if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) { 755 + BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2); 756 + if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 757 + serv->sv_tmpcnt--; 758 + svc_xprt_put(xprt); 759 + } 760 + spin_unlock_bh(&serv->sv_lock); 761 + } 762 + 763 + void svc_close_xprt(struct svc_xprt *xprt) 764 + { 765 + set_bit(XPT_CLOSE, &xprt->xpt_flags); 766 + if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) 767 + /* someone else will have to effect the close */ 768 + return; 769 + 770 + svc_xprt_get(xprt); 771 + svc_delete_xprt(xprt); 772 + clear_bit(XPT_BUSY, &xprt->xpt_flags); 773 + svc_xprt_put(xprt); 774 + } 775 + 776 + void svc_close_all(struct list_head *xprt_list) 777 + { 778 + struct svc_xprt *xprt; 779 + struct svc_xprt *tmp; 780 + 781 + list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { 782 + set_bit(XPT_CLOSE, &xprt->xpt_flags); 783 + if (test_bit(XPT_BUSY, &xprt->xpt_flags)) { 784 + /* Waiting to be processed, but no threads left, 785 + * So just remove it from the waiting list 786 + */ 787 + list_del_init(&xprt->xpt_ready); 788 + clear_bit(XPT_BUSY, &xprt->xpt_flags); 789 + } 790 + svc_close_xprt(xprt); 791 + } 792 + } 793 + 794 + /* 795 + * Handle defer and revisit of requests 796 + */ 797 + 798 + static void svc_revisit(struct cache_deferred_req *dreq, int too_many) 799 + { 800 + struct svc_deferred_req *dr = 801 + container_of(dreq, struct svc_deferred_req, handle); 802 + struct svc_xprt *xprt = dr->xprt; 803 + 804 + if (too_many) { 805 + svc_xprt_put(xprt); 806 + kfree(dr); 807 + return; 808 + } 809 + dprintk("revisit queued\n"); 810 + dr->xprt = NULL; 811 + spin_lock(&xprt->xpt_lock); 812 + list_add(&dr->handle.recent, &xprt->xpt_deferred); 813 + spin_unlock(&xprt->xpt_lock); 814 + set_bit(XPT_DEFERRED, &xprt->xpt_flags); 815 + svc_xprt_enqueue(xprt); 816 + svc_xprt_put(xprt); 817 + } 818 + 819 + static struct cache_deferred_req *svc_defer(struct cache_req *req) 820 + { 821 + struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); 822 + int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len); 823 + struct svc_deferred_req *dr; 824 + 825 + if (rqstp->rq_arg.page_len) 826 + return NULL; /* if more than a page, give up FIXME */ 827 + if (rqstp->rq_deferred) { 828 + dr = rqstp->rq_deferred; 829 + rqstp->rq_deferred = NULL; 830 + } else { 831 + int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; 832 + /* FIXME maybe discard if size too large */ 833 + dr = kmalloc(size, GFP_KERNEL); 834 + if (dr == NULL) 835 + return NULL; 836 + 837 + dr->handle.owner = rqstp->rq_server; 838 + dr->prot = rqstp->rq_prot; 839 + memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen); 840 + dr->addrlen = rqstp->rq_addrlen; 841 + dr->daddr = rqstp->rq_daddr; 842 + dr->argslen = rqstp->rq_arg.len >> 2; 843 + memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, 844 + dr->argslen<<2); 845 + } 846 + svc_xprt_get(rqstp->rq_xprt); 847 + dr->xprt = rqstp->rq_xprt; 848 + 849 + dr->handle.revisit = svc_revisit; 850 + return &dr->handle; 851 + } 852 + 853 + /* 854 + * recv data from a deferred request into an active one 855 + */ 856 + static int svc_deferred_recv(struct svc_rqst *rqstp) 857 + { 858 + struct svc_deferred_req *dr = rqstp->rq_deferred; 859 + 860 + rqstp->rq_arg.head[0].iov_base = dr->args; 861 + rqstp->rq_arg.head[0].iov_len = dr->argslen<<2; 862 + rqstp->rq_arg.page_len = 0; 863 + rqstp->rq_arg.len = dr->argslen<<2; 864 + rqstp->rq_prot = dr->prot; 865 + memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen); 866 + 
rqstp->rq_addrlen = dr->addrlen; 867 + rqstp->rq_daddr = dr->daddr; 868 + rqstp->rq_respages = rqstp->rq_pages; 869 + return dr->argslen<<2; 870 + } 871 + 872 + 873 + static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) 874 + { 875 + struct svc_deferred_req *dr = NULL; 876 + 877 + if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) 878 + return NULL; 879 + spin_lock(&xprt->xpt_lock); 880 + clear_bit(XPT_DEFERRED, &xprt->xpt_flags); 881 + if (!list_empty(&xprt->xpt_deferred)) { 882 + dr = list_entry(xprt->xpt_deferred.next, 883 + struct svc_deferred_req, 884 + handle.recent); 885 + list_del_init(&dr->handle.recent); 886 + set_bit(XPT_DEFERRED, &xprt->xpt_flags); 887 + } 888 + spin_unlock(&xprt->xpt_lock); 889 + return dr; 890 + }
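From the service-thread side, the code moved into this file is consumed as a receive loop: svc_recv() sleeps until svc_xprt_enqueue() hands the thread a transport, and the reply eventually goes back out through svc_send(). The following is a rough sketch of such a loop, assuming the usual dispatch through svc_process() (not part of this patch) and with error handling trimmed; my_service_thread() is a hypothetical name.

/*
 * Sketch of a service thread built on the interfaces moved here;
 * in-tree services such as nfsd dispatch via svc_process(), which
 * sends the reply with svc_send().
 */
static int my_service_thread(void *data)                  /* hypothetical */
{
        struct svc_rqst *rqstp = data;                    /* hypothetical setup */
        int err;

        for (;;) {
                /* Sleep until svc_xprt_enqueue() hands us a transport. */
                err = svc_recv(rqstp, 30 * 60 * HZ);
                if (err == -EINTR)
                        break;                            /* signalled: shut down */
                if (err == -EAGAIN)
                        continue;                         /* nothing usable received */

                /* Decode, dispatch and encode the RPC. */
                svc_process(rqstp);
        }
        return 0;
}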
+24 -810
net/sunrpc/svcsock.c
··· 48 48 #include <linux/sunrpc/svcsock.h> 49 49 #include <linux/sunrpc/stats.h> 50 50 51 - /* SMP locking strategy: 52 - * 53 - * svc_pool->sp_lock protects most of the fields of that pool. 54 - * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. 55 - * when both need to be taken (rare), svc_serv->sv_lock is first. 56 - * BKL protects svc_serv->sv_nrthread. 57 - * svc_sock->sk_lock protects the svc_sock->sk_deferred list 58 - * and the ->sk_info_authunix cache. 59 - * svc_sock->sk_xprt.xpt_flags.XPT_BUSY prevents a svc_sock being 60 - * enqueued multiply. 61 - * 62 - * Some flags can be set to certain values at any time 63 - * providing that certain rules are followed: 64 - * 65 - * XPT_CONN, XPT_DATA, can be set or cleared at any time. 66 - * after a set, svc_xprt_enqueue must be called. 67 - * after a clear, the socket must be read/accepted 68 - * if this succeeds, it must be set again. 69 - * XPT_CLOSE can set at any time. It is never cleared. 70 - * xpt_ref contains a bias of '1' until XPT_DEAD is set. 71 - * so when xprt_ref hits zero, we know the transport is dead 72 - * and no-one is using it. 73 - * XPT_DEAD can only be set while XPT_BUSY is held which ensures 74 - * no other thread will be using the socket or will try to 75 - * set XPT_DEAD. 76 - * 77 - */ 78 - 79 51 #define RPCDBG_FACILITY RPCDBG_SVCXPRT 80 52 81 53 82 54 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, 83 55 int *errp, int flags); 84 - static void svc_delete_xprt(struct svc_xprt *xprt); 85 56 static void svc_udp_data_ready(struct sock *, int); 86 57 static int svc_udp_recvfrom(struct svc_rqst *); 87 58 static int svc_udp_sendto(struct svc_rqst *); 88 - static void svc_close_xprt(struct svc_xprt *xprt); 89 59 static void svc_sock_detach(struct svc_xprt *); 90 60 static void svc_sock_free(struct svc_xprt *); 91 61 92 - static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); 93 - static int svc_deferred_recv(struct svc_rqst *rqstp); 94 - static struct cache_deferred_req *svc_defer(struct cache_req *req); 95 62 static struct svc_xprt *svc_create_socket(struct svc_serv *, int, 96 63 struct sockaddr *, int, int); 97 - static void svc_age_temp_xprts(unsigned long closure); 98 - 99 - /* apparently the "standard" is that clients close 100 - * idle connections after 5 minutes, servers after 101 - * 6 minutes 102 - * http://www.connectathon.org/talks96/nfstcp.pdf 103 - */ 104 - static int svc_conn_age_period = 6*60; 105 - 106 64 #ifdef CONFIG_DEBUG_LOCK_ALLOC 107 65 static struct lock_class_key svc_key[2]; 108 66 static struct lock_class_key svc_slock_key[2]; 109 67 110 - static inline void svc_reclassify_socket(struct socket *sock) 68 + static void svc_reclassify_socket(struct socket *sock) 111 69 { 112 70 struct sock *sk = sock->sk; 113 71 BUG_ON(sock_owned_by_user(sk)); ··· 89 131 } 90 132 } 91 133 #else 92 - static inline void svc_reclassify_socket(struct socket *sock) 134 + static void svc_reclassify_socket(struct socket *sock) 93 135 { 94 136 } 95 137 #endif 96 - 97 - static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len) 98 - { 99 - switch (addr->sa_family) { 100 - case AF_INET: 101 - snprintf(buf, len, "%u.%u.%u.%u, port=%u", 102 - NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), 103 - ntohs(((struct sockaddr_in *) addr)->sin_port)); 104 - break; 105 - 106 - case AF_INET6: 107 - snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", 108 - NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), 109 - ntohs(((struct sockaddr_in6 *) 
addr)->sin6_port)); 110 - break; 111 - 112 - default: 113 - snprintf(buf, len, "unknown address type: %d", addr->sa_family); 114 - break; 115 - } 116 - return buf; 117 - } 118 - 119 - /** 120 - * svc_print_addr - Format rq_addr field for printing 121 - * @rqstp: svc_rqst struct containing address to print 122 - * @buf: target buffer for formatted address 123 - * @len: length of target buffer 124 - * 125 - */ 126 - char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len) 127 - { 128 - return __svc_print_addr(svc_addr(rqstp), buf, len); 129 - } 130 - EXPORT_SYMBOL_GPL(svc_print_addr); 131 - 132 - /* 133 - * Queue up an idle server thread. Must have pool->sp_lock held. 134 - * Note: this is really a stack rather than a queue, so that we only 135 - * use as many different threads as we need, and the rest don't pollute 136 - * the cache. 137 - */ 138 - static inline void 139 - svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp) 140 - { 141 - list_add(&rqstp->rq_list, &pool->sp_threads); 142 - } 143 - 144 - /* 145 - * Dequeue an nfsd thread. Must have pool->sp_lock held. 146 - */ 147 - static inline void 148 - svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) 149 - { 150 - list_del(&rqstp->rq_list); 151 - } 152 138 153 139 /* 154 140 * Release an skbuff after use ··· 113 211 if (dr) { 114 212 rqstp->rq_deferred = NULL; 115 213 kfree(dr); 116 - } 117 - } 118 - 119 - /* 120 - * Queue up a socket with data pending. If there are idle nfsd 121 - * processes, wake 'em up. 122 - * 123 - */ 124 - void svc_xprt_enqueue(struct svc_xprt *xprt) 125 - { 126 - struct svc_serv *serv = xprt->xpt_server; 127 - struct svc_pool *pool; 128 - struct svc_rqst *rqstp; 129 - int cpu; 130 - 131 - if (!(xprt->xpt_flags & 132 - ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) 133 - return; 134 - if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 135 - return; 136 - 137 - cpu = get_cpu(); 138 - pool = svc_pool_for_cpu(xprt->xpt_server, cpu); 139 - put_cpu(); 140 - 141 - spin_lock_bh(&pool->sp_lock); 142 - 143 - if (!list_empty(&pool->sp_threads) && 144 - !list_empty(&pool->sp_sockets)) 145 - printk(KERN_ERR 146 - "svc_xprt_enqueue: " 147 - "threads and transports both waiting??\n"); 148 - 149 - if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { 150 - /* Don't enqueue dead sockets */ 151 - dprintk("svc: transport %p is dead, not enqueued\n", xprt); 152 - goto out_unlock; 153 - } 154 - 155 - /* Mark socket as busy. It will remain in this state until the 156 - * server has processed all pending data and put the socket back 157 - * on the idle list. We update XPT_BUSY atomically because 158 - * it also guards against trying to enqueue the svc_sock twice. 
159 - */ 160 - if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) { 161 - /* Don't enqueue socket while already enqueued */ 162 - dprintk("svc: transport %p busy, not enqueued\n", xprt); 163 - goto out_unlock; 164 - } 165 - BUG_ON(xprt->xpt_pool != NULL); 166 - xprt->xpt_pool = pool; 167 - 168 - /* Handle pending connection */ 169 - if (test_bit(XPT_CONN, &xprt->xpt_flags)) 170 - goto process; 171 - 172 - /* Handle close in-progress */ 173 - if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) 174 - goto process; 175 - 176 - /* Check if we have space to reply to a request */ 177 - if (!xprt->xpt_ops->xpo_has_wspace(xprt)) { 178 - /* Don't enqueue while not enough space for reply */ 179 - dprintk("svc: no write space, transport %p not enqueued\n", 180 - xprt); 181 - xprt->xpt_pool = NULL; 182 - clear_bit(XPT_BUSY, &xprt->xpt_flags); 183 - goto out_unlock; 184 - } 185 - 186 - process: 187 - if (!list_empty(&pool->sp_threads)) { 188 - rqstp = list_entry(pool->sp_threads.next, 189 - struct svc_rqst, 190 - rq_list); 191 - dprintk("svc: transport %p served by daemon %p\n", 192 - xprt, rqstp); 193 - svc_thread_dequeue(pool, rqstp); 194 - if (rqstp->rq_xprt) 195 - printk(KERN_ERR 196 - "svc_xprt_enqueue: server %p, rq_xprt=%p!\n", 197 - rqstp, rqstp->rq_xprt); 198 - rqstp->rq_xprt = xprt; 199 - svc_xprt_get(xprt); 200 - rqstp->rq_reserved = serv->sv_max_mesg; 201 - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); 202 - BUG_ON(xprt->xpt_pool != pool); 203 - wake_up(&rqstp->rq_wait); 204 - } else { 205 - dprintk("svc: transport %p put into queue\n", xprt); 206 - list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); 207 - BUG_ON(xprt->xpt_pool != pool); 208 - } 209 - 210 - out_unlock: 211 - spin_unlock_bh(&pool->sp_lock); 212 - } 213 - EXPORT_SYMBOL_GPL(svc_xprt_enqueue); 214 - 215 - /* 216 - * Dequeue the first socket. Must be called with the pool->sp_lock held. 217 - */ 218 - static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) 219 - { 220 - struct svc_xprt *xprt; 221 - 222 - if (list_empty(&pool->sp_sockets)) 223 - return NULL; 224 - 225 - xprt = list_entry(pool->sp_sockets.next, 226 - struct svc_xprt, xpt_ready); 227 - list_del_init(&xprt->xpt_ready); 228 - 229 - dprintk("svc: transport %p dequeued, inuse=%d\n", 230 - xprt, atomic_read(&xprt->xpt_ref.refcount)); 231 - 232 - return xprt; 233 - } 234 - 235 - /* 236 - * svc_xprt_received conditionally queues the transport for processing 237 - * by another thread. The caller must hold the XPT_BUSY bit and must 238 - * not thereafter touch transport data. 239 - * 240 - * Note: XPT_DATA only gets cleared when a read-attempt finds no (or 241 - * insufficient) data. 242 - */ 243 - void svc_xprt_received(struct svc_xprt *xprt) 244 - { 245 - BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); 246 - xprt->xpt_pool = NULL; 247 - clear_bit(XPT_BUSY, &xprt->xpt_flags); 248 - svc_xprt_enqueue(xprt); 249 - } 250 - EXPORT_SYMBOL_GPL(svc_xprt_received); 251 - 252 - /** 253 - * svc_reserve - change the space reserved for the reply to a request. 254 - * @rqstp: The request in question 255 - * @space: new max space to reserve 256 - * 257 - * Each request reserves some space on the output queue of the socket 258 - * to make sure the reply fits. This function reduces that reserved 259 - * space to be the amount of space used already, plus @space. 
260 - * 261 - */ 262 - void svc_reserve(struct svc_rqst *rqstp, int space) 263 - { 264 - space += rqstp->rq_res.head[0].iov_len; 265 - 266 - if (space < rqstp->rq_reserved) { 267 - struct svc_xprt *xprt = rqstp->rq_xprt; 268 - atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); 269 - rqstp->rq_reserved = space; 270 - 271 - svc_xprt_enqueue(xprt); 272 - } 273 - } 274 - 275 - static void svc_xprt_release(struct svc_rqst *rqstp) 276 - { 277 - struct svc_xprt *xprt = rqstp->rq_xprt; 278 - 279 - rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); 280 - 281 - svc_free_res_pages(rqstp); 282 - rqstp->rq_res.page_len = 0; 283 - rqstp->rq_res.page_base = 0; 284 - 285 - /* Reset response buffer and release 286 - * the reservation. 287 - * But first, check that enough space was reserved 288 - * for the reply, otherwise we have a bug! 289 - */ 290 - if ((rqstp->rq_res.len) > rqstp->rq_reserved) 291 - printk(KERN_ERR "RPC request reserved %d but used %d\n", 292 - rqstp->rq_reserved, 293 - rqstp->rq_res.len); 294 - 295 - rqstp->rq_res.head[0].iov_len = 0; 296 - svc_reserve(rqstp, 0); 297 - rqstp->rq_xprt = NULL; 298 - 299 - svc_xprt_put(xprt); 300 - } 301 - 302 - /* 303 - * External function to wake up a server waiting for data 304 - * This really only makes sense for services like lockd 305 - * which have exactly one thread anyway. 306 - */ 307 - void 308 - svc_wake_up(struct svc_serv *serv) 309 - { 310 - struct svc_rqst *rqstp; 311 - unsigned int i; 312 - struct svc_pool *pool; 313 - 314 - for (i = 0; i < serv->sv_nrpools; i++) { 315 - pool = &serv->sv_pools[i]; 316 - 317 - spin_lock_bh(&pool->sp_lock); 318 - if (!list_empty(&pool->sp_threads)) { 319 - rqstp = list_entry(pool->sp_threads.next, 320 - struct svc_rqst, 321 - rq_list); 322 - dprintk("svc: daemon %p woken up.\n", rqstp); 323 - /* 324 - svc_thread_dequeue(pool, rqstp); 325 - rqstp->rq_xprt = NULL; 326 - */ 327 - wake_up(&rqstp->rq_wait); 328 - } 329 - spin_unlock_bh(&pool->sp_lock); 330 214 } 331 215 } 332 216 ··· 157 469 /* 158 470 * Generic sendto routine 159 471 */ 160 - static int 161 - svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) 472 + static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) 162 473 { 163 474 struct svc_sock *svsk = 164 475 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); ··· 292 605 /* 293 606 * Check input queue length 294 607 */ 295 - static int 296 - svc_recv_available(struct svc_sock *svsk) 608 + static int svc_recv_available(struct svc_sock *svsk) 297 609 { 298 610 struct socket *sock = svsk->sk_sock; 299 611 int avail, err; ··· 305 619 /* 306 620 * Generic recvfrom routine. 307 621 */ 308 - static int 309 - svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) 622 + static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, 623 + int buflen) 310 624 { 311 625 struct svc_sock *svsk = 312 626 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); ··· 326 640 /* 327 641 * Set socket snd and rcv buffer lengths 328 642 */ 329 - static inline void 330 - svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) 643 + static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, 644 + unsigned int rcv) 331 645 { 332 646 #if 0 333 647 mm_segment_t oldfs; ··· 352 666 /* 353 667 * INET callback when data has been received on the socket. 
354 668 */ 355 - static void 356 - svc_udp_data_ready(struct sock *sk, int count) 669 + static void svc_udp_data_ready(struct sock *sk, int count) 357 670 { 358 671 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 359 672 ··· 370 685 /* 371 686 * INET callback when space is newly available on the socket. 372 687 */ 373 - static void 374 - svc_write_space(struct sock *sk) 688 + static void svc_write_space(struct sock *sk) 375 689 { 376 690 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); 377 691 ··· 416 732 /* 417 733 * Receive a datagram from a UDP socket. 418 734 */ 419 - static int 420 - svc_udp_recvfrom(struct svc_rqst *rqstp) 735 + static int svc_udp_recvfrom(struct svc_rqst *rqstp) 421 736 { 422 737 struct svc_sock *svsk = 423 738 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); ··· 510 827 skb_free_datagram(svsk->sk_sk, skb); 511 828 } else { 512 829 /* we can use it in-place */ 513 - rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr); 830 + rqstp->rq_arg.head[0].iov_base = skb->data + 831 + sizeof(struct udphdr); 514 832 rqstp->rq_arg.head[0].iov_len = len; 515 833 if (skb_checksum_complete(skb)) { 516 834 skb_free_datagram(svsk->sk_sk, skb); ··· 622 938 3 * svsk->sk_xprt.xpt_server->sv_max_mesg, 623 939 3 * svsk->sk_xprt.xpt_server->sv_max_mesg); 624 940 625 - set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* might have come in before data_ready set up */ 941 + /* data might have come in before data_ready set up */ 942 + set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 626 943 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); 627 944 628 945 oldfs = get_fs(); ··· 638 953 * A data_ready event on a listening socket means there's a connection 639 954 * pending. Do not use state_change as a substitute for it. 640 955 */ 641 - static void 642 - svc_tcp_listen_data_ready(struct sock *sk, int count_unused) 956 + static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) 643 957 { 644 958 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 645 959 ··· 670 986 /* 671 987 * A state change on a connected socket means it's dying or dead. 672 988 */ 673 - static void 674 - svc_tcp_state_change(struct sock *sk) 989 + static void svc_tcp_state_change(struct sock *sk) 675 990 { 676 991 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 677 992 ··· 687 1004 wake_up_interruptible_all(sk->sk_sleep); 688 1005 } 689 1006 690 - static void 691 - svc_tcp_data_ready(struct sock *sk, int count) 1007 + static void svc_tcp_data_ready(struct sock *sk, int count) 692 1008 { 693 1009 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 694 1010 ··· 699 1017 } 700 1018 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 701 1019 wake_up_interruptible(sk->sk_sleep); 702 - } 703 - 704 - static inline int svc_port_is_privileged(struct sockaddr *sin) 705 - { 706 - switch (sin->sa_family) { 707 - case AF_INET: 708 - return ntohs(((struct sockaddr_in *)sin)->sin_port) 709 - < PROT_SOCK; 710 - case AF_INET6: 711 - return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) 712 - < PROT_SOCK; 713 - default: 714 - return 0; 715 - } 716 1020 } 717 1021 718 1022 /* ··· 783 1115 /* 784 1116 * Receive data from a TCP socket. 785 1117 */ 786 - static int 787 - svc_tcp_recvfrom(struct svc_rqst *rqstp) 1118 + static int svc_tcp_recvfrom(struct svc_rqst *rqstp) 788 1119 { 789 1120 struct svc_sock *svsk = 790 1121 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); ··· 936 1269 /* 937 1270 * Send out data on TCP socket. 
938 1271 */ 939 - static int 940 - svc_tcp_sendto(struct svc_rqst *rqstp) 1272 + static int svc_tcp_sendto(struct svc_rqst *rqstp) 941 1273 { 942 1274 struct xdr_buf *xbufp = &rqstp->rq_res; 943 1275 int sent; ··· 954 1288 955 1289 sent = svc_sendto(rqstp, &rqstp->rq_res); 956 1290 if (sent != xbufp->len) { 957 - printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n", 1291 + printk(KERN_NOTICE 1292 + "rpc-srv/tcp: %s: %s %d when sending %d bytes " 1293 + "- shutting down socket\n", 958 1294 rqstp->rq_xprt->xpt_server->sv_name, 959 1295 (sent<0)?"got error":"sent only", 960 1296 sent, xbufp->len); ··· 1078 1410 } 1079 1411 } 1080 1412 1081 - void 1082 - svc_sock_update_bufs(struct svc_serv *serv) 1413 + void svc_sock_update_bufs(struct svc_serv *serv) 1083 1414 { 1084 1415 /* 1085 1416 * The number of server threads has changed. Update ··· 1098 1431 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); 1099 1432 } 1100 1433 spin_unlock_bh(&serv->sv_lock); 1101 - } 1102 - 1103 - /* 1104 - * Make sure that we don't have too many active connections. If we 1105 - * have, something must be dropped. 1106 - * 1107 - * There's no point in trying to do random drop here for DoS 1108 - * prevention. The NFS clients does 1 reconnect in 15 seconds. An 1109 - * attacker can easily beat that. 1110 - * 1111 - * The only somewhat efficient mechanism would be if drop old 1112 - * connections from the same IP first. But right now we don't even 1113 - * record the client IP in svc_sock. 1114 - */ 1115 - static void svc_check_conn_limits(struct svc_serv *serv) 1116 - { 1117 - if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) { 1118 - struct svc_xprt *xprt = NULL; 1119 - spin_lock_bh(&serv->sv_lock); 1120 - if (!list_empty(&serv->sv_tempsocks)) { 1121 - if (net_ratelimit()) { 1122 - /* Try to help the admin */ 1123 - printk(KERN_NOTICE "%s: too many open " 1124 - "connections, consider increasing the " 1125 - "number of nfsd threads\n", 1126 - serv->sv_name); 1127 - } 1128 - /* 1129 - * Always select the oldest connection. It's not fair, 1130 - * but so is life 1131 - */ 1132 - xprt = list_entry(serv->sv_tempsocks.prev, 1133 - struct svc_xprt, 1134 - xpt_list); 1135 - set_bit(XPT_CLOSE, &xprt->xpt_flags); 1136 - svc_xprt_get(xprt); 1137 - } 1138 - spin_unlock_bh(&serv->sv_lock); 1139 - 1140 - if (xprt) { 1141 - svc_xprt_enqueue(xprt); 1142 - svc_xprt_put(xprt); 1143 - } 1144 - } 1145 - } 1146 - 1147 - /* 1148 - * Receive the next request on any socket. This code is carefully 1149 - * organised not to touch any cachelines in the shared svc_serv 1150 - * structure, only cachelines in the local svc_pool. 1151 - */ 1152 - int 1153 - svc_recv(struct svc_rqst *rqstp, long timeout) 1154 - { 1155 - struct svc_xprt *xprt = NULL; 1156 - struct svc_serv *serv = rqstp->rq_server; 1157 - struct svc_pool *pool = rqstp->rq_pool; 1158 - int len, i; 1159 - int pages; 1160 - struct xdr_buf *arg; 1161 - DECLARE_WAITQUEUE(wait, current); 1162 - 1163 - dprintk("svc: server %p waiting for data (to = %ld)\n", 1164 - rqstp, timeout); 1165 - 1166 - if (rqstp->rq_xprt) 1167 - printk(KERN_ERR 1168 - "svc_recv: service %p, transport not NULL!\n", 1169 - rqstp); 1170 - if (waitqueue_active(&rqstp->rq_wait)) 1171 - printk(KERN_ERR 1172 - "svc_recv: service %p, wait queue active!\n", 1173 - rqstp); 1174 - 1175 - 1176 - /* now allocate needed pages. 
If we get a failure, sleep briefly */ 1177 - pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; 1178 - for (i=0; i < pages ; i++) 1179 - while (rqstp->rq_pages[i] == NULL) { 1180 - struct page *p = alloc_page(GFP_KERNEL); 1181 - if (!p) 1182 - schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1183 - rqstp->rq_pages[i] = p; 1184 - } 1185 - rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */ 1186 - BUG_ON(pages >= RPCSVC_MAXPAGES); 1187 - 1188 - /* Make arg->head point to first page and arg->pages point to rest */ 1189 - arg = &rqstp->rq_arg; 1190 - arg->head[0].iov_base = page_address(rqstp->rq_pages[0]); 1191 - arg->head[0].iov_len = PAGE_SIZE; 1192 - arg->pages = rqstp->rq_pages + 1; 1193 - arg->page_base = 0; 1194 - /* save at least one page for response */ 1195 - arg->page_len = (pages-2)*PAGE_SIZE; 1196 - arg->len = (pages-1)*PAGE_SIZE; 1197 - arg->tail[0].iov_len = 0; 1198 - 1199 - try_to_freeze(); 1200 - cond_resched(); 1201 - if (signalled()) 1202 - return -EINTR; 1203 - 1204 - spin_lock_bh(&pool->sp_lock); 1205 - xprt = svc_xprt_dequeue(pool); 1206 - if (xprt) { 1207 - rqstp->rq_xprt = xprt; 1208 - svc_xprt_get(xprt); 1209 - rqstp->rq_reserved = serv->sv_max_mesg; 1210 - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); 1211 - } else { 1212 - /* No data pending. Go to sleep */ 1213 - svc_thread_enqueue(pool, rqstp); 1214 - 1215 - /* 1216 - * We have to be able to interrupt this wait 1217 - * to bring down the daemons ... 1218 - */ 1219 - set_current_state(TASK_INTERRUPTIBLE); 1220 - add_wait_queue(&rqstp->rq_wait, &wait); 1221 - spin_unlock_bh(&pool->sp_lock); 1222 - 1223 - schedule_timeout(timeout); 1224 - 1225 - try_to_freeze(); 1226 - 1227 - spin_lock_bh(&pool->sp_lock); 1228 - remove_wait_queue(&rqstp->rq_wait, &wait); 1229 - 1230 - xprt = rqstp->rq_xprt; 1231 - if (!xprt) { 1232 - svc_thread_dequeue(pool, rqstp); 1233 - spin_unlock_bh(&pool->sp_lock); 1234 - dprintk("svc: server %p, no data yet\n", rqstp); 1235 - return signalled()? 
-EINTR : -EAGAIN; 1236 - } 1237 - } 1238 - spin_unlock_bh(&pool->sp_lock); 1239 - 1240 - len = 0; 1241 - if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { 1242 - dprintk("svc_recv: found XPT_CLOSE\n"); 1243 - svc_delete_xprt(xprt); 1244 - } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { 1245 - struct svc_xprt *newxpt; 1246 - newxpt = xprt->xpt_ops->xpo_accept(xprt); 1247 - if (newxpt) { 1248 - /* 1249 - * We know this module_get will succeed because the 1250 - * listener holds a reference too 1251 - */ 1252 - __module_get(newxpt->xpt_class->xcl_owner); 1253 - svc_check_conn_limits(xprt->xpt_server); 1254 - spin_lock_bh(&serv->sv_lock); 1255 - set_bit(XPT_TEMP, &newxpt->xpt_flags); 1256 - list_add(&newxpt->xpt_list, &serv->sv_tempsocks); 1257 - serv->sv_tmpcnt++; 1258 - if (serv->sv_temptimer.function == NULL) { 1259 - /* setup timer to age temp sockets */ 1260 - setup_timer(&serv->sv_temptimer, 1261 - svc_age_temp_xprts, 1262 - (unsigned long)serv); 1263 - mod_timer(&serv->sv_temptimer, 1264 - jiffies + svc_conn_age_period * HZ); 1265 - } 1266 - spin_unlock_bh(&serv->sv_lock); 1267 - svc_xprt_received(newxpt); 1268 - } 1269 - svc_xprt_received(xprt); 1270 - } else { 1271 - dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", 1272 - rqstp, pool->sp_id, xprt, 1273 - atomic_read(&xprt->xpt_ref.refcount)); 1274 - rqstp->rq_deferred = svc_deferred_dequeue(xprt); 1275 - if (rqstp->rq_deferred) { 1276 - svc_xprt_received(xprt); 1277 - len = svc_deferred_recv(rqstp); 1278 - } else 1279 - len = xprt->xpt_ops->xpo_recvfrom(rqstp); 1280 - dprintk("svc: got len=%d\n", len); 1281 - } 1282 - 1283 - /* No data, incomplete (TCP) read, or accept() */ 1284 - if (len == 0 || len == -EAGAIN) { 1285 - rqstp->rq_res.len = 0; 1286 - svc_xprt_release(rqstp); 1287 - return -EAGAIN; 1288 - } 1289 - clear_bit(XPT_OLD, &xprt->xpt_flags); 1290 - 1291 - rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); 1292 - rqstp->rq_chandle.defer = svc_defer; 1293 - 1294 - if (serv->sv_stats) 1295 - serv->sv_stats->netcnt++; 1296 - return len; 1297 - } 1298 - 1299 - /* 1300 - * Drop request 1301 - */ 1302 - void 1303 - svc_drop(struct svc_rqst *rqstp) 1304 - { 1305 - dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); 1306 - svc_xprt_release(rqstp); 1307 - } 1308 - 1309 - /* 1310 - * Return reply to client. 1311 - */ 1312 - int 1313 - svc_send(struct svc_rqst *rqstp) 1314 - { 1315 - struct svc_xprt *xprt; 1316 - int len; 1317 - struct xdr_buf *xb; 1318 - 1319 - xprt = rqstp->rq_xprt; 1320 - if (!xprt) 1321 - return -EFAULT; 1322 - 1323 - /* release the receive skb before sending the reply */ 1324 - rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); 1325 - 1326 - /* calculate over-all length */ 1327 - xb = & rqstp->rq_res; 1328 - xb->len = xb->head[0].iov_len + 1329 - xb->page_len + 1330 - xb->tail[0].iov_len; 1331 - 1332 - /* Grab mutex to serialize outgoing data. */ 1333 - mutex_lock(&xprt->xpt_mutex); 1334 - if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 1335 - len = -ENOTCONN; 1336 - else 1337 - len = xprt->xpt_ops->xpo_sendto(rqstp); 1338 - mutex_unlock(&xprt->xpt_mutex); 1339 - svc_xprt_release(rqstp); 1340 - 1341 - if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) 1342 - return 0; 1343 - return len; 1344 - } 1345 - 1346 - /* 1347 - * Timer function to close old temporary sockets, using 1348 - * a mark-and-sweep algorithm. 
1349 - */ 1350 - static void svc_age_temp_xprts(unsigned long closure) 1351 - { 1352 - struct svc_serv *serv = (struct svc_serv *)closure; 1353 - struct svc_xprt *xprt; 1354 - struct list_head *le, *next; 1355 - LIST_HEAD(to_be_aged); 1356 - 1357 - dprintk("svc_age_temp_xprts\n"); 1358 - 1359 - if (!spin_trylock_bh(&serv->sv_lock)) { 1360 - /* busy, try again 1 sec later */ 1361 - dprintk("svc_age_temp_xprts: busy\n"); 1362 - mod_timer(&serv->sv_temptimer, jiffies + HZ); 1363 - return; 1364 - } 1365 - 1366 - list_for_each_safe(le, next, &serv->sv_tempsocks) { 1367 - xprt = list_entry(le, struct svc_xprt, xpt_list); 1368 - 1369 - /* First time through, just mark it OLD. Second time 1370 - * through, close it. */ 1371 - if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags)) 1372 - continue; 1373 - if (atomic_read(&xprt->xpt_ref.refcount) > 1 1374 - || test_bit(XPT_BUSY, &xprt->xpt_flags)) 1375 - continue; 1376 - svc_xprt_get(xprt); 1377 - list_move(le, &to_be_aged); 1378 - set_bit(XPT_CLOSE, &xprt->xpt_flags); 1379 - set_bit(XPT_DETACHED, &xprt->xpt_flags); 1380 - } 1381 - spin_unlock_bh(&serv->sv_lock); 1382 - 1383 - while (!list_empty(&to_be_aged)) { 1384 - le = to_be_aged.next; 1385 - /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */ 1386 - list_del_init(le); 1387 - xprt = list_entry(le, struct svc_xprt, xpt_list); 1388 - 1389 - dprintk("queuing xprt %p for closing\n", xprt); 1390 - 1391 - /* a thread will dequeue and close it soon */ 1392 - svc_xprt_enqueue(xprt); 1393 - svc_xprt_put(xprt); 1394 - } 1395 - 1396 - mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); 1397 1434 } 1398 1435 1399 1436 /* ··· 1283 1912 else 1284 1913 sock_release(svsk->sk_sock); 1285 1914 kfree(svsk); 1286 - } 1287 - 1288 - /* 1289 - * Remove a dead transport 1290 - */ 1291 - static void svc_delete_xprt(struct svc_xprt *xprt) 1292 - { 1293 - struct svc_serv *serv = xprt->xpt_server; 1294 - 1295 - dprintk("svc: svc_delete_xprt(%p)\n", xprt); 1296 - xprt->xpt_ops->xpo_detach(xprt); 1297 - 1298 - spin_lock_bh(&serv->sv_lock); 1299 - if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) 1300 - list_del_init(&xprt->xpt_list); 1301 - /* 1302 - * We used to delete the transport from whichever list 1303 - * it's sk_xprt.xpt_ready node was on, but we don't actually 1304 - * need to. This is because the only time we're called 1305 - * while still attached to a queue, the queue itself 1306 - * is about to be destroyed (in svc_destroy). 
1307 - */ 1308 - if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) { 1309 - BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2); 1310 - if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 1311 - serv->sv_tmpcnt--; 1312 - svc_xprt_put(xprt); 1313 - } 1314 - spin_unlock_bh(&serv->sv_lock); 1315 - } 1316 - 1317 - static void svc_close_xprt(struct svc_xprt *xprt) 1318 - { 1319 - set_bit(XPT_CLOSE, &xprt->xpt_flags); 1320 - if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) 1321 - /* someone else will have to effect the close */ 1322 - return; 1323 - 1324 - svc_xprt_get(xprt); 1325 - svc_delete_xprt(xprt); 1326 - clear_bit(XPT_BUSY, &xprt->xpt_flags); 1327 - svc_xprt_put(xprt); 1328 - } 1329 - 1330 - void svc_close_all(struct list_head *xprt_list) 1331 - { 1332 - struct svc_xprt *xprt; 1333 - struct svc_xprt *tmp; 1334 - 1335 - list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { 1336 - set_bit(XPT_CLOSE, &xprt->xpt_flags); 1337 - if (test_bit(XPT_BUSY, &xprt->xpt_flags)) { 1338 - /* Waiting to be processed, but no threads left, 1339 - * So just remove it from the waiting list 1340 - */ 1341 - list_del_init(&xprt->xpt_ready); 1342 - clear_bit(XPT_BUSY, &xprt->xpt_flags); 1343 - } 1344 - svc_close_xprt(xprt); 1345 - } 1346 - } 1347 - 1348 - /* 1349 - * Handle defer and revisit of requests 1350 - */ 1351 - 1352 - static void svc_revisit(struct cache_deferred_req *dreq, int too_many) 1353 - { 1354 - struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle); 1355 - struct svc_xprt *xprt = dr->xprt; 1356 - 1357 - if (too_many) { 1358 - svc_xprt_put(xprt); 1359 - kfree(dr); 1360 - return; 1361 - } 1362 - dprintk("revisit queued\n"); 1363 - dr->xprt = NULL; 1364 - spin_lock(&xprt->xpt_lock); 1365 - list_add(&dr->handle.recent, &xprt->xpt_deferred); 1366 - spin_unlock(&xprt->xpt_lock); 1367 - set_bit(XPT_DEFERRED, &xprt->xpt_flags); 1368 - svc_xprt_enqueue(xprt); 1369 - svc_xprt_put(xprt); 1370 - } 1371 - 1372 - static struct cache_deferred_req * 1373 - svc_defer(struct cache_req *req) 1374 - { 1375 - struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); 1376 - int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len); 1377 - struct svc_deferred_req *dr; 1378 - 1379 - if (rqstp->rq_arg.page_len) 1380 - return NULL; /* if more than a page, give up FIXME */ 1381 - if (rqstp->rq_deferred) { 1382 - dr = rqstp->rq_deferred; 1383 - rqstp->rq_deferred = NULL; 1384 - } else { 1385 - int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; 1386 - /* FIXME maybe discard if size too large */ 1387 - dr = kmalloc(size, GFP_KERNEL); 1388 - if (dr == NULL) 1389 - return NULL; 1390 - 1391 - dr->handle.owner = rqstp->rq_server; 1392 - dr->prot = rqstp->rq_prot; 1393 - memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen); 1394 - dr->addrlen = rqstp->rq_addrlen; 1395 - dr->daddr = rqstp->rq_daddr; 1396 - dr->argslen = rqstp->rq_arg.len >> 2; 1397 - memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); 1398 - } 1399 - svc_xprt_get(rqstp->rq_xprt); 1400 - dr->xprt = rqstp->rq_xprt; 1401 - 1402 - dr->handle.revisit = svc_revisit; 1403 - return &dr->handle; 1404 - } 1405 - 1406 - /* 1407 - * recv data from a deferred request into an active one 1408 - */ 1409 - static int svc_deferred_recv(struct svc_rqst *rqstp) 1410 - { 1411 - struct svc_deferred_req *dr = rqstp->rq_deferred; 1412 - 1413 - rqstp->rq_arg.head[0].iov_base = dr->args; 1414 - rqstp->rq_arg.head[0].iov_len = dr->argslen<<2; 1415 - rqstp->rq_arg.page_len = 0; 1416 - rqstp->rq_arg.len = 
dr->argslen<<2; 1417 - rqstp->rq_prot = dr->prot; 1418 - memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen); 1419 - rqstp->rq_addrlen = dr->addrlen; 1420 - rqstp->rq_daddr = dr->daddr; 1421 - rqstp->rq_respages = rqstp->rq_pages; 1422 - return dr->argslen<<2; 1423 - } 1424 - 1425 - 1426 - static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) 1427 - { 1428 - struct svc_deferred_req *dr = NULL; 1429 - 1430 - if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) 1431 - return NULL; 1432 - spin_lock(&xprt->xpt_lock); 1433 - clear_bit(XPT_DEFERRED, &xprt->xpt_flags); 1434 - if (!list_empty(&xprt->xpt_deferred)) { 1435 - dr = list_entry(xprt->xpt_deferred.next, 1436 - struct svc_deferred_req, 1437 - handle.recent); 1438 - list_del_init(&dr->handle.recent); 1439 - set_bit(XPT_DEFERRED, &xprt->xpt_flags); 1440 - } 1441 - spin_unlock(&xprt->xpt_lock); 1442 - return dr; 1443 1915 }
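For completeness, the comment in svc_delete_xprt() above alludes to the shutdown path: when a server is torn down, every remaining transport is closed via svc_close_all(). A hedged sketch of that teardown follows, assuming only the sv_tempsocks/sv_permsocks lists named in the SMP comment; my_shutdown() itself is hypothetical.

/*
 * Hedged sketch of server teardown implied by svc_delete_xprt()'s
 * comment about svc_destroy(): close whatever transports remain.
 */
static void my_shutdown(struct svc_serv *serv)            /* hypothetical */
{
        /* Per-connection (temporary) transports first, then listeners. */
        svc_close_all(&serv->sv_tempsocks);
        svc_close_all(&serv->sv_permsocks);
}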