SUNRPC: spin svc_rqst initialization to its own function

Move the initialization in __svc_create_thread that happens prior to
thread creation to a new function. Export the function to allow
services to have better control over the svc_rqst structs.

Also rearrange the rqstp initialization to prevent NULL pointer
dereferences in svc_exit_thread in case allocations fail.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Reviewed-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>

authored by Jeff Layton and committed by J. Bruce Fields 0113ab34 87d26ea7

+44 -17
+2
include/linux/sunrpc/svc.h
··· 384 384 */ 385 385 struct svc_serv * svc_create(struct svc_program *, unsigned int, 386 386 void (*shutdown)(struct svc_serv*)); 387 + struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, 388 + struct svc_pool *pool); 387 389 int svc_create_thread(svc_thread_fn, struct svc_serv *); 388 390 void svc_exit_thread(struct svc_rqst *); 389 391 struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
+42 -17
net/sunrpc/svc.c
··· 531 531 put_page(rqstp->rq_pages[i]); 532 532 } 533 533 534 + struct svc_rqst * 535 + svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool) 536 + { 537 + struct svc_rqst *rqstp; 538 + 539 + rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); 540 + if (!rqstp) 541 + goto out_enomem; 542 + 543 + init_waitqueue_head(&rqstp->rq_wait); 544 + 545 + serv->sv_nrthreads++; 546 + spin_lock_bh(&pool->sp_lock); 547 + pool->sp_nrthreads++; 548 + list_add(&rqstp->rq_all, &pool->sp_all_threads); 549 + spin_unlock_bh(&pool->sp_lock); 550 + rqstp->rq_server = serv; 551 + rqstp->rq_pool = pool; 552 + 553 + rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL); 554 + if (!rqstp->rq_argp) 555 + goto out_thread; 556 + 557 + rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL); 558 + if (!rqstp->rq_resp) 559 + goto out_thread; 560 + 561 + if (!svc_init_buffer(rqstp, serv->sv_max_mesg)) 562 + goto out_thread; 563 + 564 + return rqstp; 565 + out_thread: 566 + svc_exit_thread(rqstp); 567 + out_enomem: 568 + return ERR_PTR(-ENOMEM); 569 + } 570 + EXPORT_SYMBOL(svc_prepare_thread); 571 + 534 572 /* 535 573 * Create a thread in the given pool. Caller must hold BKL. 
536 574 * On a NUMA or SMP machine, with a multi-pool serv, the thread ··· 583 545 int have_oldmask = 0; 584 546 cpumask_t oldmask; 585 547 586 - rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); 587 - if (!rqstp) 548 + rqstp = svc_prepare_thread(serv, pool); 549 + if (IS_ERR(rqstp)) { 550 + error = PTR_ERR(rqstp); 588 551 goto out; 589 - 590 - init_waitqueue_head(&rqstp->rq_wait); 591 - 592 - if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) 593 - || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) 594 - || !svc_init_buffer(rqstp, serv->sv_max_mesg)) 595 - goto out_thread; 596 - 597 - serv->sv_nrthreads++; 598 - spin_lock_bh(&pool->sp_lock); 599 - pool->sp_nrthreads++; 600 - list_add(&rqstp->rq_all, &pool->sp_all_threads); 601 - spin_unlock_bh(&pool->sp_lock); 602 - rqstp->rq_server = serv; 603 - rqstp->rq_pool = pool; 552 + } 604 553 605 554 if (serv->sv_nrpools > 1) 606 555 have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);