Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

workqueue: fix some scripts/kernel-doc warnings

When building the htmldocs (in verbose mode), scripts/kernel-doc reports
warnings of the following type:

Warning(kernel/workqueue.c:653): No description found for return value of
'get_work_pool'

Fix them by:
- Using "Return:" sections to introduce descriptions of return values
- Adding some missing descriptions

Signed-off-by: Yacine Belkadi <yacine.belkadi.1@gmail.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>

authored by

Yacine Belkadi and committed by
Jiri Kosina
d185af30 1a5d6d2b

+66 -41
+66 -41
kernel/workqueue.c
··· 540 540 * This must be called either with pwq_lock held or sched RCU read locked. 541 541 * If the pwq needs to be used beyond the locking in effect, the caller is 542 542 * responsible for guaranteeing that the pwq stays online. 543 + * 544 + * Return: The unbound pool_workqueue for @node. 543 545 */ 544 546 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, 545 547 int node) ··· 640 638 * get_work_pool - return the worker_pool a given work was associated with 641 639 * @work: the work item of interest 642 640 * 643 - * Return the worker_pool @work was last associated with. %NULL if none. 644 - * 645 641 * Pools are created and destroyed under wq_pool_mutex, and allows read 646 642 * access under sched-RCU read lock. As such, this function should be 647 643 * called under wq_pool_mutex or with preemption disabled. ··· 648 648 * mentioned locking is in effect. If the returned pool needs to be used 649 649 * beyond the critical section, the caller is responsible for ensuring the 650 650 * returned pool is and stays online. 651 + * 652 + * Return: The worker_pool @work was last associated with. %NULL if none. 651 653 */ 652 654 static struct worker_pool *get_work_pool(struct work_struct *work) 653 655 { ··· 673 671 * get_work_pool_id - return the worker pool ID a given work is associated with 674 672 * @work: the work item of interest 675 673 * 676 - * Return the worker_pool ID @work was last associated with. 674 + * Return: The worker_pool ID @work was last associated with. 677 675 * %WORK_OFFQ_POOL_NONE if none. 678 676 */ 679 677 static int get_work_pool_id(struct work_struct *work) ··· 832 830 * CONTEXT: 833 831 * spin_lock_irq(rq->lock) 834 832 * 835 - * RETURNS: 833 + * Return: 836 834 * Worker task on @cpu to wake up, %NULL if none. 837 835 */ 838 836 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) ··· 967 965 * CONTEXT: 968 966 * spin_lock_irq(pool->lock). 
969 967 * 970 - * RETURNS: 971 - * Pointer to worker which is executing @work if found, NULL 968 + * Return: 969 + * Pointer to worker which is executing @work if found, %NULL 972 970 * otherwise. 973 971 */ 974 972 static struct worker *find_worker_executing_work(struct worker_pool *pool, ··· 1156 1154 * @flags: place to store irq state 1157 1155 * 1158 1156 * Try to grab PENDING bit of @work. This function can handle @work in any 1159 - * stable state - idle, on timer or on worklist. Return values are 1157 + * stable state - idle, on timer or on worklist. 1160 1158 * 1159 + * Return: 1161 1160 * 1 if @work was pending and we successfully stole PENDING 1162 1161 * 0 if @work was idle and we claimed PENDING 1163 1162 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1164 1163 * -ENOENT if someone else is canceling @work, this state may persist 1165 1164 * for arbitrarily long 1166 1165 * 1166 + * Note: 1167 1167 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1168 1168 * interrupted while holding PENDING and @work off queue, irq must be 1169 1169 * disabled on entry. This, combined with delayed_work->timer being ··· 1407 1403 * @wq: workqueue to use 1408 1404 * @work: work to queue 1409 1405 * 1410 - * Returns %false if @work was already on a queue, %true otherwise. 1411 - * 1412 1406 * We queue the work to a specific CPU, the caller must ensure it 1413 1407 * can't go away. 1408 + * 1409 + * Return: %false if @work was already on a queue, %true otherwise. 1414 1410 */ 1415 1411 bool queue_work_on(int cpu, struct workqueue_struct *wq, 1416 1412 struct work_struct *work) ··· 1480 1476 * @dwork: work to queue 1481 1477 * @delay: number of jiffies to wait before queueing 1482 1478 * 1483 - * Returns %false if @work was already on a queue, %true otherwise. If 1479 + * Return: %false if @work was already on a queue, %true otherwise. 
If 1484 1480 * @delay is zero and @dwork is idle, it will be scheduled for immediate 1485 1481 * execution. 1486 1482 */ ··· 1516 1512 * zero, @work is guaranteed to be scheduled immediately regardless of its 1517 1513 * current state. 1518 1514 * 1519 - * Returns %false if @dwork was idle and queued, %true if @dwork was 1515 + * Return: %false if @dwork was idle and queued, %true if @dwork was 1520 1516 * pending and its timer was modified. 1521 1517 * 1522 1518 * This function is safe to call from any context including IRQ handler. ··· 1631 1627 * Might sleep. Called without any lock but returns with pool->lock 1632 1628 * held. 1633 1629 * 1634 - * RETURNS: 1630 + * Return: 1635 1631 * %true if the associated pool is online (@worker is successfully 1636 1632 * bound), %false if offline. 1637 1633 */ ··· 1692 1688 * CONTEXT: 1693 1689 * Might sleep. Does GFP_KERNEL allocations. 1694 1690 * 1695 - * RETURNS: 1691 + * Return: 1696 1692 * Pointer to the newly created worker. 1697 1693 */ 1698 1694 static struct worker *create_worker(struct worker_pool *pool) ··· 1792 1788 * @pool: the target pool 1793 1789 * 1794 1790 * Grab the managership of @pool and create and start a new worker for it. 1791 + * 1792 + * Return: 0 on success. A negative error code otherwise. 1795 1793 */ 1796 1794 static int create_and_start_worker(struct worker_pool *pool) 1797 1795 { ··· 1938 1932 * multiple times. Does GFP_KERNEL allocations. Called only from 1939 1933 * manager. 1940 1934 * 1941 - * RETURNS: 1935 + * Return: 1942 1936 * %false if no action was taken and pool->lock stayed locked, %true 1943 1937 * otherwise. 1944 1938 */ ··· 1995 1989 * spin_lock_irq(pool->lock) which may be released and regrabbed 1996 1990 * multiple times. Called only from manager. 1997 1991 * 1998 - * RETURNS: 1992 + * Return: 1999 1993 * %false if no action was taken and pool->lock stayed locked, %true 2000 1994 * otherwise. 
2001 1995 */ ··· 2038 2032 * spin_lock_irq(pool->lock) which may be released and regrabbed 2039 2033 * multiple times. Does GFP_KERNEL allocations. 2040 2034 * 2041 - * RETURNS: 2035 + * Return: 2042 2036 * spin_lock_irq(pool->lock) which may be released and regrabbed 2043 2037 * multiple times. Does GFP_KERNEL allocations. 2044 2038 */ ··· 2252 2246 * work items regardless of their specific target workqueue. The only 2253 2247 * exception is work items which belong to workqueues with a rescuer which 2254 2248 * will be explained in rescuer_thread(). 2249 + * 2250 + * Return: 0 2255 2251 */ 2256 2252 static int worker_thread(void *__worker) 2257 2253 { ··· 2352 2344 * those works so that forward progress can be guaranteed. 2353 2345 * 2354 2346 * This should happen rarely. 2347 + * 2348 + * Return: 0 2355 2349 */ 2356 2350 static int rescuer_thread(void *__rescuer) 2357 2351 { ··· 2526 2516 * CONTEXT: 2527 2517 * mutex_lock(wq->mutex). 2528 2518 * 2529 - * RETURNS: 2519 + * Return: 2530 2520 * %true if @flush_color >= 0 and there's something to flush. %false 2531 2521 * otherwise. 2532 2522 */ ··· 2834 2824 * Wait until @work has finished execution. @work is guaranteed to be idle 2835 2825 * on return if it hasn't been requeued since flush started. 2836 2826 * 2837 - * RETURNS: 2827 + * Return: 2838 2828 * %true if flush_work() waited for the work to finish execution, 2839 2829 * %false if it was already idle. 2840 2830 */ ··· 2894 2884 * The caller must ensure that the workqueue on which @work was last 2895 2885 * queued can't be destroyed before this function returns. 2896 2886 * 2897 - * RETURNS: 2887 + * Return: 2898 2888 * %true if @work was pending, %false otherwise. 2899 2889 */ 2900 2890 bool cancel_work_sync(struct work_struct *work) ··· 2911 2901 * immediate execution. Like flush_work(), this function only 2912 2902 * considers the last queueing instance of @dwork. 
2913 2903 * 2914 - * RETURNS: 2904 + * Return: 2915 2905 * %true if flush_work() waited for the work to finish execution, 2916 2906 * %false if it was already idle. 2917 2907 */ ··· 2929 2919 * cancel_delayed_work - cancel a delayed work 2930 2920 * @dwork: delayed_work to cancel 2931 2921 * 2932 - * Kill off a pending delayed_work. Returns %true if @dwork was pending 2933 - * and canceled; %false if wasn't pending. Note that the work callback 2934 - * function may still be running on return, unless it returns %true and the 2935 - * work doesn't re-arm itself. Explicitly flush or use 2936 - * cancel_delayed_work_sync() to wait on it. 2922 + * Kill off a pending delayed_work. 2923 + * 2924 + * Return: %true if @dwork was pending and canceled; %false if it wasn't 2925 + * pending. 2926 + * 2927 + * Note: 2928 + * The work callback function may still be running on return, unless 2929 + * it returns %true and the work doesn't re-arm itself. Explicitly flush or 2930 + * use cancel_delayed_work_sync() to wait on it. 2937 2931 * 2938 2932 * This function is safe to call from any context including IRQ handler. 2939 2933 */ ··· 2966 2952 * 2967 2953 * This is cancel_work_sync() for delayed works. 2968 2954 * 2969 - * RETURNS: 2955 + * Return: 2970 2956 * %true if @dwork was pending, %false otherwise. 2971 2957 */ 2972 2958 bool cancel_delayed_work_sync(struct delayed_work *dwork) ··· 2983 2969 * system workqueue and blocks until all CPUs have completed. 2984 2970 * schedule_on_each_cpu() is very slow. 2985 2971 * 2986 - * RETURNS: 2972 + * Return: 2987 2973 * 0 on success, -errno on failure. 2988 2974 */ 2989 2975 int schedule_on_each_cpu(work_func_t func) ··· 3051 3037 * Executes the function immediately if process context is available, 3052 3038 * otherwise schedules the function for delayed execution. 
3053 3039 * 3054 - * Returns: 0 - function was executed 3040 + * Return: 0 - function was executed 3055 3041 * 1 - function was scheduled for execution 3056 3042 */ 3057 3043 int execute_in_process_context(work_func_t fn, struct execute_work *ew) ··· 3308 3294 * apply_workqueue_attrs() may race against userland updating the 3309 3295 * attributes. 3310 3296 * 3311 - * Returns 0 on success, -errno on failure. 3297 + * Return: 0 on success, -errno on failure. 3312 3298 */ 3313 3299 int workqueue_sysfs_register(struct workqueue_struct *wq) 3314 3300 { ··· 3401 3387 * @gfp_mask: allocation mask to use 3402 3388 * 3403 3389 * Allocate a new workqueue_attrs, initialize with default settings and 3404 - * return it. Returns NULL on failure. 3390 + * return it. 3391 + * 3392 + * Return: The allocated new workqueue_attr on success. %NULL on failure. 3405 3393 */ 3406 3394 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) 3407 3395 { ··· 3456 3440 * @pool: worker_pool to initialize 3457 3441 * 3458 3442 * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs. 3459 - * Returns 0 on success, -errno on failure. Even on failure, all fields 3443 + * 3444 + * Return: 0 on success, -errno on failure. Even on failure, all fields 3460 3445 * inside @pool proper are initialized and put_unbound_pool() can be called 3461 3446 * on @pool safely to release it. 3462 3447 */ ··· 3564 3547 * Obtain a worker_pool which has the same attributes as @attrs, bump the 3565 3548 * reference count and return it. If there already is a matching 3566 3549 * worker_pool, it will be used; otherwise, this function attempts to 3567 - * create a new one. On failure, returns NULL. 3550 + * create a new one. 3568 3551 * 3569 3552 * Should be called with wq_pool_mutex held. 3553 + * 3554 + * Return: On success, a worker_pool with the same attributes as @attrs. 3555 + * On failure, %NULL. 
3570 3556 */ 3571 3557 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 3572 3558 { ··· 3799 3779 * 3800 3780 * Calculate the cpumask a workqueue with @attrs should use on @node. If 3801 3781 * @cpu_going_down is >= 0, that cpu is considered offline during 3802 - * calculation. The result is stored in @cpumask. This function returns 3803 - * %true if the resulting @cpumask is different from @attrs->cpumask, 3804 - * %false if equal. 3782 + * calculation. The result is stored in @cpumask. 3805 3783 * 3806 3784 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If 3807 3785 * enabled and @node has online CPUs requested by @attrs, the returned ··· 3808 3790 * 3809 3791 * The caller is responsible for ensuring that the cpumask of @node stays 3810 3792 * stable. 3793 + * 3794 + * Return: %true if the resulting @cpumask is different from @attrs->cpumask, 3795 + * %false if equal. 3811 3796 */ 3812 3797 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, 3813 3798 int cpu_going_down, cpumask_t *cpumask) ··· 3864 3843 * items finish. Note that a work item which repeatedly requeues itself 3865 3844 * back-to-back will stay on its current pwq. 3866 3845 * 3867 - * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on 3868 - * failure. 3846 + * Performs GFP_KERNEL allocations. 3847 + * 3848 + * Return: 0 on success and -errno on failure. 3869 3849 */ 3870 3850 int apply_workqueue_attrs(struct workqueue_struct *wq, 3871 3851 const struct workqueue_attrs *attrs) ··· 4334 4312 * 4335 4313 * Determine whether %current is a workqueue rescuer. Can be used from 4336 4314 * work functions to determine whether it's being run off the rescuer task. 4315 + * 4316 + * Return: %true if %current is a workqueue rescuer. %false otherwise. 
4337 4317 */ 4338 4318 bool current_is_workqueue_rescuer(void) 4339 4319 { ··· 4359 4335 * workqueue being congested on one CPU doesn't mean the workqueue is also 4360 4336 * contested on other CPUs / NUMA nodes. 4361 4337 * 4362 - * RETURNS: 4338 + * Return: 4363 4339 * %true if congested, %false otherwise. 4364 4340 */ 4365 4341 bool workqueue_congested(int cpu, struct workqueue_struct *wq) ··· 4392 4368 * synchronization around this function and the test result is 4393 4369 * unreliable and only useful as advisory hints or for debugging. 4394 4370 * 4395 - * RETURNS: 4371 + * Return: 4396 4372 * OR'd bitmask of WORK_BUSY_* bits. 4397 4373 */ 4398 4374 unsigned int work_busy(struct work_struct *work) ··· 4770 4746 * @fn: the function to run 4771 4747 * @arg: the function arg 4772 4748 * 4773 - * This will return the value @fn returns. 4774 4749 * It is up to the caller to ensure that the cpu doesn't go offline. 4775 4750 * The caller must not hold any locks which would prevent @fn from completing. 4751 + * 4752 + * Return: The value @fn returns. 4776 4753 */ 4777 4754 long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 4778 4755 { ··· 4838 4813 * CONTEXT: 4839 4814 * Grabs and releases wq_pool_mutex. 4840 4815 * 4841 - * RETURNS: 4816 + * Return: 4842 4817 * %true if some freezable workqueues are still busy. %false if freezing 4843 4818 * is complete. 4844 4819 */