Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/wait: Disambiguate wq_entry->task_list and wq_head->task_list naming

So I've noticed a number of instances where it was not obvious from the
code whether ->task_list was for a wait-queue head or a wait-queue entry.

Furthermore, there are a number of wait-queue users where the lists are
not for 'tasks' but other entities (poll tables, etc.), in which case
the 'task_list' name is actively confusing.

To clear this all up, name the wait-queue head and entry list structure
fields unambiguously:

struct wait_queue_head::task_list => ::head
struct wait_queue_entry::task_list => ::entry

For example, this code:

rqw->wait.task_list.next != &wait->task_list

... was pretty unclear (to me) what it's doing, while now it's written this way:

rqw->wait.head.next != &wait->entry

... which makes it pretty clear that we are iterating a list until we see the head.

Other examples are:

list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
list_for_each_entry(wq, &fence->wait.task_list, task_list) {

... where it's unclear (to me) what we are iterating, and during review it's
hard to tell whether it's trying to walk a wait-queue entry (which would be
a bug), while now it's written as:

list_for_each_entry_safe(pos, next, &x->head, entry) {
list_for_each_entry(wq, &fence->wait.head, entry) {

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+66 -68
+1 -1
block/blk-mq.c
··· 933 933 934 934 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); 935 935 936 - list_del(&wait->task_list); 936 + list_del(&wait->entry); 937 937 clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state); 938 938 blk_mq_run_hw_queue(hctx, true); 939 939 return 1;
+1 -1
block/blk-wbt.c
··· 520 520 * in line to be woken up, wait for our turn. 521 521 */ 522 522 if (waitqueue_active(&rqw->wait) && 523 - rqw->wait.task_list.next != &wait->task_list) 523 + rqw->wait.head.next != &wait->entry) 524 524 return false; 525 525 526 526 return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
+4 -4
block/kyber-iosched.c
··· 385 385 386 386 for (i = 0; i < KYBER_NUM_DOMAINS; i++) { 387 387 INIT_LIST_HEAD(&khd->rqs[i]); 388 - INIT_LIST_HEAD(&khd->domain_wait[i].task_list); 388 + INIT_LIST_HEAD(&khd->domain_wait[i].entry); 389 389 atomic_set(&khd->wait_index[i], 0); 390 390 } 391 391 ··· 512 512 { 513 513 struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private); 514 514 515 - list_del_init(&wait->task_list); 515 + list_del_init(&wait->entry); 516 516 blk_mq_run_hw_queue(hctx, true); 517 517 return 1; 518 518 } ··· 536 536 * run when one becomes available. Note that this is serialized on 537 537 * khd->lock, but we still need to be careful about the waker. 538 538 */ 539 - if (list_empty_careful(&wait->task_list)) { 539 + if (list_empty_careful(&wait->entry)) { 540 540 init_waitqueue_func_entry(wait, kyber_domain_wake); 541 541 wait->private = hctx; 542 542 ws = sbq_wait_ptr(domain_tokens, ··· 736 736 struct kyber_hctx_data *khd = hctx->sched_data; \ 737 737 wait_queue_entry_t *wait = &khd->domain_wait[domain]; \ 738 738 \ 739 - seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \ 739 + seq_printf(m, "%d\n", !list_empty_careful(&wait->entry)); \ 740 740 return 0; \ 741 741 } 742 742 KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
+10 -11
drivers/gpu/drm/i915/i915_sw_fence.c
··· 160 160 161 161 /* 162 162 * To prevent unbounded recursion as we traverse the graph of 163 - * i915_sw_fences, we move the task_list from this, the next ready 164 - * fence, to the tail of the original fence's task_list 163 + * i915_sw_fences, we move the entry list from this, the next ready 164 + * fence, to the tail of the original fence's entry list 165 165 * (and so added to the list to be woken). 166 166 */ 167 167 168 168 spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation); 169 169 if (continuation) { 170 - list_for_each_entry_safe(pos, next, &x->task_list, task_list) { 170 + list_for_each_entry_safe(pos, next, &x->head, entry) { 171 171 if (pos->func == autoremove_wake_function) 172 172 pos->func(pos, TASK_NORMAL, 0, continuation); 173 173 else 174 - list_move_tail(&pos->task_list, continuation); 174 + list_move_tail(&pos->entry, continuation); 175 175 } 176 176 } else { 177 177 LIST_HEAD(extra); 178 178 179 179 do { 180 - list_for_each_entry_safe(pos, next, 181 - &x->task_list, task_list) 180 + list_for_each_entry_safe(pos, next, &x->head, entry) 182 181 pos->func(pos, TASK_NORMAL, 0, &extra); 183 182 184 183 if (list_empty(&extra)) 185 184 break; 186 185 187 - list_splice_tail_init(&extra, &x->task_list); 186 + list_splice_tail_init(&extra, &x->head); 188 187 } while (1); 189 188 } 190 189 spin_unlock_irqrestore(&x->lock, flags); ··· 255 256 256 257 static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key) 257 258 { 258 - list_del(&wq->task_list); 259 + list_del(&wq->entry); 259 260 __i915_sw_fence_complete(wq->private, key); 260 261 i915_sw_fence_put(wq->private); 261 262 if (wq->flags & I915_SW_FENCE_FLAG_ALLOC) ··· 274 275 if (fence == signaler) 275 276 return true; 276 277 277 - list_for_each_entry(wq, &fence->wait.task_list, task_list) { 278 + list_for_each_entry(wq, &fence->wait.head, entry) { 278 279 if (wq->func != i915_sw_fence_wake) 279 280 continue; 280 281 ··· 292 293 if 
(!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags)) 293 294 return; 294 295 295 - list_for_each_entry(wq, &fence->wait.task_list, task_list) { 296 + list_for_each_entry(wq, &fence->wait.head, entry) { 296 297 if (wq->func != i915_sw_fence_wake) 297 298 continue; 298 299 ··· 349 350 pending |= I915_SW_FENCE_FLAG_ALLOC; 350 351 } 351 352 352 - INIT_LIST_HEAD(&wq->task_list); 353 + INIT_LIST_HEAD(&wq->entry); 353 354 wq->flags = pending; 354 355 wq->func = i915_sw_fence_wake; 355 356 wq->private = i915_sw_fence_get(fence);
+1 -1
drivers/rtc/rtc-imxdi.c
··· 709 709 /*If the write wait queue is empty then there is no pending 710 710 operations. It means the interrupt is for DryIce -Security. 711 711 IRQ must be returned as none.*/ 712 - if (list_empty_careful(&imxdi->write_wait.task_list)) 712 + if (list_empty_careful(&imxdi->write_wait.head)) 713 713 return rc; 714 714 715 715 /* DSR_WCF clears itself on DSR read */
+1 -1
fs/cachefiles/rdwr.c
··· 48 48 } 49 49 50 50 /* remove from the waitqueue */ 51 - list_del(&wait->task_list); 51 + list_del(&wait->entry); 52 52 53 53 /* move onto the action list and queue for FS-Cache thread pool */ 54 54 ASSERT(monitor->op);
+1 -1
fs/eventpoll.c
··· 1094 1094 * can't use __remove_wait_queue(). whead->lock is held by 1095 1095 * the caller. 1096 1096 */ 1097 - list_del_init(&wait->task_list); 1097 + list_del_init(&wait->entry); 1098 1098 } 1099 1099 1100 1100 spin_lock_irqsave(&ep->lock, flags);
+1 -1
fs/fs_pin.c
··· 61 61 rcu_read_unlock(); 62 62 schedule(); 63 63 rcu_read_lock(); 64 - if (likely(list_empty(&wait.task_list))) 64 + if (likely(list_empty(&wait.entry))) 65 65 break; 66 66 /* OK, we know p couldn't have been freed yet */ 67 67 spin_lock_irq(&p->wait.lock);
+1 -2
fs/nilfs2/segment.c
··· 2206 2206 unsigned long flags; 2207 2207 2208 2208 spin_lock_irqsave(&sci->sc_wait_request.lock, flags); 2209 - list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list, 2210 - wq.task_list) { 2209 + list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) { 2211 2210 if (!atomic_read(&wrq->done) && 2212 2211 nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) { 2213 2212 wrq->err = err;
+4 -4
fs/orangefs/orangefs-bufmap.c
··· 46 46 spin_lock(&m->q.lock); 47 47 if (m->c != -1) { 48 48 for (;;) { 49 - if (likely(list_empty(&wait.task_list))) 49 + if (likely(list_empty(&wait.entry))) 50 50 __add_wait_queue_entry_tail(&m->q, &wait); 51 51 set_current_state(TASK_UNINTERRUPTIBLE); 52 52 ··· 84 84 85 85 do { 86 86 long n = left, t; 87 - if (likely(list_empty(&wait.task_list))) 87 + if (likely(list_empty(&wait.entry))) 88 88 __add_wait_queue_entry_tail_exclusive(&m->q, &wait); 89 89 set_current_state(TASK_INTERRUPTIBLE); 90 90 ··· 108 108 left = -EINTR; 109 109 } while (left > 0); 110 110 111 - if (!list_empty(&wait.task_list)) 112 - list_del(&wait.task_list); 111 + if (!list_empty(&wait.entry)) 112 + list_del(&wait.entry); 113 113 else if (left <= 0 && waitqueue_active(&m->q)) 114 114 __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL); 115 115 __set_current_state(TASK_RUNNING);
+11 -11
fs/userfaultfd.c
··· 129 129 * wouldn't be enough, the smp_mb__before_spinlock is 130 130 * enough to avoid an explicit smp_mb() here. 131 131 */ 132 - list_del_init(&wq->task_list); 132 + list_del_init(&wq->entry); 133 133 out: 134 134 return ret; 135 135 } ··· 522 522 * and it's fine not to block on the spinlock. The uwq on this 523 523 * kernel stack can be released after the list_del_init. 524 524 */ 525 - if (!list_empty_careful(&uwq.wq.task_list)) { 525 + if (!list_empty_careful(&uwq.wq.entry)) { 526 526 spin_lock(&ctx->fault_pending_wqh.lock); 527 527 /* 528 528 * No need of list_del_init(), the uwq on the stack 529 529 * will be freed shortly anyway. 530 530 */ 531 - list_del(&uwq.wq.task_list); 531 + list_del(&uwq.wq.entry); 532 532 spin_unlock(&ctx->fault_pending_wqh.lock); 533 533 } 534 534 ··· 869 869 if (!waitqueue_active(wqh)) 870 870 goto out; 871 871 /* walk in reverse to provide FIFO behavior to read userfaults */ 872 - wq = list_last_entry(&wqh->task_list, typeof(*wq), task_list); 872 + wq = list_last_entry(&wqh->head, typeof(*wq), entry); 873 873 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); 874 874 out: 875 875 return uwq; ··· 1003 1003 * changes __remove_wait_queue() to use 1004 1004 * list_del_init() in turn breaking the 1005 1005 * !list_empty_careful() check in 1006 - * handle_userfault(). The uwq->wq.task_list 1006 + * handle_userfault(). The uwq->wq.head list 1007 1007 * must never be empty at any time during the 1008 1008 * refile, or the waitqueue could disappear 1009 1009 * from under us. The "wait_queue_head_t" 1010 1010 * parameter of __remove_wait_queue() is unused 1011 1011 * anyway. 
1012 1012 */ 1013 - list_del(&uwq->wq.task_list); 1013 + list_del(&uwq->wq.entry); 1014 1014 __add_wait_queue(&ctx->fault_wqh, &uwq->wq); 1015 1015 1016 1016 write_seqcount_end(&ctx->refile_seq); ··· 1032 1032 fork_nctx = (struct userfaultfd_ctx *) 1033 1033 (unsigned long) 1034 1034 uwq->msg.arg.reserved.reserved1; 1035 - list_move(&uwq->wq.task_list, &fork_event); 1035 + list_move(&uwq->wq.entry, &fork_event); 1036 1036 spin_unlock(&ctx->event_wqh.lock); 1037 1037 ret = 0; 1038 1038 break; ··· 1069 1069 if (!list_empty(&fork_event)) { 1070 1070 uwq = list_first_entry(&fork_event, 1071 1071 typeof(*uwq), 1072 - wq.task_list); 1073 - list_del(&uwq->wq.task_list); 1072 + wq.entry); 1073 + list_del(&uwq->wq.entry); 1074 1074 __add_wait_queue(&ctx->event_wqh, &uwq->wq); 1075 1075 userfaultfd_event_complete(ctx, uwq); 1076 1076 } ··· 1752 1752 unsigned long pending = 0, total = 0; 1753 1753 1754 1754 spin_lock(&ctx->fault_pending_wqh.lock); 1755 - list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) { 1755 + list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { 1756 1756 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); 1757 1757 pending++; 1758 1758 total++; 1759 1759 } 1760 - list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) { 1760 + list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { 1761 1761 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); 1762 1762 total++; 1763 1763 }
+10 -10
include/linux/wait.h
··· 26 26 unsigned int flags; 27 27 void *private; 28 28 wait_queue_func_t func; 29 - struct list_head task_list; 29 + struct list_head entry; 30 30 }; 31 31 32 32 struct wait_queue_head { 33 33 spinlock_t lock; 34 - struct list_head task_list; 34 + struct list_head head; 35 35 }; 36 36 typedef struct wait_queue_head wait_queue_head_t; 37 37 ··· 44 44 #define __WAITQUEUE_INITIALIZER(name, tsk) { \ 45 45 .private = tsk, \ 46 46 .func = default_wake_function, \ 47 - .task_list = { NULL, NULL } } 47 + .entry = { NULL, NULL } } 48 48 49 49 #define DECLARE_WAITQUEUE(name, tsk) \ 50 50 struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk) 51 51 52 52 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ 53 53 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ 54 - .task_list = { &(name).task_list, &(name).task_list } } 54 + .head = { &(name).head, &(name).head } } 55 55 56 56 #define DECLARE_WAIT_QUEUE_HEAD(name) \ 57 57 struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) ··· 121 121 */ 122 122 static inline int waitqueue_active(struct wait_queue_head *wq_head) 123 123 { 124 - return !list_empty(&wq_head->task_list); 124 + return !list_empty(&wq_head->head); 125 125 } 126 126 127 127 /** ··· 151 151 152 152 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) 153 153 { 154 - list_add(&wq_entry->task_list, &wq_head->task_list); 154 + list_add(&wq_entry->entry, &wq_head->head); 155 155 } 156 156 157 157 /* ··· 166 166 167 167 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) 168 168 { 169 - list_add_tail(&wq_entry->task_list, &wq_head->task_list); 169 + list_add_tail(&wq_entry->entry, &wq_head->head); 170 170 } 171 171 172 172 static inline void ··· 179 179 static inline void 180 180 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) 181 181 { 182 - list_del(&wq_entry->task_list); 182 + 
list_del(&wq_entry->entry); 183 183 } 184 184 185 185 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); ··· 952 952 struct wait_queue_entry name = { \ 953 953 .private = current, \ 954 954 .func = function, \ 955 - .task_list = LIST_HEAD_INIT((name).task_list), \ 955 + .entry = LIST_HEAD_INIT((name).entry), \ 956 956 } 957 957 958 958 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) ··· 961 961 do { \ 962 962 (wait)->private = current; \ 963 963 (wait)->func = autoremove_wake_function; \ 964 - INIT_LIST_HEAD(&(wait)->task_list); \ 964 + INIT_LIST_HEAD(&(wait)->entry); \ 965 965 (wait)->flags = 0; \ 966 966 } while (0) 967 967
+2 -2
include/linux/wait_bit.h
··· 45 45 .wq_entry = { \ 46 46 .private = current, \ 47 47 .func = wake_bit_function, \ 48 - .task_list = \ 49 - LIST_HEAD_INIT((name).wq_entry.task_list), \ 48 + .entry = \ 49 + LIST_HEAD_INIT((name).wq_entry.entry), \ 50 50 }, \ 51 51 } 52 52
+12 -12
kernel/sched/wait.c
··· 16 16 { 17 17 spin_lock_init(&wq_head->lock); 18 18 lockdep_set_class_and_name(&wq_head->lock, key, name); 19 - INIT_LIST_HEAD(&wq_head->task_list); 19 + INIT_LIST_HEAD(&wq_head->head); 20 20 } 21 21 22 22 EXPORT_SYMBOL(__init_waitqueue_head); ··· 68 68 { 69 69 wait_queue_entry_t *curr, *next; 70 70 71 - list_for_each_entry_safe(curr, next, &wq_head->task_list, task_list) { 71 + list_for_each_entry_safe(curr, next, &wq_head->head, entry) { 72 72 unsigned flags = curr->flags; 73 73 74 74 if (curr->func(curr, mode, wake_flags, key) && ··· 176 176 177 177 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; 178 178 spin_lock_irqsave(&wq_head->lock, flags); 179 - if (list_empty(&wq_entry->task_list)) 179 + if (list_empty(&wq_entry->entry)) 180 180 __add_wait_queue(wq_head, wq_entry); 181 181 set_current_state(state); 182 182 spin_unlock_irqrestore(&wq_head->lock, flags); ··· 190 190 191 191 wq_entry->flags |= WQ_FLAG_EXCLUSIVE; 192 192 spin_lock_irqsave(&wq_head->lock, flags); 193 - if (list_empty(&wq_entry->task_list)) 193 + if (list_empty(&wq_entry->entry)) 194 194 __add_wait_queue_entry_tail(wq_head, wq_entry); 195 195 set_current_state(state); 196 196 spin_unlock_irqrestore(&wq_head->lock, flags); ··· 202 202 wq_entry->flags = flags; 203 203 wq_entry->private = current; 204 204 wq_entry->func = autoremove_wake_function; 205 - INIT_LIST_HEAD(&wq_entry->task_list); 205 + INIT_LIST_HEAD(&wq_entry->entry); 206 206 } 207 207 EXPORT_SYMBOL(init_wait_entry); 208 208 ··· 225 225 * can't see us, it should wake up another exclusive waiter if 226 226 * we fail. 
227 227 */ 228 - list_del_init(&wq_entry->task_list); 228 + list_del_init(&wq_entry->entry); 229 229 ret = -ERESTARTSYS; 230 230 } else { 231 - if (list_empty(&wq_entry->task_list)) { 231 + if (list_empty(&wq_entry->entry)) { 232 232 if (wq_entry->flags & WQ_FLAG_EXCLUSIVE) 233 233 __add_wait_queue_entry_tail(wq_head, wq_entry); 234 234 else ··· 251 251 */ 252 252 int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait) 253 253 { 254 - if (likely(list_empty(&wait->task_list))) 254 + if (likely(list_empty(&wait->entry))) 255 255 __add_wait_queue_entry_tail(wq, wait); 256 256 257 257 set_current_state(TASK_INTERRUPTIBLE); ··· 267 267 268 268 int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait) 269 269 { 270 - if (likely(list_empty(&wait->task_list))) 270 + if (likely(list_empty(&wait->entry))) 271 271 __add_wait_queue_entry_tail(wq, wait); 272 272 273 273 set_current_state(TASK_INTERRUPTIBLE); ··· 308 308 * have _one_ other CPU that looks at or modifies 309 309 * the list). 310 310 */ 311 - if (!list_empty_careful(&wq_entry->task_list)) { 311 + if (!list_empty_careful(&wq_entry->entry)) { 312 312 spin_lock_irqsave(&wq_head->lock, flags); 313 - list_del_init(&wq_entry->task_list); 313 + list_del_init(&wq_entry->entry); 314 314 spin_unlock_irqrestore(&wq_head->lock, flags); 315 315 } 316 316 } ··· 321 321 int ret = default_wake_function(wq_entry, mode, sync, key); 322 322 323 323 if (ret) 324 - list_del_init(&wq_entry->task_list); 324 + list_del_init(&wq_entry->entry); 325 325 return ret; 326 326 } 327 327 EXPORT_SYMBOL(autoremove_wake_function);
+2 -2
kernel/sched/wait_bit.c
··· 205 205 .wq_entry = { \ 206 206 .private = current, \ 207 207 .func = wake_atomic_t_function, \ 208 - .task_list = \ 209 - LIST_HEAD_INIT((name).wq_entry.task_list), \ 208 + .entry = \ 209 + LIST_HEAD_INIT((name).wq_entry.entry), \ 210 210 }, \ 211 211 } 212 212
+1 -1
mm/filemap.c
··· 845 845 for (;;) { 846 846 spin_lock_irq(&q->lock); 847 847 848 - if (likely(list_empty(&wait->task_list))) { 848 + if (likely(list_empty(&wait->entry))) { 849 849 if (lock) 850 850 __add_wait_queue_entry_tail_exclusive(q, wait); 851 851 else
+1 -1
mm/memcontrol.c
··· 1570 1570 owait.wait.flags = 0; 1571 1571 owait.wait.func = memcg_oom_wake_function; 1572 1572 owait.wait.private = current; 1573 - INIT_LIST_HEAD(&owait.wait.task_list); 1573 + INIT_LIST_HEAD(&owait.wait.entry); 1574 1574 1575 1575 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1576 1576 mem_cgroup_mark_under_oom(memcg);
+2 -2
mm/shmem.c
··· 1905 1905 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 1906 1906 { 1907 1907 int ret = default_wake_function(wait, mode, sync, key); 1908 - list_del_init(&wait->task_list); 1908 + list_del_init(&wait->entry); 1909 1909 return ret; 1910 1910 } 1911 1911 ··· 2840 2840 spin_lock(&inode->i_lock); 2841 2841 inode->i_private = NULL; 2842 2842 wake_up_all(&shmem_falloc_waitq); 2843 - WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list)); 2843 + WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 2844 2844 spin_unlock(&inode->i_lock); 2845 2845 error = 0; 2846 2846 goto out;