Merge tag 'afs-fixes-20190117' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull AFS fixes from David Howells:
"Here's a set of fixes for AFS:

- Use struct_size() for kzalloc() size calculation.

- When calling YFS.CreateFile rather than AFS.CreateFile, it is
possible to create a file with a file lock already held. The
default value indicating no lock required is actually -1, not 0.

- Fix an oops in inode/vnode validation if the target inode doesn't
have a server interest assigned (ie. a server that will notify us
of changes by third parties).

- Fix refcounting of keys in file locking.

- Fix a race in refcounting asynchronous operations in the event of
an error during request transmission. The provision of a dedicated
function to get an extra ref on a call is split into a separate
commit"

* tag 'afs-fixes-20190117' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
afs: Fix race in async call refcounting
afs: Provide a function to get a ref on a call
afs: Fix key refcounting in file locking code
afs: Don't set vnode->cb_s_break in afs_validate()
afs: Set correct lock type for the yfs CreateFile
afs: Use struct_size() in kzalloc()

+61 -18
+2 -2
fs/afs/flock.c
··· 208 /* The new front of the queue now owns the state variables. */ 209 next = list_entry(vnode->pending_locks.next, 210 struct file_lock, fl_u.afs.link); 211 - vnode->lock_key = afs_file_key(next->fl_file); 212 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 213 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 214 goto again; ··· 413 /* The new front of the queue now owns the state variables. */ 414 next = list_entry(vnode->pending_locks.next, 415 struct file_lock, fl_u.afs.link); 416 - vnode->lock_key = afs_file_key(next->fl_file); 417 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 418 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 419 afs_lock_may_be_available(vnode);
··· 208 /* The new front of the queue now owns the state variables. */ 209 next = list_entry(vnode->pending_locks.next, 210 struct file_lock, fl_u.afs.link); 211 + vnode->lock_key = key_get(afs_file_key(next->fl_file)); 212 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 213 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 214 goto again; ··· 413 /* The new front of the queue now owns the state variables. */ 414 next = list_entry(vnode->pending_locks.next, 415 struct file_lock, fl_u.afs.link); 416 + vnode->lock_key = key_get(afs_file_key(next->fl_file)); 417 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 418 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 419 afs_lock_may_be_available(vnode);
+2 -1
fs/afs/inode.c
··· 414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { 415 valid = true; 416 } else { 417 - vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; 418 vnode->cb_v_break = vnode->volume->cb_v_break; 419 valid = false; 420 } ··· 545 #endif 546 547 afs_put_permits(rcu_access_pointer(vnode->permit_cache)); 548 _leave(""); 549 } 550
··· 414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { 415 valid = true; 416 } else { 417 vnode->cb_v_break = vnode->volume->cb_v_break; 418 valid = false; 419 } ··· 546 #endif 547 548 afs_put_permits(rcu_access_pointer(vnode->permit_cache)); 549 + key_put(vnode->lock_key); 550 + vnode->lock_key = NULL; 551 _leave(""); 552 } 553
+11
fs/afs/protocol_yfs.h
··· 161 struct yfs_xdr_u64 max_quota; 162 struct yfs_xdr_u64 file_quota; 163 } __packed;
··· 161 struct yfs_xdr_u64 max_quota; 162 struct yfs_xdr_u64 file_quota; 163 } __packed; 164 + 165 + enum yfs_lock_type { 166 + yfs_LockNone = -1, 167 + yfs_LockRead = 0, 168 + yfs_LockWrite = 1, 169 + yfs_LockExtend = 2, 170 + yfs_LockRelease = 3, 171 + yfs_LockMandatoryRead = 0x100, 172 + yfs_LockMandatoryWrite = 0x101, 173 + yfs_LockMandatoryExtend = 0x102, 174 + };
+42 -11
fs/afs/rxrpc.c
··· 23 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); 24 static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); 25 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); 26 static void afs_process_async_call(struct work_struct *); 27 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); 28 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); ··· 204 } 205 } 206 207 /* 208 * Queue the call for actual work. 209 */ 210 static void afs_queue_call_work(struct afs_call *call) 211 { 212 if (call->type->work) { 213 - int u = atomic_inc_return(&call->usage); 214 - 215 - trace_afs_call(call, afs_call_trace_work, u, 216 - atomic_read(&call->net->nr_outstanding_calls), 217 - __builtin_return_address(0)); 218 - 219 INIT_WORK(&call->work, call->type->work); 220 221 if (!queue_work(afs_wq, &call->work)) 222 afs_put_call(call); 223 } ··· 405 } 406 } 407 408 /* create a call */ 409 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, 410 (unsigned long)call, ··· 451 goto error_do_abort; 452 } 453 454 - /* at this point, an async call may no longer exist as it may have 455 - * already completed */ 456 - if (call->async) 457 return -EINPROGRESS; 458 459 return afs_wait_for_call_to_complete(call, ac); 460 461 error_do_abort: 462 - call->state = AFS_CALL_COMPLETE; 463 if (ret != -ECONNABORTED) { 464 rxrpc_kernel_abort_call(call->net->socket, rxcall, 465 RX_USER_ABORT, ret, "KSD"); ··· 478 error_kill_call: 479 if (call->type->done) 480 call->type->done(call); 481 - afs_put_call(call); 482 ac->error = ret; 483 _leave(" = %d", ret); 484 return ret; 485 }
··· 23 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); 24 static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); 25 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); 26 + static void afs_delete_async_call(struct work_struct *); 27 static void afs_process_async_call(struct work_struct *); 28 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); 29 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); ··· 203 } 204 } 205 206 + static struct afs_call *afs_get_call(struct afs_call *call, 207 + enum afs_call_trace why) 208 + { 209 + int u = atomic_inc_return(&call->usage); 210 + 211 + trace_afs_call(call, why, u, 212 + atomic_read(&call->net->nr_outstanding_calls), 213 + __builtin_return_address(0)); 214 + return call; 215 + } 216 + 217 /* 218 * Queue the call for actual work. 219 */ 220 static void afs_queue_call_work(struct afs_call *call) 221 { 222 if (call->type->work) { 223 INIT_WORK(&call->work, call->type->work); 224 225 + afs_get_call(call, afs_call_trace_work); 226 if (!queue_work(afs_wq, &call->work)) 227 afs_put_call(call); 228 } ··· 398 } 399 } 400 401 + /* If the call is going to be asynchronous, we need an extra ref for 402 + * the call to hold itself so the caller need not hang on to its ref. 403 + */ 404 + if (call->async) 405 + afs_get_call(call, afs_call_trace_get); 406 + 407 /* create a call */ 408 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, 409 (unsigned long)call, ··· 438 goto error_do_abort; 439 } 440 441 + /* Note that at this point, we may have received the reply or an abort 442 + * - and an asynchronous call may already have completed. 443 + */ 444 + if (call->async) { 445 + afs_put_call(call); 446 return -EINPROGRESS; 447 + } 448 449 return afs_wait_for_call_to_complete(call, ac); 450 451 error_do_abort: 452 if (ret != -ECONNABORTED) { 453 rxrpc_kernel_abort_call(call->net->socket, rxcall, 454 RX_USER_ABORT, ret, "KSD"); ··· 463 error_kill_call: 464 if (call->type->done) 465 call->type->done(call); 466 + 467 + /* We need to dispose of the extra ref we grabbed for an async call. 468 + * The call, however, might be queued on afs_async_calls and we need to 469 + * make sure we don't get any more notifications that might requeue it. 470 + */ 471 + if (call->rxcall) { 472 + rxrpc_kernel_end_call(call->net->socket, call->rxcall); 473 + call->rxcall = NULL; 474 + } 475 + if (call->async) { 476 + if (cancel_work_sync(&call->async_work)) 477 + afs_put_call(call); 478 + afs_put_call(call); 479 + } 480 + 481 ac->error = ret; 482 + call->state = AFS_CALL_COMPLETE; 483 + afs_put_call(call); 484 _leave(" = %d", ret); 485 return ret; 486 }
+1 -3
fs/afs/server_list.c
··· 42 if (vldb->fs_mask[i] & type_mask) 43 nr_servers++; 44 45 - slist = kzalloc(sizeof(struct afs_server_list) + 46 - sizeof(struct afs_server_entry) * nr_servers, 47 - GFP_KERNEL); 48 if (!slist) 49 goto error; 50
··· 42 if (vldb->fs_mask[i] & type_mask) 43 nr_servers++; 44 45 + slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL); 46 if (!slist) 47 goto error; 48
+1 -1
fs/afs/yfsclient.c
··· 803 bp = xdr_encode_YFSFid(bp, &vnode->fid); 804 bp = xdr_encode_string(bp, name, namesz); 805 bp = xdr_encode_YFSStoreStatus_mode(bp, mode); 806 - bp = xdr_encode_u32(bp, 0); /* ViceLockType */ 807 yfs_check_req(call, bp); 808 809 afs_use_fs_server(call, fc->cbi);
··· 803 bp = xdr_encode_YFSFid(bp, &vnode->fid); 804 bp = xdr_encode_string(bp, name, namesz); 805 bp = xdr_encode_YFSStoreStatus_mode(bp, mode); 806 + bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ 807 yfs_check_req(call, bp); 808 809 afs_use_fs_server(call, fc->cbi);
+2
include/trace/events/afs.h
··· 25 enum afs_call_trace { 26 afs_call_trace_alloc, 27 afs_call_trace_free, 28 afs_call_trace_put, 29 afs_call_trace_wake, 30 afs_call_trace_work, ··· 160 #define afs_call_traces \ 161 EM(afs_call_trace_alloc, "ALLOC") \ 162 EM(afs_call_trace_free, "FREE ") \ 163 EM(afs_call_trace_put, "PUT ") \ 164 EM(afs_call_trace_wake, "WAKE ") \ 165 E_(afs_call_trace_work, "WORK ")
··· 25 enum afs_call_trace { 26 afs_call_trace_alloc, 27 afs_call_trace_free, 28 + afs_call_trace_get, 29 afs_call_trace_put, 30 afs_call_trace_wake, 31 afs_call_trace_work, ··· 159 #define afs_call_traces \ 160 EM(afs_call_trace_alloc, "ALLOC") \ 161 EM(afs_call_trace_free, "FREE ") \ 162 + EM(afs_call_trace_get, "GET ") \ 163 EM(afs_call_trace_put, "PUT ") \ 164 EM(afs_call_trace_wake, "WAKE ") \ 165 E_(afs_call_trace_work, "WORK ")