Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
"20 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
rapidio/rio_cm: avoid GFP_KERNEL in atomic context
Revert "ocfs2: bump up o2cb network protocol version"
ocfs2: fix start offset to ocfs2_zero_range_for_truncate()
cgroup: duplicate cgroup reference when cloning sockets
mm: memcontrol: make per-cpu charge cache IRQ-safe for socket accounting
ocfs2: fix double unlock in case retry after free truncate log
fanotify: fix list corruption in fanotify_get_response()
fsnotify: add a way to stop queueing events on group shutdown
ocfs2: fix trans extend while free cached blocks
ocfs2: fix trans extend while flush truncate log
ipc/shm: fix crash if CONFIG_SHMEM is not set
mm: fix the page_swap_info() BUG_ON check
autofs: use dentry flags to block walks during expire
MAINTAINERS: update email for VLYNQ bus entry
mm: avoid endless recursion in dump_page()
mm, thp: fix leaking mapped pte in __collapse_huge_page_swapin()
khugepaged: fix use-after-free in collapse_huge_page()
MAINTAINERS: Maik has moved
ocfs2/dlm: fix race between convert and migration
mem-hotplug: don't clear the only node in new_node_page()

+2 -2
MAINTAINERS
···
 F:	drivers/cpufreq/intel_pstate.c

 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-M:	Maik Broemme <mbroemme@plusserver.de>
+M:	Maik Broemme <mbroemme@libmpq.org>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/fb/intelfb.txt
···
 F:	net/8021q/

 VLYNQ BUS
-M:	Florian Fainelli <florian@openwrt.org>
+M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)
 S:	Maintained
 F:	drivers/vlynq/vlynq.c
+16 -3
drivers/rapidio/rio_cm.c
···
 {
 	struct rio_channel *ch;
 	unsigned int i;
+	LIST_HEAD(list);

 	riocm_debug(EXIT, ".");

+	/*
+	 * If there are any channels left in connected state send
+	 * close notification to the connection partner.
+	 * First build a list of channels that require a closing
+	 * notification because function riocm_send_close() should
+	 * be called outside of spinlock protected code.
+	 */
 	spin_lock_bh(&idr_lock);
 	idr_for_each_entry(&ch_idr, ch, i) {
-		riocm_debug(EXIT, "close ch %d", ch->id);
-		if (ch->state == RIO_CM_CONNECTED)
-			riocm_send_close(ch);
+		if (ch->state == RIO_CM_CONNECTED) {
+			riocm_debug(EXIT, "close ch %d", ch->id);
+			idr_remove(&ch_idr, ch->id);
+			list_add(&ch->ch_node, &list);
+		}
 	}
 	spin_unlock_bh(&idr_lock);
+
+	list_for_each_entry(ch, &list, ch_node)
+		riocm_send_close(ch);

 	return NOTIFY_DONE;
 }
+42 -13
fs/autofs4/expire.c
···
 	}
 	return NULL;
 }
+
 /*
  * Find an eligible tree to time-out
  * A tree is eligible if :-
···
 	struct dentry *root = sb->s_root;
 	struct dentry *dentry;
 	struct dentry *expired;
+	struct dentry *found;
 	struct autofs_info *ino;

 	if (!root)
···
 	dentry = NULL;
 	while ((dentry = get_next_positive_subdir(dentry, root))) {
+		int flags = how;
+
 		spin_lock(&sbi->fs_lock);
 		ino = autofs4_dentry_ino(dentry);
-		if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
-			expired = NULL;
-		else
-			expired = should_expire(dentry, mnt, timeout, how);
-		if (!expired) {
+		if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
 			spin_unlock(&sbi->fs_lock);
 			continue;
 		}
+		spin_unlock(&sbi->fs_lock);
+
+		expired = should_expire(dentry, mnt, timeout, flags);
+		if (!expired)
+			continue;
+
+		spin_lock(&sbi->fs_lock);
 		ino = autofs4_dentry_ino(expired);
 		ino->flags |= AUTOFS_INF_WANT_EXPIRE;
 		spin_unlock(&sbi->fs_lock);
 		synchronize_rcu();
-		spin_lock(&sbi->fs_lock);
-		if (should_expire(expired, mnt, timeout, how)) {
-			if (expired != dentry)
-				dput(dentry);
-			goto found;
-		}

+		/* Make sure a reference is not taken on found if
+		 * things have changed.
+		 */
+		flags &= ~AUTOFS_EXP_LEAVES;
+		found = should_expire(expired, mnt, timeout, how);
+		if (!found || found != expired)
+			/* Something has changed, continue */
+			goto next;
+
+		if (expired != dentry)
+			dput(dentry);
+
+		spin_lock(&sbi->fs_lock);
+		goto found;
+next:
+		spin_lock(&sbi->fs_lock);
 		ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+		spin_unlock(&sbi->fs_lock);
 		if (expired != dentry)
 			dput(expired);
-		spin_unlock(&sbi->fs_lock);
 	}
 	return NULL;
···
 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
 	struct autofs_info *ino = autofs4_dentry_ino(dentry);
 	int status;
+	int state;

 	/* Block on any pending expire */
 	if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
···
 	if (rcu_walk)
 		return -ECHILD;

+retry:
 	spin_lock(&sbi->fs_lock);
-	if (ino->flags & AUTOFS_INF_EXPIRING) {
+	state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
+	if (state == AUTOFS_INF_WANT_EXPIRE) {
+		spin_unlock(&sbi->fs_lock);
+		/*
+		 * Possibly being selected for expire, wait until
+		 * it's selected or not.
+		 */
+		schedule_timeout_uninterruptible(HZ/10);
+		goto retry;
+	}
+	if (state & AUTOFS_INF_EXPIRING) {
 		spin_unlock(&sbi->fs_lock);

 		pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);
+1 -12
fs/notify/fanotify/fanotify.c
···

 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

-	wait_event(group->fanotify_data.access_waitq, event->response ||
-				atomic_read(&group->fanotify_data.bypass_perm));
-
-	if (!event->response) {	/* bypass_perm set */
-		/*
-		 * Event was canceled because group is being destroyed. Remove
-		 * it from group's event list because we are responsible for
-		 * freeing the permission event.
-		 */
-		fsnotify_remove_event(group, &event->fae.fse);
-		return 0;
-	}
+	wait_event(group->fanotify_data.access_waitq, event->response);

 	/* userspace responded, convert to something usable */
 	switch (event->response) {
+24 -12
fs/notify/fanotify/fanotify_user.c
···

 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
 	struct fanotify_perm_event_info *event, *next;
+	struct fsnotify_event *fsn_event;

 	/*
-	 * There may be still new events arriving in the notification queue
-	 * but since userspace cannot use fanotify fd anymore, no event can
-	 * enter or leave access_list by now.
+	 * Stop new events from arriving in the notification queue. since
+	 * userspace cannot use fanotify fd anymore, no event can enter or
+	 * leave access_list by now either.
+	 */
+	fsnotify_group_stop_queueing(group);
+
+	/*
+	 * Process all permission events on access_list and notification queue
+	 * and simulate reply from userspace.
 	 */
 	spin_lock(&group->fanotify_data.access_lock);
-
-	atomic_inc(&group->fanotify_data.bypass_perm);
-
 	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
 				 fae.fse.list) {
 		pr_debug("%s: found group=%p event=%p\n", __func__, group,
···
 	spin_unlock(&group->fanotify_data.access_lock);

 	/*
-	 * Since bypass_perm is set, newly queued events will not wait for
-	 * access response. Wake up the already sleeping ones now.
-	 * synchronize_srcu() in fsnotify_destroy_group() will wait for all
-	 * processes sleeping in fanotify_handle_event() waiting for access
-	 * response and thus also for all permission events to be freed.
+	 * Destroy all non-permission events. For permission events just
+	 * dequeue them and set the response. They will be freed once the
+	 * response is consumed and fanotify_get_response() returns.
 	 */
+	mutex_lock(&group->notification_mutex);
+	while (!fsnotify_notify_queue_is_empty(group)) {
+		fsn_event = fsnotify_remove_first_event(group);
+		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+			fsnotify_destroy_event(group, fsn_event);
+		else
+			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+	}
+	mutex_unlock(&group->notification_mutex);
+
+	/* Response for all permission events it set, wakeup waiters */
 	wake_up(&group->fanotify_data.access_waitq);
 #endif
···
 	spin_lock_init(&group->fanotify_data.access_lock);
 	init_waitqueue_head(&group->fanotify_data.access_waitq);
 	INIT_LIST_HEAD(&group->fanotify_data.access_list);
-	atomic_set(&group->fanotify_data.bypass_perm, 0);
 #endif
 	switch (flags & FAN_ALL_CLASS_BITS) {
 	case FAN_CLASS_NOTIF:
+19
fs/notify/group.c
···
 }

 /*
+ * Stop queueing new events for this group. Once this function returns
+ * fsnotify_add_event() will not add any new events to the group's queue.
+ */
+void fsnotify_group_stop_queueing(struct fsnotify_group *group)
+{
+	mutex_lock(&group->notification_mutex);
+	group->shutdown = true;
+	mutex_unlock(&group->notification_mutex);
+}
+
+/*
  * Trying to get rid of a group. Remove all marks, flush all events and release
  * the group reference.
  * Note that another thread calling fsnotify_clear_marks_by_group() may still
···
  */
 void fsnotify_destroy_group(struct fsnotify_group *group)
 {
+	/*
+	 * Stop queueing new events. The code below is careful enough to not
+	 * require this but fanotify needs to stop queuing events even before
+	 * fsnotify_destroy_group() is called and this makes the other callers
+	 * of fsnotify_destroy_group() to see the same behavior.
+	 */
+	fsnotify_group_stop_queueing(group);
+
 	/* clear all inode marks for this group, attach them to destroy_list */
 	fsnotify_detach_group_marks(group);

+7 -16
fs/notify/notification.c
···
  * Add an event to the group notification queue. The group can later pull this
  * event off the queue to deal with. The function returns 0 if the event was
  * added to the queue, 1 if the event was merged with some other queued event,
- * 2 if the queue of events has overflown.
+ * 2 if the event was not queued - either the queue of events has overflown
+ * or the group is shutting down.
  */
 int fsnotify_add_event(struct fsnotify_group *group,
 		       struct fsnotify_event *event,
···
 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

 	mutex_lock(&group->notification_mutex);
+
+	if (group->shutdown) {
+		mutex_unlock(&group->notification_mutex);
+		return 2;
+	}

 	if (group->q_len >= group->max_events) {
 		ret = 2;
···
 	wake_up(&group->notification_waitq);
 	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
 	return ret;
-}
-
-/*
- * Remove @event from group's notification queue. It is the responsibility of
- * the caller to destroy the event.
- */
-void fsnotify_remove_event(struct fsnotify_group *group,
-			   struct fsnotify_event *event)
-{
-	mutex_lock(&group->notification_mutex);
-	if (!list_empty(&event->list)) {
-		list_del_init(&event->list);
-		group->q_len--;
-	}
-	mutex_unlock(&group->notification_mutex);
 }

 /*
+19 -37
fs/ocfs2/alloc.c
···
 }

 static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
-					 handle_t *handle,
 					 struct inode *data_alloc_inode,
 					 struct buffer_head *data_alloc_bh)
 {
···
 	struct ocfs2_truncate_log *tl;
 	struct inode *tl_inode = osb->osb_tl_inode;
 	struct buffer_head *tl_bh = osb->osb_tl_bh;
+	handle_t *handle;

 	di = (struct ocfs2_dinode *) tl_bh->b_data;
 	tl = &di->id2.i_dealloc;
 	i = le16_to_cpu(tl->tl_used) - 1;
 	while (i >= 0) {
+		handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
+		if (IS_ERR(handle)) {
+			status = PTR_ERR(handle);
+			mlog_errno(status);
+			goto bail;
+		}
+
 		/* Caller has given us at least enough credits to
 		 * update the truncate log dinode */
 		status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
···
 			}
 		}

-		status = ocfs2_extend_trans(handle,
-				OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
-		if (status < 0) {
-			mlog_errno(status);
-			goto bail;
-		}
+		ocfs2_commit_trans(osb, handle);
 		i--;
 	}
···
 {
 	int status;
 	unsigned int num_to_flush;
-	handle_t *handle;
 	struct inode *tl_inode = osb->osb_tl_inode;
 	struct inode *data_alloc_inode = NULL;
 	struct buffer_head *tl_bh = osb->osb_tl_bh;
···
 		goto out_mutex;
 	}

-	handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
-	if (IS_ERR(handle)) {
-		status = PTR_ERR(handle);
-		mlog_errno(status);
-		goto out_unlock;
-	}
-
-	status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
+	status = ocfs2_replay_truncate_records(osb, data_alloc_inode,
 					       data_alloc_bh);
 	if (status < 0)
 		mlog_errno(status);

-	ocfs2_commit_trans(osb, handle);
-
-out_unlock:
 	brelse(data_alloc_bh);
 	ocfs2_inode_unlock(data_alloc_inode, 1);
···
 		goto out_mutex;
 	}

-	handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		mlog_errno(ret);
-		goto out_unlock;
-	}
-
 	while (head) {
 		if (head->free_bg)
 			bg_blkno = head->free_bg;
 		else
 			bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
 							      head->free_bit);
+		handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			mlog_errno(ret);
+			goto out_unlock;
+		}
+
 		trace_ocfs2_free_cached_blocks(
 		     (unsigned long long)head->free_blk, head->free_bit);

 		ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
 					       head->free_bit, bg_blkno, 1);
-		if (ret) {
+		if (ret)
 			mlog_errno(ret);
-			goto out_journal;
-		}

-		ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE);
-		if (ret) {
-			mlog_errno(ret);
-			goto out_journal;
-		}
+		ocfs2_commit_trans(osb, handle);

 		tmp = head;
 		head = head->free_next;
 		kfree(tmp);
 	}
-
-out_journal:
-	ocfs2_commit_trans(osb, handle);

 out_unlock:
 	ocfs2_inode_unlock(inode, 1);
+1 -4
fs/ocfs2/cluster/tcp_internal.h
···
  * version here in tcp_internal.h should not need to be bumped for
  * filesystem locking changes.
  *
- * New in version 12
- *	- Negotiate hb timeout when storage is down.
- *
  * New in version 11
  *	- Negotiation of filesystem locking in the dlm join.
  *
···
  *	- full 64 bit i_size in the metadata lock lvbs
  *	- introduction of "rw" lock and pushing meta/data locking down
  */
-#define O2NET_PROTOCOL_VERSION 12ULL
+#define O2NET_PROTOCOL_VERSION 11ULL
 struct o2net_handshake {
 	__be64	protocol_version;
 	__be64	connector_id;
+6 -6
fs/ocfs2/dlm/dlmconvert.c
···
 			   struct dlm_lock *lock, int flags, int type)
 {
 	enum dlm_status status;
-	u8 old_owner = res->owner;

 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
···

 	spin_lock(&res->spinlock);
 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
-	lock->convert_pending = 0;
 	/* if it failed, move it back to granted queue.
 	 * if master returns DLM_NORMAL and then down before sending ast,
 	 * it may have already been moved to granted queue, reset to
···
 		if (status != DLM_NOTQUEUED)
 			dlm_error(status);
 		dlm_revert_pending_convert(res, lock);
-	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
-			(old_owner != res->owner)) {
-		mlog(0, "res %.*s is in recovering or has been recovered.\n",
-				res->lockname.len, res->lockname.name);
+	} else if (!lock->convert_pending) {
+		mlog(0, "%s: res %.*s, owner died and lock has been moved back "
+				"to granted list, retry convert.\n",
+				dlm->name, res->lockname.len, res->lockname.name);
 		status = DLM_RECOVERING;
 	}
+
+	lock->convert_pending = 0;
 bail:
 	spin_unlock(&res->spinlock);

+24 -10
fs/ocfs2/file.c
···
 				     u64 start, u64 len)
 {
 	int ret = 0;
-	u64 tmpend, end = start + len;
+	u64 tmpend = 0;
+	u64 end = start + len;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	unsigned int csize = osb->s_clustersize;
 	handle_t *handle;
···
 	}

 	/*
-	 * We want to get the byte offset of the end of the 1st cluster.
+	 * If start is on a cluster boundary and end is somewhere in another
+	 * cluster, we have not COWed the cluster starting at start, unless
+	 * end is also within the same cluster. So, in this case, we skip this
+	 * first call to ocfs2_zero_range_for_truncate() truncate and move on
+	 * to the next one.
 	 */
-	tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
-	if (tmpend > end)
-		tmpend = end;
+	if ((start & (csize - 1)) != 0) {
+		/*
+		 * We want to get the byte offset of the end of the 1st
+		 * cluster.
+		 */
+		tmpend = (u64)osb->s_clustersize +
+			(start & ~(osb->s_clustersize - 1));
+		if (tmpend > end)
+			tmpend = end;

-	trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
-						 (unsigned long long)tmpend);
+		trace_ocfs2_zero_partial_clusters_range1(
+			(unsigned long long)start,
+			(unsigned long long)tmpend);

-	ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
-	if (ret)
-		mlog_errno(ret);
+		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
+						    tmpend);
+		if (ret)
+			mlog_errno(ret);
+	}

 	if (tmpend < end) {
 		/*
+12 -2
fs/ocfs2/suballoc.c
···
 		inode_unlock((*ac)->ac_inode);

 		ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted);
-		if (ret == 1)
+		if (ret == 1) {
+			iput((*ac)->ac_inode);
+			(*ac)->ac_inode = NULL;
 			goto retry;
+		}

 		if (ret < 0)
 			mlog_errno(ret);

 		inode_lock((*ac)->ac_inode);
-		ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+		ret = ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+		if (ret < 0) {
+			mlog_errno(ret);
+			inode_unlock((*ac)->ac_inode);
+			iput((*ac)->ac_inode);
+			(*ac)->ac_inode = NULL;
+			goto bail;
+		}
 	}
 	if (status < 0) {
 		if (status != -ENOSPC)
+9
fs/ramfs/file-mmu.c
···
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/ramfs.h>
+#include <linux/sched.h>

 #include "internal.h"
+
+static unsigned long ramfs_mmu_get_unmapped_area(struct file *file,
+		unsigned long addr, unsigned long len, unsigned long pgoff,
+		unsigned long flags)
+{
+	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}

 const struct file_operations ramfs_file_operations = {
 	.read_iter	= generic_file_read_iter,
···
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= iter_file_splice_write,
 	.llseek		= generic_file_llseek,
+	.get_unmapped_area	= ramfs_mmu_get_unmapped_area,
 };

 const struct inode_operations ramfs_file_inode_operations = {
+3 -3
include/linux/fsnotify_backend.h
···
 #define FS_PRIO_1	1 /* fanotify content based access control */
 #define FS_PRIO_2	2 /* fanotify pre-content access */
 	unsigned int priority;
+	bool shutdown;		/* group is being shut down, don't queue more events */

 	/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
 	struct mutex mark_mutex;	/* protect marks_list */
···
 		spinlock_t access_lock;
 		struct list_head access_list;
 		wait_queue_head_t access_waitq;
-		atomic_t bypass_perm;
 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
 		int f_flags;
 		unsigned int max_marks;
···
 extern void fsnotify_get_group(struct fsnotify_group *group);
 /* drop reference on a group from fsnotify_alloc_group */
 extern void fsnotify_put_group(struct fsnotify_group *group);
+/* group destruction begins, stop queuing new events */
+extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
 /* destroy group */
 extern void fsnotify_destroy_group(struct fsnotify_group *group);
 /* fasync handler function */
···
 			      struct fsnotify_event *event,
 			      int (*merge)(struct list_head *,
 					   struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
 /* true if the group notification queue is empty */
 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
 /* return, but do not dequeue the first event on the notification queue */
+6
kernel/cgroup.c
···
 	if (cgroup_sk_alloc_disabled)
 		return;

+	/* Socket clone path */
+	if (skcd->val) {
+		cgroup_get(sock_cgroup_ptr(skcd));
+		return;
+	}
+
 	rcu_read_lock();

 	while (true) {
+4 -2
mm/debug.c
···

 void __dump_page(struct page *page, const char *reason)
 {
+	int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+
 	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
-		  page, page_ref_count(page), page_mapcount(page),
-		  page->mapping, page->index);
+		  page, page_ref_count(page), mapcount,
+		  page->mapping, page_to_pgoff(page));
 	if (PageCompound(page))
 		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
 	pr_cont("\n");
+13 -12
mm/khugepaged.c
···
  * value (scan code).
  */

-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
+static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+		struct vm_area_struct **vmap)
 {
 	struct vm_area_struct *vma;
 	unsigned long hstart, hend;
···
 	if (unlikely(khugepaged_test_exit(mm)))
 		return SCAN_ANY_PROCESS;

-	vma = find_vma(mm, address);
+	*vmap = vma = find_vma(mm, address);
 	if (!vma)
 		return SCAN_VMA_NULL;

···
 		.pmd = pmd,
 	};

+	/* we only decide to swapin, if there is enough young ptes */
+	if (referenced < HPAGE_PMD_NR/2) {
+		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+		return false;
+	}
 	fe.pte = pte_offset_map(pmd, address);
 	for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 			fe.pte++, fe.address += PAGE_SIZE) {
···
 		if (!is_swap_pte(pteval))
 			continue;
 		swapped_in++;
-		/* we only decide to swapin, if there is enough young ptes */
-		if (referenced < HPAGE_PMD_NR/2) {
-			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-			return false;
-		}
 		ret = do_swap_page(&fe, pteval);

 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 		if (ret & VM_FAULT_RETRY) {
 			down_read(&mm->mmap_sem);
-			if (hugepage_vma_revalidate(mm, address)) {
+			if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
 				/* vma is no longer available, don't continue to swapin */
 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 				return false;
···
 static void collapse_huge_page(struct mm_struct *mm,
 				   unsigned long address,
 				   struct page **hpage,
-				   struct vm_area_struct *vma,
 				   int node, int referenced)
 {
 	pmd_t *pmd, _pmd;
···
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int isolated = 0, result = 0;
 	struct mem_cgroup *memcg;
+	struct vm_area_struct *vma;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
 	gfp_t gfp;
···
 	}

 	down_read(&mm->mmap_sem);
-	result = hugepage_vma_revalidate(mm, address);
+	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result) {
 		mem_cgroup_cancel_charge(new_page, memcg, true);
 		up_read(&mm->mmap_sem);
···
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	down_write(&mm->mmap_sem);
-	result = hugepage_vma_revalidate(mm, address);
+	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result)
 		goto out;
 	/* check if the pmd is still valid */
···
 	if (ret) {
 		node = khugepaged_find_target_node();
 		/* collapse_huge_page will return with the mmap_sem released */
-		collapse_huge_page(mm, address, hpage, vma, node, referenced);
+		collapse_huge_page(mm, address, hpage, node, referenced);
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
+22 -9
mm/memcontrol.c
···
 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	struct memcg_stock_pcp *stock;
+	unsigned long flags;
 	bool ret = false;

 	if (nr_pages > CHARGE_BATCH)
 		return ret;

-	stock = &get_cpu_var(memcg_stock);
+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
 		stock->nr_pages -= nr_pages;
 		ret = true;
 	}
-	put_cpu_var(memcg_stock);
+
+	local_irq_restore(flags);
+
 	return ret;
 }

···
 	stock->cached = NULL;
 }

-/*
- * This must be called under preempt disabled or must be called by
- * a thread which is pinned to local cpu.
- */
 static void drain_local_stock(struct work_struct *dummy)
 {
-	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
+	struct memcg_stock_pcp *stock;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+	local_irq_restore(flags);
 }

 /*
···
  */
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock;
+	unsigned long flags;

+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
 		drain_stock(stock);
 		stock->cached = memcg;
 	}
 	stock->nr_pages += nr_pages;
-	put_cpu_var(memcg_stock);
+
+	local_irq_restore(flags);
 }

 /*
+3 -1
mm/memory_hotplug.c
···
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					next_node_in(nid, nmask));

-	node_clear(nid, nmask);
+	if (nid != next_node_in(nid, nmask))
+		node_clear(nid, nmask);
+
 	if (PageHighMem(page)
 	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
 		gfp_mask |= __GFP_HIGHMEM;
+3
mm/page_io.c
···
 	int ret;
 	struct swap_info_struct *sis = page_swap_info(page);

+	BUG_ON(!PageSwapCache(page));
 	if (sis->flags & SWP_FILE) {
 		struct kiocb kiocb;
 		struct file *swap_file = sis->swap_file;
···
 	int ret = 0;
 	struct swap_info_struct *sis = page_swap_info(page);

+	BUG_ON(!PageSwapCache(page));
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageUptodate(page), page);
 	if (frontswap_load(page) == 0) {
···

 	if (sis->flags & SWP_FILE) {
 		struct address_space *mapping = sis->swap_file->f_mapping;
+		BUG_ON(!PageSwapCache(page));
 		return mapping->a_ops->set_page_dirty(page);
 	} else {
 		return __set_page_dirty_no_writeback(page);
-1
mm/swapfile.c
···
 struct swap_info_struct *page_swap_info(struct page *page)
 {
 	swp_entry_t swap = { .val = page_private(page) };
-	BUG_ON(!PageSwapCache(page));
 	return swap_info[swp_type(swap)];
 }

+4 -1
net/core/sock.c
···
 		if (!try_module_get(prot->owner))
 			goto out_free_sec;
 		sk_tx_queue_clear(sk);
-		cgroup_sk_alloc(&sk->sk_cgrp_data);
 	}

 	return sk;
···
 		sock_net_set(sk, net);
 		atomic_set(&sk->sk_wmem_alloc, 1);

+		cgroup_sk_alloc(&sk->sk_cgrp_data);
 		sock_update_classid(&sk->sk_cgrp_data);
 		sock_update_netprioidx(&sk->sk_cgrp_data);
 	}
···
 	newsk->sk_priority = 0;
 	newsk->sk_incoming_cpu = raw_smp_processor_id();
 	atomic64_set(&newsk->sk_cookie, 0);
+
+	cgroup_sk_alloc(&newsk->sk_cgrp_data);
+
 	/*
 	 * Before updating sk_refcnt, we must commit prior changes to memory
 	 * (Documentation/RCU/rculist_nulls.txt for details)