Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.13-rc4 200 lines 5.4 kB view raw
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
#include "../mount.h"

/*
 * Detach and destroy every fsnotify mark attached to @mnt.
 *
 * Phase 1: under mnt->mnt_root->d_lock (which protects the mount's
 * mnt_fsnotify_marks hlist throughout this file), unhook each mark and
 * collect it on a private free_list, taking a reference so it cannot
 * be freed underneath us.
 * Phase 2: with the spinlock dropped, destroy each collected mark.
 * NOTE(review): the destroy work presumably may sleep (the group's
 * mark_mutex is asserted held elsewhere in this file), which would be
 * why it cannot run under the d_lock — confirm against
 * fsnotify_destroy_mark().
 */
void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
{
	struct fsnotify_mark *mark, *lmark;
	struct hlist_node *n;
	struct mount *m = real_mount(mnt);
	LIST_HEAD(free_list);

	spin_lock(&mnt->mnt_root->d_lock);
	hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
		list_add(&mark->m.free_m_list, &free_list);
		hlist_del_init_rcu(&mark->m.m_list);
		/* pin the mark so it survives until phase 2 below */
		fsnotify_get_mark(mark);
	}
	spin_unlock(&mnt->mnt_root->d_lock);

	list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
		struct fsnotify_group *group;

		/* take a group reference under mark->lock before using it */
		spin_lock(&mark->lock);
		fsnotify_get_group(mark->group);
		group = mark->group;
		spin_unlock(&mark->lock);

		fsnotify_destroy_mark(mark, group);
		fsnotify_put_mark(mark);	/* drop the phase-1 reference */
		fsnotify_put_group(group);
	}
}

/* Destroy all vfsmount marks owned by @group. */
void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_VFSMOUNT);
}

/*
 * Recalculate the mask of events relevant to a given vfsmount locked.
 */
static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
{
	struct mount *m = real_mount(mnt);
	struct fsnotify_mark *mark;
	__u32 new_mask = 0;

	assert_spin_locked(&mnt->mnt_root->d_lock);

	/* OR together the event mask of every mark on this mount */
	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
		new_mask |= mark->mask;
	m->mnt_fsnotify_mask = new_mask;
}

/*
 * Recalculate the mnt->mnt_fsnotify_mask, or the mask of all FS_* event types
 * any notifier is interested in hearing for this mount point
 */
void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
{
	spin_lock(&mnt->mnt_root->d_lock);
	fsnotify_recalc_vfsmount_mask_locked(mnt);
	spin_unlock(&mnt->mnt_root->d_lock);
}

/*
 * Unhook @mark from its vfsmount's mark list and refresh the mount's
 * event mask.  Callers must hold the mark's group->mark_mutex and
 * mark->lock (both asserted below).
 */
void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
{
	struct vfsmount *mnt = mark->m.mnt;

	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
	assert_spin_locked(&mark->lock);

	spin_lock(&mnt->mnt_root->d_lock);

	hlist_del_init_rcu(&mark->m.m_list);
	mark->m.mnt = NULL;

	fsnotify_recalc_vfsmount_mask_locked(mnt);

	spin_unlock(&mnt->mnt_root->d_lock);
}

/*
 * Walk @mnt's mark list looking for a mark owned by @group.  Returns the
 * mark with an extra reference held, or NULL if @group has no mark here.
 * Caller must hold mnt->mnt_root->d_lock (asserted).
 */
static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
								struct vfsmount *mnt)
{
	struct mount *m = real_mount(mnt);
	struct fsnotify_mark *mark;

	assert_spin_locked(&mnt->mnt_root->d_lock);

	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
		if (mark->group == group) {
			fsnotify_get_mark(mark);
			return mark;
		}
	}
	return NULL;
}

/*
 * given a group and vfsmount, find the mark associated with that combination.
 * if found take a reference to that mark and return it, else return NULL
 */
struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
						  struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;

	spin_lock(&mnt->mnt_root->d_lock);
	mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
	spin_unlock(&mnt->mnt_root->d_lock);

	return mark;
}

/*
 * Attach an initialized mark to a given group and vfsmount.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which groups.
 *
 * Returns 0 on success, or -EEXIST if @group already has a mark on @mnt
 * and @allow_dups is false.  Callers must hold group->mark_mutex and
 * mark->lock (both asserted below).
 */
int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
			       struct fsnotify_group *group, struct vfsmount *mnt,
			       int allow_dups)
{
	struct mount *m = real_mount(mnt);
	struct fsnotify_mark *lmark, *last = NULL;
	int ret = 0;

	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));
	assert_spin_locked(&mark->lock);

	spin_lock(&mnt->mnt_root->d_lock);

	mark->m.mnt = mnt;

	/* is mark the first mark? */
	if (hlist_empty(&m->mnt_fsnotify_marks)) {
		hlist_add_head_rcu(&mark->m.m_list, &m->mnt_fsnotify_marks);
		goto out;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
		last = lmark;

		if ((lmark->group == group) && !allow_dups) {
			ret = -EEXIST;
			goto out;
		}

		/* skip past marks whose group has higher priority ... */
		if (mark->group->priority < lmark->group->priority)
			continue;

		/* ... breaking priority ties by group pointer value */
		if ((mark->group->priority == lmark->group->priority) &&
		    (mark->group < lmark->group))
			continue;

		hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
		goto out;
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
out:
	/* list changed (or insertion aborted) — recompute the mount's mask */
	fsnotify_recalc_vfsmount_mask_locked(mnt);
	spin_unlock(&mnt->mnt_root->d_lock);

	return ret;
}