Merge git://git.kernel.org/pub/scm/linux/kernel/git/arjan/linux-2.6-async-2

* git://git.kernel.org/pub/scm/linux/kernel/git/arjan/linux-2.6-async-2:
  async: make async a command line option for now
  partial revert of asynchronous inode delete

 fs/inode.c     |   19 +++++++------------
 kernel/async.c |   16 ++++++++++++++--
 2 files changed, 21 insertions(+), 14 deletions(-)

fs/inode.c:
···
  * I_FREEING is set so that no-one will take a new reference to the inode while
  * it is being deleted.
  */
-static void generic_delete_inode_async(void *data, async_cookie_t cookie)
+void generic_delete_inode(struct inode *inode)
 {
-	struct inode *inode = data;
 	const struct super_operations *op = inode->i_sb->s_op;
+
+	list_del_init(&inode->i_list);
+	list_del_init(&inode->i_sb_list);
+	inode->i_state |= I_FREEING;
+	inodes_stat.nr_inodes--;
+	spin_unlock(&inode_lock);
 
 	security_inode_delete(inode);
···
 	wake_up_inode(inode);
 	BUG_ON(inode->i_state != I_CLEAR);
 	destroy_inode(inode);
-}
-
-void generic_delete_inode(struct inode *inode)
-{
-	list_del_init(&inode->i_list);
-	list_del_init(&inode->i_sb_list);
-	inode->i_state |= I_FREEING;
-	inodes_stat.nr_inodes--;
-	spin_unlock(&inode_lock);
-	async_schedule_special(generic_delete_inode_async, inode, &inode->i_sb->s_async_list);
 }
 
 EXPORT_SYMBOL(generic_delete_inode);
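The net effect of the revert: generic_delete_inode() is fully synchronous again. It is entered with inode_lock held by its caller, unlinks the inode from the global lists and updates inodes_stat while still locked, drops the lock itself, and only then calls into the filesystem, rather than packaging that second half as generic_delete_inode_async() and deferring it through async_schedule_special(). Below is a minimal user-space sketch of the "called with the lock held, drops it before the slow work" convention, using a pthread mutex and made-up names rather than anything from the kernel:

/* Hedged user-space analogue, not kernel code: the calling convention the
 * revert restores. The deleter is entered with the table lock already held,
 * does its unlink/accounting under the lock, then drops the lock itself
 * before the potentially slow teardown work. All names are illustrative. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_objects = 1;              /* stand-in for inodes_stat.nr_inodes */

/* Called with table_lock held, like generic_delete_inode() with inode_lock. */
static void delete_object_locked(int id)
{
        nr_objects--;                           /* unlink/account while locked */
        pthread_mutex_unlock(&table_lock);      /* drop the lock ourselves...  */
        printf("slow teardown of object %d, %d left\n", id, nr_objects);
}

int main(void)
{
        pthread_mutex_lock(&table_lock);        /* caller takes the lock...        */
        delete_object_locked(42);               /* ...callee is expected to drop it */
        return 0;
}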

kernel/async.c:
···
 static LIST_HEAD(async_running);
 static DEFINE_SPINLOCK(async_lock);
 
+static int async_enabled = 0;
+
 struct async_entry {
 	struct list_head list;
 	async_cookie_t cookie;
···
 	 * If we're out of memory or if there's too much work
 	 * pending already, we execute synchronously.
 	 */
-	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
 		kfree(entry);
 		spin_lock_irqsave(&async_lock, flags);
 		newcookie = next_cookie++;
···
 
 static int __init async_init(void)
 {
-	kthread_run(async_manager_thread, NULL, "async/mgr");
+	if (async_enabled)
+		kthread_run(async_manager_thread, NULL, "async/mgr");
 	return 0;
 }
+
+static int __init setup_async(char *str)
+{
+	async_enabled = 1;
+	return 1;
+}
+
+__setup("fastboot", setup_async);
+
 
 core_initcall(async_init);
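With this change the async machinery is dormant by default: async_init() only starts the async/mgr thread when async_enabled is set, and the scheduling path treats a disabled gate the same way as a failed allocation or a full queue, executing the callback synchronously in the caller's context. Passing "fastboot" on the kernel command line flips the gate via __setup(). Below is a minimal user-space sketch of that "disabled or out of resources, run it inline" pattern; every name in it is illustrative, not a kernel API:

/* Hedged user-space sketch, not kernel code: async stays off unless it is
 * explicitly enabled, and any request that cannot go async simply runs the
 * callback synchronously in the caller's context. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int async_enabled;                /* set only when "fastboot" is given */

struct work_entry {
        void (*func)(void *data);
        void *data;
};

/* Rough analogue of the schedule path: queue only when allowed and the entry
 * allocation succeeds, otherwise execute synchronously right here. */
static void schedule_or_run(void (*func)(void *data), void *data)
{
        struct work_entry *entry = async_enabled ? malloc(sizeof(*entry)) : NULL;

        if (!async_enabled || !entry) {
                free(entry);            /* free(NULL) is a no-op */
                func(data);             /* synchronous fallback  */
                return;
        }

        entry->func = func;
        entry->data = data;
        /* A real implementation would hand the entry to a worker thread; the
         * sketch just runs it so the program stays self-contained. */
        entry->func(entry->data);
        free(entry);
}

static void probe(void *data)
{
        printf("probing %s\n", (const char *)data);
}

int main(int argc, char **argv)
{
        /* Stands in for __setup("fastboot", setup_async) parsing the kernel
         * command line: opt in only when the flag is passed. */
        if (argc > 1 && strcmp(argv[1], "fastboot") == 0)
                async_enabled = 1;

        schedule_or_run(probe, "disk0");
        return 0;
}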