Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm mpath: use atomic_t for counting members of 'struct multipath'

The use of atomic_t for nr_valid_paths, pg_init_in_progress and
pg_init_count will allow relaxing the use of the m->lock spinlock.

Suggested-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

+33 -28
drivers/md/dm-mpath.c
··· 76 76 77 77 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */ 78 78 79 - unsigned pg_init_in_progress; /* Only one pg_init allowed at once */ 80 - 81 - unsigned nr_valid_paths; /* Total number of usable paths */ 82 79 struct pgpath *current_pgpath; 83 80 struct priority_group *current_pg; 84 81 struct priority_group *next_pg; /* Switch to this PG if set */
··· 83 86 unsigned long flags; /* Multipath state flags */ 84 87 85 88 unsigned pg_init_retries; /* Number of times to retry pg_init */ 86 - unsigned pg_init_count; /* Number of times pg_init called */ 87 89 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ 90 + 91 + atomic_t nr_valid_paths; /* Total number of usable paths */ 92 + atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */ 93 + atomic_t pg_init_count; /* Number of times pg_init called */ 88 94 89 95 struct work_struct trigger_event; 90 96
··· 195 195 INIT_LIST_HEAD(&m->priority_groups); 196 196 spin_lock_init(&m->lock); 197 197 set_bit(MPATHF_QUEUE_IO, &m->flags); 198 + atomic_set(&m->nr_valid_paths, 0); 199 + atomic_set(&m->pg_init_in_progress, 0); 200 + atomic_set(&m->pg_init_count, 0); 198 201 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; 199 202 INIT_WORK(&m->trigger_event, trigger_event); 200 203 init_waitqueue_head(&m->pg_init_wait);
··· 282 279 struct pgpath *pgpath; 283 280 unsigned long pg_init_delay = 0; 284 281 285 - if (m->pg_init_in_progress || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) 282 + if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) 286 283 return 0; 287 284 288 - m->pg_init_count++; 285 + atomic_inc(&m->pg_init_count); 289 286 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); 290 287 291 288 /* Check here to reset pg_init_required */
··· 301 298 continue; 302 299 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, 303 300 pg_init_delay)) 304 - m->pg_init_in_progress++; 301 + atomic_inc(&m->pg_init_in_progress); 305 302 } 306 - return m->pg_init_in_progress; 303 + return atomic_read(&m->pg_init_in_progress); 307 304 } 308 305 309 306 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
··· 319 316 clear_bit(MPATHF_QUEUE_IO, &m->flags); 320 317 } 321 318 322 - m->pg_init_count = 0; 319 + atomic_set(&m->pg_init_count, 0); 323 320 } 324 321 325 322 static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
··· 344 341 struct priority_group *pg; 345 342 bool bypassed = true; 346 343 347 - if (!m->nr_valid_paths) { 344 + if (!atomic_read(&m->nr_valid_paths)) { 348 345 clear_bit(MPATHF_QUEUE_IO, &m->flags); 349 346 goto failed; 350 347 }
··· 905 902 /* parse the priority groups */ 906 903 while (as.argc) { 907 904 struct priority_group *pg; 905 + unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths); 908 906 909 907 pg = parse_priority_group(&as, m); 910 908 if (IS_ERR(pg)) {
··· 913 909 goto bad; 914 910 } 915 911 916 - m->nr_valid_paths += pg->nr_pgpaths; 912 + nr_valid_paths += pg->nr_pgpaths; 913 + atomic_set(&m->nr_valid_paths, nr_valid_paths); 914 + 917 915 list_add_tail(&pg->list, &m->priority_groups); 918 916 pg_count++; 919 917 pg->pg_num = pg_count;
··· 945 939 static void multipath_wait_for_pg_init_completion(struct multipath *m) 946 940 { 947 941 DECLARE_WAITQUEUE(wait, current); 948 - unsigned long flags; 949 942 950 943 add_wait_queue(&m->pg_init_wait, &wait); 951 944 952 945 while (1) { 953 946 set_current_state(TASK_UNINTERRUPTIBLE); 954 947 955 - spin_lock_irqsave(&m->lock, flags); 956 - if (!m->pg_init_in_progress) { 957 - spin_unlock_irqrestore(&m->lock, flags); 948 + if (!atomic_read(&m->pg_init_in_progress)) 958 949 break; 959 - } 960 - spin_unlock_irqrestore(&m->lock, flags); 961 950 962 951 io_schedule(); 963 952 }
··· 1002 1001 pgpath->is_active = false; 1003 1002 pgpath->fail_count++; 1004 1003 1005 - m->nr_valid_paths--; 1004 + atomic_dec(&m->nr_valid_paths); 1006 1005 1007 1006 if (pgpath == m->current_pgpath) 1008 1007 m->current_pgpath = NULL; 1009 1008 1010 1009 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, 1011 - pgpath->path.dev->name, m->nr_valid_paths); 1010 + pgpath->path.dev->name, atomic_read(&m->nr_valid_paths)); 1012 1011 1013 1012 schedule_work(&m->trigger_event); 1014 1013
··· 1026 1025 int r = 0, run_queue = 0; 1027 1026 unsigned long flags; 1028 1027 struct multipath *m = pgpath->pg->m; 1028 + unsigned nr_valid_paths; 1029 1029 1030 1030 spin_lock_irqsave(&m->lock, flags); 1031 1031
··· 1041 1039 1042 1040 pgpath->is_active = true; 1043 1041 1044 - if (!m->nr_valid_paths++) { 1042 + nr_valid_paths = atomic_inc_return(&m->nr_valid_paths); 1043 + if (nr_valid_paths == 1) { 1045 1044 m->current_pgpath = NULL; 1046 1045 run_queue = 1; 1047 1046 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { 1048 1047 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) 1049 - m->pg_init_in_progress++; 1048 + atomic_inc(&m->pg_init_in_progress); 1050 1049 } 1051 1050 1052 1051 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, 1053 - pgpath->path.dev->name, m->nr_valid_paths); 1052 + pgpath->path.dev->name, nr_valid_paths); 1054 1053 1055 1054 schedule_work(&m->trigger_event); 1056 1055
··· 1169 1166 1170 1167 spin_lock_irqsave(&m->lock, flags); 1171 1168 1172 - if (m->pg_init_count <= m->pg_init_retries && !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) 1169 + if (atomic_read(&m->pg_init_count) <= m->pg_init_retries && 1170 + !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) 1173 1171 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); 1174 1172 else 1175 1173 limit_reached = true;
··· 1240 1236 } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) 1241 1237 pg->bypassed = false; 1242 1238 1243 - if (--m->pg_init_in_progress) 1239 + if (atomic_dec_return(&m->pg_init_in_progress) > 0) 1244 1240 /* Activations of other paths are still on going */ 1245 1241 goto out; 1246 1242
··· 1321 1317 fail_path(mpio->pgpath); 1322 1318 1323 1319 spin_lock_irqsave(&m->lock, flags); 1324 - if (!m->nr_valid_paths) { 1320 + if (!atomic_read(&m->nr_valid_paths)) { 1325 1321 if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1326 1322 if (!__must_push_back(m)) 1327 1323 r = -EIO;
··· 1425 1421 1426 1422 /* Features */ 1427 1423 if (type == STATUSTYPE_INFO) 1428 - DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags), m->pg_init_count); 1424 + DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags), 1425 + atomic_read(&m->pg_init_count)); 1429 1426 else { 1430 1427 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) + 1431 1428 (m->pg_init_retries > 0) * 2 +
··· 1680 1675 spin_lock_irqsave(&m->lock, flags); 1681 1676 1682 1677 /* pg_init in progress or no paths available */ 1683 - if (m->pg_init_in_progress || 1684 - (!m->nr_valid_paths && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) { 1678 + if (atomic_read(&m->pg_init_in_progress) || 1679 + (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) { 1685 1680 busy = true; 1686 1681 goto out; 1687 1682 }