Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

target: Use array_zalloc for device_list

Turns an order-8 allocation into slab-sized ones, thereby preventing
allocation failures under memory fragmentation.

This likely saves memory as well, as the slab allocator can pack objects
more tightly than the buddy allocator.

(nab: Fix lio-core patch fuzz)

Signed-off-by: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>

Authored by Jörn Engel and committed by Nicholas Bellinger
f2083241 4a5a75f3

+47 -47
+10 -10
drivers/target/target_core_device.c
··· 72 72 } 73 73 74 74 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); 75 - se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; 75 + se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun]; 76 76 if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 77 77 struct se_dev_entry *deve = se_cmd->se_deve; 78 78 ··· 182 182 } 183 183 184 184 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); 185 - se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; 185 + se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun]; 186 186 deve = se_cmd->se_deve; 187 187 188 188 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { ··· 240 240 241 241 spin_lock_irq(&nacl->device_list_lock); 242 242 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 243 - deve = &nacl->device_list[i]; 243 + deve = nacl->device_list[i]; 244 244 245 245 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 246 246 continue; ··· 286 286 287 287 spin_lock_irq(&nacl->device_list_lock); 288 288 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 289 - deve = &nacl->device_list[i]; 289 + deve = nacl->device_list[i]; 290 290 291 291 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 292 292 continue; ··· 306 306 } 307 307 spin_unlock_irq(&nacl->device_list_lock); 308 308 309 - kfree(nacl->device_list); 309 + array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG); 310 310 nacl->device_list = NULL; 311 311 312 312 return 0; ··· 318 318 unsigned long flags; 319 319 320 320 spin_lock_irqsave(&se_nacl->device_list_lock, flags); 321 - deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; 321 + deve = se_nacl->device_list[se_cmd->orig_fe_lun]; 322 322 deve->deve_cmds--; 323 323 spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); 324 324 } ··· 331 331 struct se_dev_entry *deve; 332 332 333 333 spin_lock_irq(&nacl->device_list_lock); 334 - deve = &nacl->device_list[mapped_lun]; 334 + deve = 
nacl->device_list[mapped_lun]; 335 335 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { 336 336 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; 337 337 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; ··· 356 356 int enable) 357 357 { 358 358 struct se_port *port = lun->lun_sep; 359 - struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; 359 + struct se_dev_entry *deve = nacl->device_list[mapped_lun]; 360 360 int trans = 0; 361 361 /* 362 362 * If the MappedLUN entry is being disabled, the entry in ··· 470 470 471 471 spin_lock_irq(&nacl->device_list_lock); 472 472 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 473 - deve = &nacl->device_list[i]; 473 + deve = nacl->device_list[i]; 474 474 if (lun != deve->se_lun) 475 475 continue; 476 476 spin_unlock_irq(&nacl->device_list_lock); ··· 669 669 670 670 spin_lock_irq(&se_sess->se_node_acl->device_list_lock); 671 671 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 672 - deve = &se_sess->se_node_acl->device_list[i]; 672 + deve = se_sess->se_node_acl->device_list[i]; 673 673 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 674 674 continue; 675 675 se_lun = deve->se_lun;
+3 -3
drivers/target/target_core_fabric_configfs.c
··· 108 108 * tpg_1/attrib/demo_mode_write_protect=1 109 109 */ 110 110 spin_lock_irq(&lacl->se_lun_nacl->device_list_lock); 111 - deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun]; 111 + deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun]; 112 112 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) 113 113 lun_access = deve->lun_flags; 114 114 else ··· 137 137 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), 138 138 struct se_lun_acl, se_lun_group); 139 139 struct se_node_acl *nacl = lacl->se_lun_nacl; 140 - struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun]; 140 + struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun]; 141 141 struct se_portal_group *se_tpg; 142 142 /* 143 143 * Determine if the underlying MappedLUN has already been released.. ··· 168 168 ssize_t len; 169 169 170 170 spin_lock_irq(&se_nacl->device_list_lock); 171 - deve = &se_nacl->device_list[lacl->mapped_lun]; 171 + deve = se_nacl->device_list[lacl->mapped_lun]; 172 172 len = sprintf(page, "%d\n", 173 173 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 174 174 1 : 0);
+7 -7
drivers/target/target_core_pr.c
··· 328 328 return core_scsi2_reservation_seq_non_holder(cmd, 329 329 cdb, pr_reg_type); 330 330 331 - se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 331 + se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 332 332 /* 333 333 * Determine if the registration should be ignored due to 334 334 * non-matching ISIDs in core_scsi3_pr_reservation_check(). ··· 990 990 { 991 991 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 992 992 struct se_node_acl *nacl = lun_acl->se_lun_nacl; 993 - struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; 993 + struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun]; 994 994 995 995 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 996 996 return 0; ··· 1499 1499 1500 1500 memset(dest_iport, 0, 64); 1501 1501 1502 - local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 1502 + local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 1503 1503 /* 1504 1504 * Allocate a struct pr_transport_id_holder and setup the 1505 1505 * local_node_acl and local_se_deve pointers and add to ··· 2116 2116 return -EINVAL; 2117 2117 } 2118 2118 se_tpg = se_sess->se_tpg; 2119 - se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2119 + se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2120 2120 2121 2121 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { 2122 2122 memset(&isid_buf[0], 0, PR_REG_ISID_LEN); ··· 2432 2432 return -EINVAL; 2433 2433 } 2434 2434 se_tpg = se_sess->se_tpg; 2435 - se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2435 + se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2436 2436 /* 2437 2437 * Locate the existing *pr_reg via struct se_node_acl pointers 2438 2438 */ ··· 3005 3005 return -EINVAL; 3006 3006 } 3007 3007 3008 - se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3008 + se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3009 3009 pr_reg_n = 
core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 3010 3010 se_sess); 3011 3011 if (!pr_reg_n) { ··· 3366 3366 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 3367 3367 se_tpg = se_sess->se_tpg; 3368 3368 tf_ops = se_tpg->se_tpg_tfo; 3369 - se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3369 + se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3370 3370 /* 3371 3371 * Follow logic from spc4r17 Section 5.7.8, Table 50 -- 3372 3372 * Register behaviors for a REGISTER AND MOVE service action
+18 -18
drivers/target/target_core_stat.c
··· 1253 1253 ssize_t ret; 1254 1254 1255 1255 spin_lock_irq(&nacl->device_list_lock); 1256 - deve = &nacl->device_list[lacl->mapped_lun]; 1256 + deve = nacl->device_list[lacl->mapped_lun]; 1257 1257 if (!deve->se_lun || !deve->se_lun_acl) { 1258 1258 spin_unlock_irq(&nacl->device_list_lock); 1259 1259 return -ENODEV; ··· 1279 1279 ssize_t ret; 1280 1280 1281 1281 spin_lock_irq(&nacl->device_list_lock); 1282 - deve = &nacl->device_list[lacl->mapped_lun]; 1282 + deve = nacl->device_list[lacl->mapped_lun]; 1283 1283 if (!deve->se_lun || !deve->se_lun_acl) { 1284 1284 spin_unlock_irq(&nacl->device_list_lock); 1285 1285 return -ENODEV; ··· 1304 1304 ssize_t ret; 1305 1305 1306 1306 spin_lock_irq(&nacl->device_list_lock); 1307 - deve = &nacl->device_list[lacl->mapped_lun]; 1307 + deve = nacl->device_list[lacl->mapped_lun]; 1308 1308 if (!deve->se_lun || !deve->se_lun_acl) { 1309 1309 spin_unlock_irq(&nacl->device_list_lock); 1310 1310 return -ENODEV; ··· 1327 1327 ssize_t ret; 1328 1328 1329 1329 spin_lock_irq(&nacl->device_list_lock); 1330 - deve = &nacl->device_list[lacl->mapped_lun]; 1330 + deve = nacl->device_list[lacl->mapped_lun]; 1331 1331 if (!deve->se_lun || !deve->se_lun_acl) { 1332 1332 spin_unlock_irq(&nacl->device_list_lock); 1333 1333 return -ENODEV; ··· 1349 1349 ssize_t ret; 1350 1350 1351 1351 spin_lock_irq(&nacl->device_list_lock); 1352 - deve = &nacl->device_list[lacl->mapped_lun]; 1352 + deve = nacl->device_list[lacl->mapped_lun]; 1353 1353 if (!deve->se_lun || !deve->se_lun_acl) { 1354 1354 spin_unlock_irq(&nacl->device_list_lock); 1355 1355 return -ENODEV; ··· 1371 1371 ssize_t ret; 1372 1372 1373 1373 spin_lock_irq(&nacl->device_list_lock); 1374 - deve = &nacl->device_list[lacl->mapped_lun]; 1374 + deve = nacl->device_list[lacl->mapped_lun]; 1375 1375 if (!deve->se_lun || !deve->se_lun_acl) { 1376 1376 spin_unlock_irq(&nacl->device_list_lock); 1377 1377 return -ENODEV; ··· 1393 1393 ssize_t ret; 1394 1394 1395 1395 
spin_lock_irq(&nacl->device_list_lock); 1396 - deve = &nacl->device_list[lacl->mapped_lun]; 1396 + deve = nacl->device_list[lacl->mapped_lun]; 1397 1397 if (!deve->se_lun || !deve->se_lun_acl) { 1398 1398 spin_unlock_irq(&nacl->device_list_lock); 1399 1399 return -ENODEV; ··· 1415 1415 ssize_t ret; 1416 1416 1417 1417 spin_lock_irq(&nacl->device_list_lock); 1418 - deve = &nacl->device_list[lacl->mapped_lun]; 1418 + deve = nacl->device_list[lacl->mapped_lun]; 1419 1419 if (!deve->se_lun || !deve->se_lun_acl) { 1420 1420 spin_unlock_irq(&nacl->device_list_lock); 1421 1421 return -ENODEV; ··· 1437 1437 ssize_t ret; 1438 1438 1439 1439 spin_lock_irq(&nacl->device_list_lock); 1440 - deve = &nacl->device_list[lacl->mapped_lun]; 1440 + deve = nacl->device_list[lacl->mapped_lun]; 1441 1441 if (!deve->se_lun || !deve->se_lun_acl) { 1442 1442 spin_unlock_irq(&nacl->device_list_lock); 1443 1443 return -ENODEV; ··· 1459 1459 ssize_t ret; 1460 1460 1461 1461 spin_lock_irq(&nacl->device_list_lock); 1462 - deve = &nacl->device_list[lacl->mapped_lun]; 1462 + deve = nacl->device_list[lacl->mapped_lun]; 1463 1463 if (!deve->se_lun || !deve->se_lun_acl) { 1464 1464 spin_unlock_irq(&nacl->device_list_lock); 1465 1465 return -ENODEV; ··· 1481 1481 ssize_t ret; 1482 1482 1483 1483 spin_lock_irq(&nacl->device_list_lock); 1484 - deve = &nacl->device_list[lacl->mapped_lun]; 1484 + deve = nacl->device_list[lacl->mapped_lun]; 1485 1485 if (!deve->se_lun || !deve->se_lun_acl) { 1486 1486 spin_unlock_irq(&nacl->device_list_lock); 1487 1487 return -ENODEV; ··· 1503 1503 ssize_t ret; 1504 1504 1505 1505 spin_lock_irq(&nacl->device_list_lock); 1506 - deve = &nacl->device_list[lacl->mapped_lun]; 1506 + deve = nacl->device_list[lacl->mapped_lun]; 1507 1507 if (!deve->se_lun || !deve->se_lun_acl) { 1508 1508 spin_unlock_irq(&nacl->device_list_lock); 1509 1509 return -ENODEV; ··· 1525 1525 ssize_t ret; 1526 1526 1527 1527 spin_lock_irq(&nacl->device_list_lock); 1528 - deve = 
&nacl->device_list[lacl->mapped_lun]; 1528 + deve = nacl->device_list[lacl->mapped_lun]; 1529 1529 if (!deve->se_lun || !deve->se_lun_acl) { 1530 1530 spin_unlock_irq(&nacl->device_list_lock); 1531 1531 return -ENODEV; ··· 1548 1548 ssize_t ret; 1549 1549 1550 1550 spin_lock_irq(&nacl->device_list_lock); 1551 - deve = &nacl->device_list[lacl->mapped_lun]; 1551 + deve = nacl->device_list[lacl->mapped_lun]; 1552 1552 if (!deve->se_lun || !deve->se_lun_acl) { 1553 1553 spin_unlock_irq(&nacl->device_list_lock); 1554 1554 return -ENODEV; ··· 1621 1621 ssize_t ret; 1622 1622 1623 1623 spin_lock_irq(&nacl->device_list_lock); 1624 - deve = &nacl->device_list[lacl->mapped_lun]; 1624 + deve = nacl->device_list[lacl->mapped_lun]; 1625 1625 if (!deve->se_lun || !deve->se_lun_acl) { 1626 1626 spin_unlock_irq(&nacl->device_list_lock); 1627 1627 return -ENODEV; ··· 1647 1647 ssize_t ret; 1648 1648 1649 1649 spin_lock_irq(&nacl->device_list_lock); 1650 - deve = &nacl->device_list[lacl->mapped_lun]; 1650 + deve = nacl->device_list[lacl->mapped_lun]; 1651 1651 if (!deve->se_lun || !deve->se_lun_acl) { 1652 1652 spin_unlock_irq(&nacl->device_list_lock); 1653 1653 return -ENODEV; ··· 1672 1672 ssize_t ret; 1673 1673 1674 1674 spin_lock_irq(&nacl->device_list_lock); 1675 - deve = &nacl->device_list[lacl->mapped_lun]; 1675 + deve = nacl->device_list[lacl->mapped_lun]; 1676 1676 if (!deve->se_lun || !deve->se_lun_acl) { 1677 1677 spin_unlock_irq(&nacl->device_list_lock); 1678 1678 return -ENODEV; ··· 1721 1721 ssize_t ret; 1722 1722 1723 1723 spin_lock_irq(&nacl->device_list_lock); 1724 - deve = &nacl->device_list[lacl->mapped_lun]; 1724 + deve = nacl->device_list[lacl->mapped_lun]; 1725 1725 if (!deve->se_lun || !deve->se_lun_acl) { 1726 1726 spin_unlock_irq(&nacl->device_list_lock); 1727 1727 return -ENODEV;
+4 -4
drivers/target/target_core_tpg.c
··· 64 64 65 65 spin_lock_irq(&nacl->device_list_lock); 66 66 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 67 - deve = &nacl->device_list[i]; 67 + deve = nacl->device_list[i]; 68 68 69 69 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 70 70 continue; ··· 259 259 struct se_dev_entry *deve; 260 260 int i; 261 261 262 - nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * 263 - TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); 262 + nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG, 263 + sizeof(struct se_dev_entry), GFP_KERNEL); 264 264 if (!nacl->device_list) { 265 265 pr_err("Unable to allocate memory for" 266 266 " struct se_node_acl->device_list\n"); 267 267 return -ENOMEM; 268 268 } 269 269 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 270 - deve = &nacl->device_list[i]; 270 + deve = nacl->device_list[i]; 271 271 272 272 atomic_set(&deve->ua_count, 0); 273 273 atomic_set(&deve->pr_ref_count, 0);
+4 -4
drivers/target/target_core_ua.c
··· 53 53 if (!nacl) 54 54 return 0; 55 55 56 - deve = &nacl->device_list[cmd->orig_fe_lun]; 56 + deve = nacl->device_list[cmd->orig_fe_lun]; 57 57 if (!atomic_read(&deve->ua_count)) 58 58 return 0; 59 59 /* ··· 110 110 ua->ua_ascq = ascq; 111 111 112 112 spin_lock_irq(&nacl->device_list_lock); 113 - deve = &nacl->device_list[unpacked_lun]; 113 + deve = nacl->device_list[unpacked_lun]; 114 114 115 115 spin_lock(&deve->ua_lock); 116 116 list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { ··· 220 220 return; 221 221 222 222 spin_lock_irq(&nacl->device_list_lock); 223 - deve = &nacl->device_list[cmd->orig_fe_lun]; 223 + deve = nacl->device_list[cmd->orig_fe_lun]; 224 224 if (!atomic_read(&deve->ua_count)) { 225 225 spin_unlock_irq(&nacl->device_list_lock); 226 226 return; ··· 289 289 return -EINVAL; 290 290 291 291 spin_lock_irq(&nacl->device_list_lock); 292 - deve = &nacl->device_list[cmd->orig_fe_lun]; 292 + deve = nacl->device_list[cmd->orig_fe_lun]; 293 293 if (!atomic_read(&deve->ua_count)) { 294 294 spin_unlock_irq(&nacl->device_list_lock); 295 295 return -EPERM;
+1 -1
include/target/target_core_base.h
··· 629 629 spinlock_t stats_lock; 630 630 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 631 631 atomic_t acl_pr_ref_count; 632 - struct se_dev_entry *device_list; 632 + struct se_dev_entry **device_list; 633 633 struct se_session *nacl_sess; 634 634 struct se_portal_group *se_tpg; 635 635 spinlock_t device_list_lock;