Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

target: Follow up core updates from AGrover and HCH (round 4)

This patch contains the squashed version of the fourth round series of cleanups
from Andy and Christoph following the post heavy lifting in the preceding:
'Eliminate usage of struct se_mem' and 'Make all control CDBs scatter-gather'
changes. This also includes a conversion of target core and the v3.0
mainline fabric modules (loopback and tcm_fc) to use pr_debug and the
CONFIG_DYNAMIC_DEBUG infrastructure!

These have been squashed into this fourth and final round for v3.1.

target: Remove ifdeffed code in t_g_process_write
target: Remove direct ramdisk code
target: Rename task_sg_num to task_sg_nents
target: Remove custom debug macros for pr_debug. Use pr_err().
target: Remove custom debug macros in mainline fabrics
target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0
target: Remove transport do_se_mem_map callback
target: Further simplify transport_free_pages
target: Redo task allocation return value handling
target: Remove extra parentheses
target: change alloc_task call to take *cdb, not *cmd

(nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev)

Signed-off-by: Andy Grover <agrover@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>

authored by

Andy Grover and committed by
Nicholas Bellinger
6708bb27 ec98f782

+1388 -2045
-6
drivers/target/loopback/Kconfig
··· 3 3 help 4 4 Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD 5 5 fabric loopback module. 6 - 7 - config LOOPBACK_TARGET_CDB_DEBUG 8 - bool "TCM loopback fabric module CDB debug code" 9 - depends on LOOPBACK_TARGET 10 - help 11 - Say Y here to enable the TCM loopback fabric module CDB debug code
+47 -47
drivers/target/loopback/tcm_loop.c
··· 79 79 80 80 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); 81 81 if (!tl_cmd) { 82 - printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n"); 82 + pr_err("Unable to allocate struct tcm_loop_cmd\n"); 83 83 set_host_byte(sc, DID_ERROR); 84 84 return NULL; 85 85 } ··· 281 281 struct tcm_loop_hba *tl_hba; 282 282 struct tcm_loop_tpg *tl_tpg; 283 283 284 - TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" 284 + pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" 285 285 " scsi_buf_len: %u\n", sc->device->host->host_no, 286 286 sc->device->id, sc->device->channel, sc->device->lun, 287 287 sc->cmnd[0], scsi_bufflen(sc)); ··· 331 331 */ 332 332 tl_nexus = tl_hba->tl_nexus; 333 333 if (!tl_nexus) { 334 - printk(KERN_ERR "Unable to perform device reset without" 334 + pr_err("Unable to perform device reset without" 335 335 " active I_T Nexus\n"); 336 336 return FAILED; 337 337 } ··· 344 344 345 345 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); 346 346 if (!tl_cmd) { 347 - printk(KERN_ERR "Unable to allocate memory for tl_cmd\n"); 347 + pr_err("Unable to allocate memory for tl_cmd\n"); 348 348 return FAILED; 349 349 } 350 350 351 351 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); 352 352 if (!tl_tmr) { 353 - printk(KERN_ERR "Unable to allocate memory for tl_tmr\n"); 353 + pr_err("Unable to allocate memory for tl_tmr\n"); 354 354 goto release; 355 355 } 356 356 init_waitqueue_head(&tl_tmr->tl_tmr_wait); ··· 435 435 sh = scsi_host_alloc(&tcm_loop_driver_template, 436 436 sizeof(struct tcm_loop_hba)); 437 437 if (!sh) { 438 - printk(KERN_ERR "Unable to allocate struct scsi_host\n"); 438 + pr_err("Unable to allocate struct scsi_host\n"); 439 439 return -ENODEV; 440 440 } 441 441 tl_hba->sh = sh; ··· 454 454 455 455 error = scsi_add_host(sh, &tl_hba->dev); 456 456 if (error) { 457 - printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); 457 + pr_err("%s: scsi_add_host failed\n", __func__); 458 458 
scsi_host_put(sh); 459 459 return -ENODEV; 460 460 } ··· 495 495 496 496 ret = device_register(&tl_hba->dev); 497 497 if (ret) { 498 - printk(KERN_ERR "device_register() failed for" 498 + pr_err("device_register() failed for" 499 499 " tl_hba->dev: %d\n", ret); 500 500 return -ENODEV; 501 501 } ··· 513 513 514 514 tcm_loop_primary = root_device_register("tcm_loop_0"); 515 515 if (IS_ERR(tcm_loop_primary)) { 516 - printk(KERN_ERR "Unable to allocate tcm_loop_primary\n"); 516 + pr_err("Unable to allocate tcm_loop_primary\n"); 517 517 return PTR_ERR(tcm_loop_primary); 518 518 } 519 519 520 520 ret = bus_register(&tcm_loop_lld_bus); 521 521 if (ret) { 522 - printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n"); 522 + pr_err("bus_register() failed for tcm_loop_lld_bus\n"); 523 523 goto dev_unreg; 524 524 } 525 525 526 526 ret = driver_register(&tcm_loop_driverfs); 527 527 if (ret) { 528 - printk(KERN_ERR "driver_register() failed for" 528 + pr_err("driver_register() failed for" 529 529 "tcm_loop_driverfs\n"); 530 530 goto bus_unreg; 531 531 } 532 532 533 - printk(KERN_INFO "Initialized TCM Loop Core Bus\n"); 533 + pr_debug("Initialized TCM Loop Core Bus\n"); 534 534 return ret; 535 535 536 536 bus_unreg: ··· 546 546 bus_unregister(&tcm_loop_lld_bus); 547 547 root_device_unregister(tcm_loop_primary); 548 548 549 - printk(KERN_INFO "Releasing TCM Loop Core BUS\n"); 549 + pr_debug("Releasing TCM Loop Core BUS\n"); 550 550 } 551 551 552 552 static char *tcm_loop_get_fabric_name(void) ··· 574 574 case SCSI_PROTOCOL_ISCSI: 575 575 return iscsi_get_fabric_proto_ident(se_tpg); 576 576 default: 577 - printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" 577 + pr_err("Unknown tl_proto_id: 0x%02x, using" 578 578 " SAS emulation\n", tl_hba->tl_proto_id); 579 579 break; 580 580 } ··· 630 630 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 631 631 format_code, buf); 632 632 default: 633 - printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" 633 + pr_err("Unknown 
tl_proto_id: 0x%02x, using" 634 634 " SAS emulation\n", tl_hba->tl_proto_id); 635 635 break; 636 636 } ··· 660 660 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 661 661 format_code); 662 662 default: 663 - printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" 663 + pr_err("Unknown tl_proto_id: 0x%02x, using" 664 664 " SAS emulation\n", tl_hba->tl_proto_id); 665 665 break; 666 666 } ··· 694 694 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 695 695 port_nexus_ptr); 696 696 default: 697 - printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" 697 + pr_err("Unknown tl_proto_id: 0x%02x, using" 698 698 " SAS emulation\n", tl_hba->tl_proto_id); 699 699 break; 700 700 } ··· 743 743 744 744 tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL); 745 745 if (!tl_nacl) { 746 - printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n"); 746 + pr_err("Unable to allocate struct tcm_loop_nacl\n"); 747 747 return NULL; 748 748 } 749 749 ··· 853 853 struct tcm_loop_cmd, tl_se_cmd); 854 854 struct scsi_cmnd *sc = tl_cmd->sc; 855 855 856 - TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p" 856 + pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p" 857 857 " cdb: 0x%02x\n", sc, sc->cmnd[0]); 858 858 859 859 sc->result = SAM_STAT_GOOD; ··· 868 868 struct tcm_loop_cmd, tl_se_cmd); 869 869 struct scsi_cmnd *sc = tl_cmd->sc; 870 870 871 - TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p" 871 + pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p" 872 872 " cdb: 0x%02x\n", sc, sc->cmnd[0]); 873 873 874 874 if (se_cmd->sense_buffer && ··· 943 943 */ 944 944 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); 945 945 946 - printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n"); 946 + pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n"); 947 947 return 0; 948 948 } 949 949 ··· 961 961 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, 962 962 se_lun->unpacked_lun); 963 963 if (!sd) { 
964 - printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:" 964 + pr_err("Unable to locate struct scsi_device for %d:%d:" 965 965 "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); 966 966 return; 967 967 } ··· 974 974 atomic_dec(&tl_tpg->tl_tpg_port_count); 975 975 smp_mb__after_atomic_dec(); 976 976 977 - printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n"); 977 + pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); 978 978 } 979 979 980 980 /* End items for tcm_loop_port_cit */ ··· 991 991 int ret = -ENOMEM; 992 992 993 993 if (tl_tpg->tl_hba->tl_nexus) { 994 - printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); 994 + pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n"); 995 995 return -EEXIST; 996 996 } 997 997 se_tpg = &tl_tpg->tl_se_tpg; 998 998 999 999 tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); 1000 1000 if (!tl_nexus) { 1001 - printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n"); 1001 + pr_err("Unable to allocate struct tcm_loop_nexus\n"); 1002 1002 return -ENOMEM; 1003 1003 } 1004 1004 /* ··· 1027 1027 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, 1028 1028 tl_nexus->se_sess, tl_nexus); 1029 1029 tl_tpg->tl_hba->tl_nexus = tl_nexus; 1030 - printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 1030 + pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 1031 1031 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 1032 1032 name); 1033 1033 return 0; ··· 1053 1053 return -ENODEV; 1054 1054 1055 1055 if (atomic_read(&tpg->tl_tpg_port_count)) { 1056 - printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with" 1056 + pr_err("Unable to remove TCM_Loop I_T Nexus with" 1057 1057 " active TPG port count: %d\n", 1058 1058 atomic_read(&tpg->tl_tpg_port_count)); 1059 1059 return -EPERM; 1060 1060 } 1061 1061 1062 - printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" 1062 + pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to 
emulated" 1063 1063 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 1064 1064 tl_nexus->se_sess->se_node_acl->initiatorname); 1065 1065 /* ··· 1115 1115 * tcm_loop_make_nexus() 1116 1116 */ 1117 1117 if (strlen(page) >= TL_WWN_ADDR_LEN) { 1118 - printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds" 1118 + pr_err("Emulated NAA Sas Address: %s, exceeds" 1119 1119 " max: %d\n", page, TL_WWN_ADDR_LEN); 1120 1120 return -EINVAL; 1121 1121 } ··· 1124 1124 ptr = strstr(i_port, "naa."); 1125 1125 if (ptr) { 1126 1126 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { 1127 - printk(KERN_ERR "Passed SAS Initiator Port %s does not" 1127 + pr_err("Passed SAS Initiator Port %s does not" 1128 1128 " match target port protoid: %s\n", i_port, 1129 1129 tcm_loop_dump_proto_id(tl_hba)); 1130 1130 return -EINVAL; ··· 1135 1135 ptr = strstr(i_port, "fc."); 1136 1136 if (ptr) { 1137 1137 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { 1138 - printk(KERN_ERR "Passed FCP Initiator Port %s does not" 1138 + pr_err("Passed FCP Initiator Port %s does not" 1139 1139 " match target port protoid: %s\n", i_port, 1140 1140 tcm_loop_dump_proto_id(tl_hba)); 1141 1141 return -EINVAL; ··· 1146 1146 ptr = strstr(i_port, "iqn."); 1147 1147 if (ptr) { 1148 1148 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { 1149 - printk(KERN_ERR "Passed iSCSI Initiator Port %s does not" 1149 + pr_err("Passed iSCSI Initiator Port %s does not" 1150 1150 " match target port protoid: %s\n", i_port, 1151 1151 tcm_loop_dump_proto_id(tl_hba)); 1152 1152 return -EINVAL; ··· 1154 1154 port_ptr = &i_port[0]; 1155 1155 goto check_newline; 1156 1156 } 1157 - printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:" 1157 + pr_err("Unable to locate prefix for emulated Initiator Port:" 1158 1158 " %s\n", i_port); 1159 1159 return -EINVAL; 1160 1160 /* ··· 1194 1194 1195 1195 tpgt_str = strstr(name, "tpgt_"); 1196 1196 if (!tpgt_str) { 1197 - printk(KERN_ERR "Unable to locate \"tpgt_#\" directory" 1197 + 
pr_err("Unable to locate \"tpgt_#\" directory" 1198 1198 " group\n"); 1199 1199 return ERR_PTR(-EINVAL); 1200 1200 } ··· 1202 1202 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); 1203 1203 1204 1204 if (tpgt >= TL_TPGS_PER_HBA) { 1205 - printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" 1205 + pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" 1206 1206 " %u\n", tpgt, TL_TPGS_PER_HBA); 1207 1207 return ERR_PTR(-EINVAL); 1208 1208 } ··· 1218 1218 if (ret < 0) 1219 1219 return ERR_PTR(-ENOMEM); 1220 1220 1221 - printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s" 1221 + pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" 1222 1222 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1223 1223 config_item_name(&wwn->wwn_group.cg_item), tpgt); 1224 1224 ··· 1245 1245 */ 1246 1246 core_tpg_deregister(se_tpg); 1247 1247 1248 - printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s" 1248 + pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s" 1249 1249 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1250 1250 config_item_name(&wwn->wwn_group.cg_item), tpgt); 1251 1251 } ··· 1266 1266 1267 1267 tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); 1268 1268 if (!tl_hba) { 1269 - printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n"); 1269 + pr_err("Unable to allocate struct tcm_loop_hba\n"); 1270 1270 return ERR_PTR(-ENOMEM); 1271 1271 } 1272 1272 /* ··· 1286 1286 } 1287 1287 ptr = strstr(name, "iqn."); 1288 1288 if (!ptr) { 1289 - printk(KERN_ERR "Unable to locate prefix for emulated Target " 1289 + pr_err("Unable to locate prefix for emulated Target " 1290 1290 "Port: %s\n", name); 1291 1291 ret = -EINVAL; 1292 1292 goto out; ··· 1295 1295 1296 1296 check_len: 1297 1297 if (strlen(name) >= TL_WWN_ADDR_LEN) { 1298 - printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" 1298 + pr_err("Emulated NAA %s Address: %s, exceeds" 1299 1299 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), 1300 1300 
TL_WWN_ADDR_LEN); 1301 1301 ret = -EINVAL; ··· 1314 1314 1315 1315 sh = tl_hba->sh; 1316 1316 tcm_loop_hba_no_cnt++; 1317 - printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target" 1317 + pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target" 1318 1318 " %s Address: %s at Linux/SCSI Host ID: %d\n", 1319 1319 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); 1320 1320 ··· 1337 1337 */ 1338 1338 device_unregister(&tl_hba->dev); 1339 1339 1340 - printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target" 1340 + pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target" 1341 1341 " SAS Address: %s at Linux/SCSI Host ID: %d\n", 1342 1342 config_item_name(&wwn->wwn_group.cg_item), host_no); 1343 1343 } ··· 1373 1373 */ 1374 1374 fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); 1375 1375 if (IS_ERR(fabric)) { 1376 - printk(KERN_ERR "tcm_loop_register_configfs() failed!\n"); 1376 + pr_err("tcm_loop_register_configfs() failed!\n"); 1377 1377 return PTR_ERR(fabric); 1378 1378 } 1379 1379 /* ··· 1464 1464 */ 1465 1465 ret = target_fabric_configfs_register(fabric); 1466 1466 if (ret < 0) { 1467 - printk(KERN_ERR "target_fabric_configfs_register() for" 1467 + pr_err("target_fabric_configfs_register() for" 1468 1468 " TCM_Loop failed!\n"); 1469 1469 target_fabric_configfs_free(fabric); 1470 1470 return -1; ··· 1473 1473 * Setup our local pointer to *fabric. 
1474 1474 */ 1475 1475 tcm_loop_fabric_configfs = fabric; 1476 - printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->" 1476 + pr_debug("TCM_LOOP[0] - Set fabric ->" 1477 1477 " tcm_loop_fabric_configfs\n"); 1478 1478 return 0; 1479 1479 } ··· 1485 1485 1486 1486 target_fabric_configfs_deregister(tcm_loop_fabric_configfs); 1487 1487 tcm_loop_fabric_configfs = NULL; 1488 - printk(KERN_INFO "TCM_LOOP[0] - Cleared" 1488 + pr_debug("TCM_LOOP[0] - Cleared" 1489 1489 " tcm_loop_fabric_configfs\n"); 1490 1490 } 1491 1491 ··· 1498 1498 __alignof__(struct tcm_loop_cmd), 1499 1499 0, NULL); 1500 1500 if (!tcm_loop_cmd_cache) { 1501 - printk(KERN_ERR "kmem_cache_create() for" 1501 + pr_debug("kmem_cache_create() for" 1502 1502 " tcm_loop_cmd_cache failed\n"); 1503 1503 return -ENOMEM; 1504 1504 }
-6
drivers/target/loopback/tcm_loop.h
··· 16 16 */ 17 17 #define TL_SCSI_MAX_CMD_LEN 32 18 18 19 - #ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG 20 - # define TL_CDB_DEBUG(x...) printk(KERN_INFO x) 21 - #else 22 - # define TL_CDB_DEBUG(x...) 23 - #endif 24 - 25 19 struct tcm_loop_cmd { 26 20 /* State of Linux/SCSI CDB+Data descriptor */ 27 21 u32 sc_cmd_state;
+84 -84
drivers/target/target_core_alua.c
··· 167 167 int alua_access_state, primary = 0, rc; 168 168 u16 tg_pt_id, rtpi; 169 169 170 - if (!(l_port)) 170 + if (!l_port) 171 171 return PYX_TRANSPORT_LU_COMM_FAILURE; 172 172 173 173 buf = transport_kmap_first_data_page(cmd); ··· 177 177 * for the local tg_pt_gp. 178 178 */ 179 179 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 180 - if (!(l_tg_pt_gp_mem)) { 181 - printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); 180 + if (!l_tg_pt_gp_mem) { 181 + pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); 182 182 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 183 183 goto out; 184 184 } 185 185 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 186 186 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; 187 - if (!(l_tg_pt_gp)) { 187 + if (!l_tg_pt_gp) { 188 188 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 189 - printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); 189 + pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); 190 190 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 191 191 goto out; 192 192 } 193 193 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); 194 194 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 195 195 196 - if (!(rc)) { 197 - printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS" 196 + if (!rc) { 197 + pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 198 198 " while TPGS_EXPLICT_ALUA is disabled\n"); 199 199 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 200 200 goto out; ··· 249 249 list_for_each_entry(tg_pt_gp, 250 250 &su_dev->t10_alua.tg_pt_gps_list, 251 251 tg_pt_gp_list) { 252 - if (!(tg_pt_gp->tg_pt_gp_valid_id)) 252 + if (!tg_pt_gp->tg_pt_gp_valid_id) 253 253 continue; 254 254 255 255 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) ··· 498 498 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 499 499 int out_alua_state, nonop_delay_msecs; 500 500 501 - if (!(port)) 501 + if (!port) 502 502 return 0; 503 503 /* 504 504 * First, check for a struct se_port specific secondary ALUA target port ··· 506 506 */ 507 507 if 
(atomic_read(&port->sep_tg_pt_secondary_offline)) { 508 508 *alua_ascq = ASCQ_04H_ALUA_OFFLINE; 509 - printk(KERN_INFO "ALUA: Got secondary offline status for local" 509 + pr_debug("ALUA: Got secondary offline status for local" 510 510 " target port\n"); 511 511 *alua_ascq = ASCQ_04H_ALUA_OFFLINE; 512 512 return 1; ··· 548 548 */ 549 549 case ALUA_ACCESS_STATE_OFFLINE: 550 550 default: 551 - printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", 551 + pr_err("Unknown ALUA access state: 0x%02x\n", 552 552 out_alua_state); 553 553 return -EINVAL; 554 554 } ··· 580 580 *primary = 0; 581 581 break; 582 582 default: 583 - printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state); 583 + pr_err("Unknown ALUA access state: 0x%02x\n", state); 584 584 return -EINVAL; 585 585 } 586 586 ··· 638 638 * The ALUA Active/NonOptimized access state delay can be disabled 639 639 * in via configfs with a value of zero 640 640 */ 641 - if (!(cmd->alua_nonop_delay)) 641 + if (!cmd->alua_nonop_delay) 642 642 return 0; 643 643 /* 644 644 * struct se_cmd->alua_nonop_delay gets set by a target port group ··· 667 667 668 668 file = filp_open(path, flags, 0600); 669 669 if (IS_ERR(file) || !file || !file->f_dentry) { 670 - printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n", 670 + pr_err("filp_open(%s) for ALUA metadata failed\n", 671 671 path); 672 672 return -ENODEV; 673 673 } ··· 681 681 set_fs(old_fs); 682 682 683 683 if (ret < 0) { 684 - printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path); 684 + pr_err("Error writing ALUA metadata file: %s\n", path); 685 685 filp_close(file, NULL); 686 686 return -EIO; 687 687 } ··· 778 778 * se_deve->se_lun_acl pointer may be NULL for a 779 779 * entry created without explict Node+MappedLUN ACLs 780 780 */ 781 - if (!(lacl)) 781 + if (!lacl) 782 782 continue; 783 783 784 784 if (explict && ··· 820 820 */ 821 821 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); 822 822 823 - printk(KERN_INFO "Successful %s ALUA 
transition TG PT Group: %s ID: %hu" 823 + pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 824 824 " from primary access state %s to %s\n", (explict) ? "explict" : 825 825 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 826 826 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), ··· 851 851 return -EINVAL; 852 852 853 853 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); 854 - if (!(md_buf)) { 855 - printk("Unable to allocate buf for ALUA metadata\n"); 854 + if (!md_buf) { 855 + pr_err("Unable to allocate buf for ALUA metadata\n"); 856 856 return -ENOMEM; 857 857 } 858 858 ··· 867 867 * we only do transition on the passed *l_tp_pt_gp, and not 868 868 * on all of the matching target port groups IDs in default_lu_gp. 869 869 */ 870 - if (!(lu_gp->lu_gp_id)) { 870 + if (!lu_gp->lu_gp_id) { 871 871 /* 872 872 * core_alua_do_transition_tg_pt() will always return 873 873 * success. ··· 899 899 &su_dev->t10_alua.tg_pt_gps_list, 900 900 tg_pt_gp_list) { 901 901 902 - if (!(tg_pt_gp->tg_pt_gp_valid_id)) 902 + if (!tg_pt_gp->tg_pt_gp_valid_id) 903 903 continue; 904 904 /* 905 905 * If the target behavior port asymmetric access state ··· 941 941 } 942 942 spin_unlock(&lu_gp->lu_gp_lock); 943 943 944 - printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT" 944 + pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 945 945 " Group IDs: %hu %s transition to primary state: %s\n", 946 946 config_item_name(&lu_gp->lu_gp_group.cg_item), 947 947 l_tg_pt_gp->tg_pt_gp_id, (explict) ? 
"explict" : "implict", ··· 1001 1001 1002 1002 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1003 1003 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1004 - if (!(tg_pt_gp)) { 1004 + if (!tg_pt_gp) { 1005 1005 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1006 - printk(KERN_ERR "Unable to complete secondary state" 1006 + pr_err("Unable to complete secondary state" 1007 1007 " transition\n"); 1008 1008 return -EINVAL; 1009 1009 } ··· 1022 1022 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 1023 1023 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 1024 1024 1025 - printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" 1025 + pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 1026 1026 " to secondary access state: %s\n", (explict) ? "explict" : 1027 1027 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1028 1028 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); ··· 1040 1040 */ 1041 1041 if (port->sep_tg_pt_secondary_write_md) { 1042 1042 md_buf = kzalloc(md_buf_len, GFP_KERNEL); 1043 - if (!(md_buf)) { 1044 - printk(KERN_ERR "Unable to allocate md_buf for" 1043 + if (!md_buf) { 1044 + pr_err("Unable to allocate md_buf for" 1045 1045 " secondary ALUA access metadata\n"); 1046 1046 return -ENOMEM; 1047 1047 } ··· 1062 1062 struct t10_alua_lu_gp *lu_gp; 1063 1063 1064 1064 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); 1065 - if (!(lu_gp)) { 1066 - printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n"); 1065 + if (!lu_gp) { 1066 + pr_err("Unable to allocate struct t10_alua_lu_gp\n"); 1067 1067 return ERR_PTR(-ENOMEM); 1068 1068 } 1069 1069 INIT_LIST_HEAD(&lu_gp->lu_gp_node); ··· 1088 1088 * The lu_gp->lu_gp_id may only be set once.. 
1089 1089 */ 1090 1090 if (lu_gp->lu_gp_valid_id) { 1091 - printk(KERN_WARNING "ALUA LU Group already has a valid ID," 1091 + pr_warn("ALUA LU Group already has a valid ID," 1092 1092 " ignoring request\n"); 1093 1093 return -EINVAL; 1094 1094 } 1095 1095 1096 1096 spin_lock(&lu_gps_lock); 1097 1097 if (alua_lu_gps_count == 0x0000ffff) { 1098 - printk(KERN_ERR "Maximum ALUA alua_lu_gps_count:" 1098 + pr_err("Maximum ALUA alua_lu_gps_count:" 1099 1099 " 0x0000ffff reached\n"); 1100 1100 spin_unlock(&lu_gps_lock); 1101 1101 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); ··· 1107 1107 1108 1108 list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { 1109 1109 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { 1110 - if (!(lu_gp_id)) 1110 + if (!lu_gp_id) 1111 1111 goto again; 1112 1112 1113 - printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu" 1113 + pr_warn("ALUA Logical Unit Group ID: %hu" 1114 1114 " already exists, ignoring request\n", 1115 1115 lu_gp_id); 1116 1116 spin_unlock(&lu_gps_lock); ··· 1133 1133 struct t10_alua_lu_gp_member *lu_gp_mem; 1134 1134 1135 1135 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); 1136 - if (!(lu_gp_mem)) { 1137 - printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n"); 1136 + if (!lu_gp_mem) { 1137 + pr_err("Unable to allocate struct t10_alua_lu_gp_member\n"); 1138 1138 return ERR_PTR(-ENOMEM); 1139 1139 } 1140 1140 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); ··· 1218 1218 return; 1219 1219 1220 1220 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1221 - if (!(lu_gp_mem)) 1221 + if (!lu_gp_mem) 1222 1222 return; 1223 1223 1224 1224 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) ··· 1226 1226 1227 1227 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1228 1228 lu_gp = lu_gp_mem->lu_gp; 1229 - if ((lu_gp)) { 1229 + if (lu_gp) { 1230 1230 spin_lock(&lu_gp->lu_gp_lock); 1231 1231 if (lu_gp_mem->lu_gp_assoc) { 1232 1232 list_del(&lu_gp_mem->lu_gp_mem_list); ··· 1248 1248 1249 1249 spin_lock(&lu_gps_lock); 1250 1250 
list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { 1251 - if (!(lu_gp->lu_gp_valid_id)) 1251 + if (!lu_gp->lu_gp_valid_id) 1252 1252 continue; 1253 1253 ci = &lu_gp->lu_gp_group.cg_item; 1254 - if (!(strcmp(config_item_name(ci), name))) { 1254 + if (!strcmp(config_item_name(ci), name)) { 1255 1255 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1256 1256 spin_unlock(&lu_gps_lock); 1257 1257 return lu_gp; ··· 1307 1307 struct t10_alua_tg_pt_gp *tg_pt_gp; 1308 1308 1309 1309 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); 1310 - if (!(tg_pt_gp)) { 1311 - printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n"); 1310 + if (!tg_pt_gp) { 1311 + pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n"); 1312 1312 return NULL; 1313 1313 } 1314 1314 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); ··· 1356 1356 * The tg_pt_gp->tg_pt_gp_id may only be set once.. 1357 1357 */ 1358 1358 if (tg_pt_gp->tg_pt_gp_valid_id) { 1359 - printk(KERN_WARNING "ALUA TG PT Group already has a valid ID," 1359 + pr_warn("ALUA TG PT Group already has a valid ID," 1360 1360 " ignoring request\n"); 1361 1361 return -EINVAL; 1362 1362 } 1363 1363 1364 1364 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1365 1365 if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { 1366 - printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:" 1366 + pr_err("Maximum ALUA alua_tg_pt_gps_count:" 1367 1367 " 0x0000ffff reached\n"); 1368 1368 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1369 1369 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); ··· 1376 1376 list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list, 1377 1377 tg_pt_gp_list) { 1378 1378 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { 1379 - if (!(tg_pt_gp_id)) 1379 + if (!tg_pt_gp_id) 1380 1380 goto again; 1381 1381 1382 - printk(KERN_ERR "ALUA Target Port Group ID: %hu already" 1382 + pr_err("ALUA Target Port Group ID: %hu already" 1383 1383 " exists, ignoring request\n", tg_pt_gp_id); 1384 1384 
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1385 1385 return -EINVAL; ··· 1403 1403 1404 1404 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, 1405 1405 GFP_KERNEL); 1406 - if (!(tg_pt_gp_mem)) { 1407 - printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n"); 1406 + if (!tg_pt_gp_mem) { 1407 + pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n"); 1408 1408 return ERR_PTR(-ENOMEM); 1409 1409 } 1410 1410 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); ··· 1491 1491 return; 1492 1492 1493 1493 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1494 - if (!(tg_pt_gp_mem)) 1494 + if (!tg_pt_gp_mem) 1495 1495 return; 1496 1496 1497 1497 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) ··· 1499 1499 1500 1500 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1501 1501 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1502 - if ((tg_pt_gp)) { 1502 + if (tg_pt_gp) { 1503 1503 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1504 1504 if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1505 1505 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); ··· 1524 1524 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1525 1525 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, 1526 1526 tg_pt_gp_list) { 1527 - if (!(tg_pt_gp->tg_pt_gp_valid_id)) 1527 + if (!tg_pt_gp->tg_pt_gp_valid_id) 1528 1528 continue; 1529 1529 ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1530 - if (!(strcmp(config_item_name(ci), name))) { 1530 + if (!strcmp(config_item_name(ci), name)) { 1531 1531 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1532 1532 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1533 1533 return tg_pt_gp; ··· 1592 1592 return len; 1593 1593 1594 1594 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1595 - if (!(tg_pt_gp_mem)) 1595 + if (!tg_pt_gp_mem) 1596 1596 return len; 1597 1597 1598 1598 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1599 1599 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1600 - if ((tg_pt_gp)) { 1600 + if (tg_pt_gp) { 1601 1601 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1602 1602 len += 
sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" 1603 1603 " %hu\nTG Port Primary Access State: %s\nTG Port " ··· 1634 1634 lun = port->sep_lun; 1635 1635 1636 1636 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1637 - printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for" 1637 + pr_warn("SPC3_ALUA_EMULATED not enabled for" 1638 1638 " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1639 1639 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1640 1640 config_item_name(&lun->lun_group.cg_item)); ··· 1642 1642 } 1643 1643 1644 1644 if (count > TG_PT_GROUP_NAME_BUF) { 1645 - printk(KERN_ERR "ALUA Target Port Group alias too large!\n"); 1645 + pr_err("ALUA Target Port Group alias too large!\n"); 1646 1646 return -EINVAL; 1647 1647 } 1648 1648 memset(buf, 0, TG_PT_GROUP_NAME_BUF); ··· 1659 1659 */ 1660 1660 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, 1661 1661 strstrip(buf)); 1662 - if (!(tg_pt_gp_new)) 1662 + if (!tg_pt_gp_new) 1663 1663 return -ENODEV; 1664 1664 } 1665 1665 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1666 - if (!(tg_pt_gp_mem)) { 1666 + if (!tg_pt_gp_mem) { 1667 1667 if (tg_pt_gp_new) 1668 1668 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); 1669 - printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); 1669 + pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); 1670 1670 return -EINVAL; 1671 1671 } 1672 1672 1673 1673 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1674 1674 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1675 - if ((tg_pt_gp)) { 1675 + if (tg_pt_gp) { 1676 1676 /* 1677 1677 * Clearing an existing tg_pt_gp association, and replacing 1678 1678 * with the default_tg_pt_gp. 
1679 1679 */ 1680 - if (!(tg_pt_gp_new)) { 1681 - printk(KERN_INFO "Target_Core_ConfigFS: Moving" 1680 + if (!tg_pt_gp_new) { 1681 + pr_debug("Target_Core_ConfigFS: Moving" 1682 1682 " %s/tpgt_%hu/%s from ALUA Target Port Group:" 1683 1683 " alua/%s, ID: %hu back to" 1684 1684 " default_tg_pt_gp\n", ··· 1707 1707 */ 1708 1708 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); 1709 1709 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1710 - printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 1710 + pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 1711 1711 " Target Port Group: alua/%s, ID: %hu\n", (move) ? 1712 1712 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1713 1713 tpg->se_tpg_tfo->tpg_get_tag(tpg), ··· 1744 1744 1745 1745 ret = strict_strtoul(page, 0, &tmp); 1746 1746 if (ret < 0) { 1747 - printk(KERN_ERR "Unable to extract alua_access_type\n"); 1747 + pr_err("Unable to extract alua_access_type\n"); 1748 1748 return -EINVAL; 1749 1749 } 1750 1750 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { 1751 - printk(KERN_ERR "Illegal value for alua_access_type:" 1751 + pr_err("Illegal value for alua_access_type:" 1752 1752 " %lu\n", tmp); 1753 1753 return -EINVAL; 1754 1754 } ··· 1782 1782 1783 1783 ret = strict_strtoul(page, 0, &tmp); 1784 1784 if (ret < 0) { 1785 - printk(KERN_ERR "Unable to extract nonop_delay_msecs\n"); 1785 + pr_err("Unable to extract nonop_delay_msecs\n"); 1786 1786 return -EINVAL; 1787 1787 } 1788 1788 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { 1789 - printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds" 1789 + pr_err("Passed nonop_delay_msecs: %lu, exceeds" 1790 1790 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, 1791 1791 ALUA_MAX_NONOP_DELAY_MSECS); 1792 1792 return -EINVAL; ··· 1813 1813 1814 1814 ret = strict_strtoul(page, 0, &tmp); 1815 1815 if (ret < 0) { 1816 - printk(KERN_ERR "Unable to extract trans_delay_msecs\n"); 1816 + pr_err("Unable to extract trans_delay_msecs\n"); 1817 1817 
return -EINVAL; 1818 1818 } 1819 1819 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { 1820 - printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds" 1820 + pr_err("Passed trans_delay_msecs: %lu, exceeds" 1821 1821 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, 1822 1822 ALUA_MAX_TRANS_DELAY_MSECS); 1823 1823 return -EINVAL; ··· 1844 1844 1845 1845 ret = strict_strtoul(page, 0, &tmp); 1846 1846 if (ret < 0) { 1847 - printk(KERN_ERR "Unable to extract preferred ALUA value\n"); 1847 + pr_err("Unable to extract preferred ALUA value\n"); 1848 1848 return -EINVAL; 1849 1849 } 1850 1850 if ((tmp != 0) && (tmp != 1)) { 1851 - printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp); 1851 + pr_err("Illegal value for preferred ALUA: %lu\n", tmp); 1852 1852 return -EINVAL; 1853 1853 } 1854 1854 tg_pt_gp->tg_pt_gp_pref = (int)tmp; ··· 1858 1858 1859 1859 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) 1860 1860 { 1861 - if (!(lun->lun_sep)) 1861 + if (!lun->lun_sep) 1862 1862 return -ENODEV; 1863 1863 1864 1864 return sprintf(page, "%d\n", ··· 1874 1874 unsigned long tmp; 1875 1875 int ret; 1876 1876 1877 - if (!(lun->lun_sep)) 1877 + if (!lun->lun_sep) 1878 1878 return -ENODEV; 1879 1879 1880 1880 ret = strict_strtoul(page, 0, &tmp); 1881 1881 if (ret < 0) { 1882 - printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n"); 1882 + pr_err("Unable to extract alua_tg_pt_offline value\n"); 1883 1883 return -EINVAL; 1884 1884 } 1885 1885 if ((tmp != 0) && (tmp != 1)) { 1886 - printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n", 1886 + pr_err("Illegal value for alua_tg_pt_offline: %lu\n", 1887 1887 tmp); 1888 1888 return -EINVAL; 1889 1889 } 1890 1890 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; 1891 - if (!(tg_pt_gp_mem)) { 1892 - printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n"); 1891 + if (!tg_pt_gp_mem) { 1892 + pr_err("Unable to locate *tg_pt_gp_mem\n"); 1893 1893 return -EINVAL; 1894 1894 } 1895 1895 ··· 1918 1918 1919 1919 ret = 
strict_strtoul(page, 0, &tmp); 1920 1920 if (ret < 0) { 1921 - printk(KERN_ERR "Unable to extract alua_tg_pt_status\n"); 1921 + pr_err("Unable to extract alua_tg_pt_status\n"); 1922 1922 return -EINVAL; 1923 1923 } 1924 1924 if ((tmp != ALUA_STATUS_NONE) && 1925 1925 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 1926 1926 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 1927 - printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n", 1927 + pr_err("Illegal value for alua_tg_pt_status: %lu\n", 1928 1928 tmp); 1929 1929 return -EINVAL; 1930 1930 } ··· 1951 1951 1952 1952 ret = strict_strtoul(page, 0, &tmp); 1953 1953 if (ret < 0) { 1954 - printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n"); 1954 + pr_err("Unable to extract alua_tg_pt_write_md\n"); 1955 1955 return -EINVAL; 1956 1956 } 1957 1957 if ((tmp != 0) && (tmp != 1)) { 1958 - printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:" 1958 + pr_err("Illegal value for alua_tg_pt_write_md:" 1959 1959 " %lu\n", tmp); 1960 1960 return -EINVAL; 1961 1961 } ··· 1979 1979 !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) { 1980 1980 alua->alua_type = SPC_ALUA_PASSTHROUGH; 1981 1981 alua->alua_state_check = &core_alua_state_check_nop; 1982 - printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" 1982 + pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" 1983 1983 " emulation\n", dev->transport->name); 1984 1984 return 0; 1985 1985 } ··· 1988 1988 * use emulated ALUA. 
1989 1989 */ 1990 1990 if (dev->transport->get_device_rev(dev) >= SCSI_3) { 1991 - printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3" 1991 + pr_debug("%s: Enabling ALUA Emulation for SPC-3" 1992 1992 " device\n", dev->transport->name); 1993 1993 /* 1994 1994 * Associate this struct se_device with the default ALUA ··· 2005 2005 default_lu_gp); 2006 2006 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2007 2007 2008 - printk(KERN_INFO "%s: Adding to default ALUA LU Group:" 2008 + pr_debug("%s: Adding to default ALUA LU Group:" 2009 2009 " core/alua/lu_gps/default_lu_gp\n", 2010 2010 dev->transport->name); 2011 2011 } else { 2012 2012 alua->alua_type = SPC2_ALUA_DISABLED; 2013 2013 alua->alua_state_check = &core_alua_state_check_nop; 2014 - printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2" 2014 + pr_debug("%s: Disabling ALUA Emulation for SPC-2" 2015 2015 " device\n", dev->transport->name); 2016 2016 } 2017 2017
+26 -24
drivers/target/target_core_cdb.c
··· 73 73 * payload going back for EVPD=0 74 74 */ 75 75 if (cmd->data_length < 6) { 76 - printk(KERN_ERR "SCSI Inquiry payload length: %u" 76 + pr_err("SCSI Inquiry payload length: %u" 77 77 " too small for EVPD=0\n", cmd->data_length); 78 78 return -EINVAL; 79 79 } ··· 327 327 328 328 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 329 329 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 330 - if (!(tg_pt_gp)) { 330 + if (!tg_pt_gp) { 331 331 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 332 332 goto check_lu_gp; 333 333 } ··· 358 358 goto check_scsi_name; 359 359 } 360 360 lu_gp_mem = dev->dev_alua_lu_gp_mem; 361 - if (!(lu_gp_mem)) 361 + if (!lu_gp_mem) 362 362 goto check_scsi_name; 363 363 364 364 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 365 365 lu_gp = lu_gp_mem->lu_gp; 366 - if (!(lu_gp)) { 366 + if (!lu_gp) { 367 367 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 368 368 goto check_scsi_name; 369 369 } ··· 475 475 have_tp = 1; 476 476 477 477 if (cmd->data_length < (0x10 + 4)) { 478 - printk(KERN_INFO "Received data_length: %u" 478 + pr_debug("Received data_length: %u" 479 479 " too small for EVPD 0xb0\n", 480 480 cmd->data_length); 481 481 return -EINVAL; 482 482 } 483 483 484 484 if (have_tp && cmd->data_length < (0x3c + 4)) { 485 - printk(KERN_INFO "Received data_length: %u" 485 + pr_debug("Received data_length: %u" 486 486 " too small for TPE=1 EVPD 0xb0\n", 487 487 cmd->data_length); 488 488 have_tp = 0; ··· 490 490 491 491 buf[0] = dev->transport->get_device_type(dev); 492 492 buf[3] = have_tp ? 
0x3c : 0x10; 493 + 494 + /* Set WSNZ to 1 */ 495 + buf[4] = 0x01; 493 496 494 497 /* 495 498 * Set OPTIMAL TRANSFER LENGTH GRANULARITY ··· 670 667 * payload length left for the next outgoing EVPD metadata 671 668 */ 672 669 if (cmd->data_length < 4) { 673 - printk(KERN_ERR "SCSI Inquiry payload length: %u" 670 + pr_err("SCSI Inquiry payload length: %u" 674 671 " too small for EVPD=1\n", cmd->data_length); 675 672 return -EINVAL; 676 673 } ··· 688 685 } 689 686 690 687 transport_kunmap_first_data_page(cmd); 691 - printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); 688 + pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); 692 689 return -EINVAL; 693 690 } 694 691 ··· 894 891 length += target_modesense_control(dev, &buf[offset+length]); 895 892 break; 896 893 default: 897 - printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n", 894 + pr_err("Got Unknown Mode Page: 0x%02x\n", 898 895 cdb[2] & 0x3f); 899 896 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; 900 897 } ··· 950 947 int err = 0; 951 948 952 949 if (cdb[1] & 0x01) { 953 - printk(KERN_ERR "REQUEST_SENSE description emulation not" 950 + pr_err("REQUEST_SENSE description emulation not" 954 951 " supported\n"); 955 952 return PYX_TRANSPORT_INVALID_CDB_FIELD; 956 953 } 957 954 958 955 buf = transport_kmap_first_data_page(cmd); 959 956 960 - if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { 957 + if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { 961 958 /* 962 959 * CURRENT ERROR, UNIT ATTENTION 963 960 */ ··· 1031 1028 buf = transport_kmap_first_data_page(cmd); 1032 1029 1033 1030 ptr = &buf[offset]; 1034 - printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" 1031 + pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" 1035 1032 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 1036 1033 1037 1034 while (size) { 1038 1035 lba = get_unaligned_be64(&ptr[0]); 1039 1036 range = get_unaligned_be32(&ptr[8]); 1040 - printk(KERN_INFO "UNMAP: Using lba: %llu and 
range: %u\n", 1037 + pr_debug("UNMAP: Using lba: %llu and range: %u\n", 1041 1038 (unsigned long long)lba, range); 1042 1039 1043 1040 ret = dev->transport->do_discard(dev, lba, range); 1044 1041 if (ret < 0) { 1045 - printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", 1042 + pr_err("blkdev_issue_discard() failed: %d\n", 1046 1043 ret); 1047 1044 goto err; 1048 1045 } ··· 1087 1084 else 1088 1085 range = (dev->transport->get_blocks(dev) - lba); 1089 1086 1090 - printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", 1087 + pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", 1091 1088 (unsigned long long)lba, (unsigned long long)range); 1092 1089 1093 1090 ret = dev->transport->do_discard(dev, lba, range); 1094 1091 if (ret < 0) { 1095 - printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); 1092 + pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); 1096 1093 return ret; 1097 1094 } 1098 1095 ··· 1128 1125 ret = target_emulate_readcapacity_16(cmd); 1129 1126 break; 1130 1127 default: 1131 - printk(KERN_ERR "Unsupported SA: 0x%02x\n", 1128 + pr_err("Unsupported SA: 0x%02x\n", 1132 1129 cmd->t_task_cdb[1] & 0x1f); 1133 1130 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1134 1131 } ··· 1138 1135 break; 1139 1136 case UNMAP: 1140 1137 if (!dev->transport->do_discard) { 1141 - printk(KERN_ERR "UNMAP emulation not supported for: %s\n", 1138 + pr_err("UNMAP emulation not supported for: %s\n", 1142 1139 dev->transport->name); 1143 1140 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1144 1141 } ··· 1146 1143 break; 1147 1144 case WRITE_SAME_16: 1148 1145 if (!dev->transport->do_discard) { 1149 - printk(KERN_ERR "WRITE_SAME_16 emulation not supported" 1146 + pr_err("WRITE_SAME_16 emulation not supported" 1150 1147 " for: %s\n", dev->transport->name); 1151 1148 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1152 1149 } ··· 1158 1155 switch (service_action) { 1159 1156 case WRITE_SAME_32: 1160 1157 if (!dev->transport->do_discard) { 1161 - printk(KERN_ERR 
"WRITE_SAME_32 SA emulation not" 1158 + pr_err("WRITE_SAME_32 SA emulation not" 1162 1159 " supported for: %s\n", 1163 1160 dev->transport->name); 1164 1161 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; ··· 1166 1163 ret = target_emulate_write_same(task, 1); 1167 1164 break; 1168 1165 default: 1169 - printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:" 1166 + pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" 1170 1167 " 0x%02x\n", service_action); 1171 1168 break; 1172 1169 } ··· 1174 1171 case SYNCHRONIZE_CACHE: 1175 1172 case 0x91: /* SYNCHRONIZE_CACHE_16: */ 1176 1173 if (!dev->transport->do_sync_cache) { 1177 - printk(KERN_ERR 1178 - "SYNCHRONIZE_CACHE emulation not supported" 1174 + pr_err("SYNCHRONIZE_CACHE emulation not supported" 1179 1175 " for: %s\n", dev->transport->name); 1180 1176 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1181 1177 } ··· 1191 1189 case WRITE_FILEMARKS: 1192 1190 break; 1193 1191 default: 1194 - printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", 1192 + pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n", 1195 1193 cmd->t_task_cdb[0], dev->transport->name); 1196 1194 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1197 1195 }
+232 -232
drivers/target/target_core_configfs.c
··· 104 104 { 105 105 struct target_fabric_configfs *tf; 106 106 107 - if (!(name)) 107 + if (!name) 108 108 return NULL; 109 109 110 110 mutex_lock(&g_tf_lock); 111 111 list_for_each_entry(tf, &g_tf_list, tf_list) { 112 - if (!(strcmp(tf->tf_name, name))) { 112 + if (!strcmp(tf->tf_name, name)) { 113 113 atomic_inc(&tf->tf_access_cnt); 114 114 mutex_unlock(&g_tf_lock); 115 115 return tf; ··· 130 130 struct target_fabric_configfs *tf; 131 131 int ret; 132 132 133 - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:" 133 + pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" 134 134 " %s\n", group, name); 135 135 /* 136 136 * Ensure that TCM subsystem plugins are loaded at this point for ··· 150 150 * registered, but simply provids auto loading logic for modules with 151 151 * mkdir(2) system calls with known TCM fabric modules. 152 152 */ 153 - if (!(strncmp(name, "iscsi", 5))) { 153 + if (!strncmp(name, "iscsi", 5)) { 154 154 /* 155 155 * Automatically load the LIO Target fabric module when the 156 156 * following is called: ··· 159 159 */ 160 160 ret = request_module("iscsi_target_mod"); 161 161 if (ret < 0) { 162 - printk(KERN_ERR "request_module() failed for" 162 + pr_err("request_module() failed for" 163 163 " iscsi_target_mod.ko: %d\n", ret); 164 164 return ERR_PTR(-EINVAL); 165 165 } 166 - } else if (!(strncmp(name, "loopback", 8))) { 166 + } else if (!strncmp(name, "loopback", 8)) { 167 167 /* 168 168 * Automatically load the tcm_loop fabric module when the 169 169 * following is called: ··· 172 172 */ 173 173 ret = request_module("tcm_loop"); 174 174 if (ret < 0) { 175 - printk(KERN_ERR "request_module() failed for" 175 + pr_err("request_module() failed for" 176 176 " tcm_loop.ko: %d\n", ret); 177 177 return ERR_PTR(-EINVAL); 178 178 } 179 179 } 180 180 181 181 tf = target_core_get_fabric(name); 182 - if (!(tf)) { 183 - printk(KERN_ERR "target_core_get_fabric() failed for %s\n", 182 + if (!tf) { 183 + 
pr_err("target_core_get_fabric() failed for %s\n", 184 184 name); 185 185 return ERR_PTR(-EINVAL); 186 186 } 187 - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:" 187 + pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" 188 188 " %s\n", tf->tf_name); 189 189 /* 190 190 * On a successful target_core_get_fabric() look, the returned 191 191 * struct target_fabric_configfs *tf will contain a usage reference. 192 192 */ 193 - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", 193 + pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", 194 194 &TF_CIT_TMPL(tf)->tfc_wwn_cit); 195 195 196 196 tf->tf_group.default_groups = tf->tf_default_groups; ··· 202 202 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", 203 203 &TF_CIT_TMPL(tf)->tfc_discovery_cit); 204 204 205 - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" 205 + pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" 206 206 " %s\n", tf->tf_group.cg_item.ci_name); 207 207 /* 208 208 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() 209 209 */ 210 210 tf->tf_ops.tf_subsys = tf->tf_subsys; 211 211 tf->tf_fabric = &tf->tf_group.cg_item; 212 - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" 212 + pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" 213 213 " for %s\n", name); 214 214 215 215 return &tf->tf_group; ··· 228 228 struct config_item *df_item; 229 229 int i; 230 230 231 - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" 231 + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" 232 232 " tf list\n", config_item_name(item)); 233 233 234 - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:" 234 + pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" 235 235 " %s\n", tf->tf_name); 236 236 atomic_dec(&tf->tf_access_cnt); 237 237 238 - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing" 
238 + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing" 239 239 " tf->tf_fabric for %s\n", tf->tf_name); 240 240 tf->tf_fabric = NULL; 241 241 242 - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci" 242 + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" 243 243 " %s\n", config_item_name(item)); 244 244 245 245 tf_group = &tf->tf_group; ··· 307 307 struct target_fabric_configfs *tf; 308 308 309 309 if (!(name)) { 310 - printk(KERN_ERR "Unable to locate passed fabric name\n"); 310 + pr_err("Unable to locate passed fabric name\n"); 311 311 return ERR_PTR(-EINVAL); 312 312 } 313 313 if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { 314 - printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" 314 + pr_err("Passed name: %s exceeds TARGET_FABRIC" 315 315 "_NAME_SIZE\n", name); 316 316 return ERR_PTR(-EINVAL); 317 317 } 318 318 319 319 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); 320 - if (!(tf)) 320 + if (!tf) 321 321 return ERR_PTR(-ENOMEM); 322 322 323 323 INIT_LIST_HEAD(&tf->tf_list); ··· 336 336 list_add_tail(&tf->tf_list, &g_tf_list); 337 337 mutex_unlock(&g_tf_lock); 338 338 339 - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" 339 + pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" 340 340 ">>>>>>>>>>>>>>\n"); 341 - printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for" 341 + pr_debug("Initialized struct target_fabric_configfs: %p for" 342 342 " %s\n", tf, tf->tf_name); 343 343 return tf; 344 344 } ··· 367 367 { 368 368 struct target_core_fabric_ops *tfo = &tf->tf_ops; 369 369 370 - if (!(tfo->get_fabric_name)) { 371 - printk(KERN_ERR "Missing tfo->get_fabric_name()\n"); 370 + if (!tfo->get_fabric_name) { 371 + pr_err("Missing tfo->get_fabric_name()\n"); 372 372 return -EINVAL; 373 373 } 374 - if (!(tfo->get_fabric_proto_ident)) { 375 - printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n"); 374 + if (!tfo->get_fabric_proto_ident) { 375 + pr_err("Missing 
tfo->get_fabric_proto_ident()\n"); 376 376 return -EINVAL; 377 377 } 378 - if (!(tfo->tpg_get_wwn)) { 379 - printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n"); 378 + if (!tfo->tpg_get_wwn) { 379 + pr_err("Missing tfo->tpg_get_wwn()\n"); 380 380 return -EINVAL; 381 381 } 382 - if (!(tfo->tpg_get_tag)) { 383 - printk(KERN_ERR "Missing tfo->tpg_get_tag()\n"); 382 + if (!tfo->tpg_get_tag) { 383 + pr_err("Missing tfo->tpg_get_tag()\n"); 384 384 return -EINVAL; 385 385 } 386 - if (!(tfo->tpg_get_default_depth)) { 387 - printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n"); 386 + if (!tfo->tpg_get_default_depth) { 387 + pr_err("Missing tfo->tpg_get_default_depth()\n"); 388 388 return -EINVAL; 389 389 } 390 - if (!(tfo->tpg_get_pr_transport_id)) { 391 - printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n"); 390 + if (!tfo->tpg_get_pr_transport_id) { 391 + pr_err("Missing tfo->tpg_get_pr_transport_id()\n"); 392 392 return -EINVAL; 393 393 } 394 - if (!(tfo->tpg_get_pr_transport_id_len)) { 395 - printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n"); 394 + if (!tfo->tpg_get_pr_transport_id_len) { 395 + pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n"); 396 396 return -EINVAL; 397 397 } 398 - if (!(tfo->tpg_check_demo_mode)) { 399 - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n"); 398 + if (!tfo->tpg_check_demo_mode) { 399 + pr_err("Missing tfo->tpg_check_demo_mode()\n"); 400 400 return -EINVAL; 401 401 } 402 - if (!(tfo->tpg_check_demo_mode_cache)) { 403 - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n"); 402 + if (!tfo->tpg_check_demo_mode_cache) { 403 + pr_err("Missing tfo->tpg_check_demo_mode_cache()\n"); 404 404 return -EINVAL; 405 405 } 406 - if (!(tfo->tpg_check_demo_mode_write_protect)) { 407 - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n"); 406 + if (!tfo->tpg_check_demo_mode_write_protect) { 407 + pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n"); 408 408 return -EINVAL; 409 409 } 410 - if 
(!(tfo->tpg_check_prod_mode_write_protect)) { 411 - printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n"); 410 + if (!tfo->tpg_check_prod_mode_write_protect) { 411 + pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); 412 412 return -EINVAL; 413 413 } 414 - if (!(tfo->tpg_alloc_fabric_acl)) { 415 - printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n"); 414 + if (!tfo->tpg_alloc_fabric_acl) { 415 + pr_err("Missing tfo->tpg_alloc_fabric_acl()\n"); 416 416 return -EINVAL; 417 417 } 418 - if (!(tfo->tpg_release_fabric_acl)) { 419 - printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n"); 418 + if (!tfo->tpg_release_fabric_acl) { 419 + pr_err("Missing tfo->tpg_release_fabric_acl()\n"); 420 420 return -EINVAL; 421 421 } 422 - if (!(tfo->tpg_get_inst_index)) { 423 - printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n"); 422 + if (!tfo->tpg_get_inst_index) { 423 + pr_err("Missing tfo->tpg_get_inst_index()\n"); 424 424 return -EINVAL; 425 425 } 426 426 if (!tfo->release_cmd) { 427 - printk(KERN_ERR "Missing tfo->release_cmd()\n"); 427 + pr_err("Missing tfo->release_cmd()\n"); 428 428 return -EINVAL; 429 429 } 430 - if (!(tfo->shutdown_session)) { 431 - printk(KERN_ERR "Missing tfo->shutdown_session()\n"); 430 + if (!tfo->shutdown_session) { 431 + pr_err("Missing tfo->shutdown_session()\n"); 432 432 return -EINVAL; 433 433 } 434 - if (!(tfo->close_session)) { 435 - printk(KERN_ERR "Missing tfo->close_session()\n"); 434 + if (!tfo->close_session) { 435 + pr_err("Missing tfo->close_session()\n"); 436 436 return -EINVAL; 437 437 } 438 - if (!(tfo->stop_session)) { 439 - printk(KERN_ERR "Missing tfo->stop_session()\n"); 438 + if (!tfo->stop_session) { 439 + pr_err("Missing tfo->stop_session()\n"); 440 440 return -EINVAL; 441 441 } 442 - if (!(tfo->fall_back_to_erl0)) { 443 - printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n"); 442 + if (!tfo->fall_back_to_erl0) { 443 + pr_err("Missing tfo->fall_back_to_erl0()\n"); 444 444 return -EINVAL; 445 
445 } 446 - if (!(tfo->sess_logged_in)) { 447 - printk(KERN_ERR "Missing tfo->sess_logged_in()\n"); 446 + if (!tfo->sess_logged_in) { 447 + pr_err("Missing tfo->sess_logged_in()\n"); 448 448 return -EINVAL; 449 449 } 450 - if (!(tfo->sess_get_index)) { 451 - printk(KERN_ERR "Missing tfo->sess_get_index()\n"); 450 + if (!tfo->sess_get_index) { 451 + pr_err("Missing tfo->sess_get_index()\n"); 452 452 return -EINVAL; 453 453 } 454 - if (!(tfo->write_pending)) { 455 - printk(KERN_ERR "Missing tfo->write_pending()\n"); 454 + if (!tfo->write_pending) { 455 + pr_err("Missing tfo->write_pending()\n"); 456 456 return -EINVAL; 457 457 } 458 - if (!(tfo->write_pending_status)) { 459 - printk(KERN_ERR "Missing tfo->write_pending_status()\n"); 458 + if (!tfo->write_pending_status) { 459 + pr_err("Missing tfo->write_pending_status()\n"); 460 460 return -EINVAL; 461 461 } 462 - if (!(tfo->set_default_node_attributes)) { 463 - printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n"); 462 + if (!tfo->set_default_node_attributes) { 463 + pr_err("Missing tfo->set_default_node_attributes()\n"); 464 464 return -EINVAL; 465 465 } 466 - if (!(tfo->get_task_tag)) { 467 - printk(KERN_ERR "Missing tfo->get_task_tag()\n"); 466 + if (!tfo->get_task_tag) { 467 + pr_err("Missing tfo->get_task_tag()\n"); 468 468 return -EINVAL; 469 469 } 470 - if (!(tfo->get_cmd_state)) { 471 - printk(KERN_ERR "Missing tfo->get_cmd_state()\n"); 470 + if (!tfo->get_cmd_state) { 471 + pr_err("Missing tfo->get_cmd_state()\n"); 472 472 return -EINVAL; 473 473 } 474 - if (!(tfo->queue_data_in)) { 475 - printk(KERN_ERR "Missing tfo->queue_data_in()\n"); 474 + if (!tfo->queue_data_in) { 475 + pr_err("Missing tfo->queue_data_in()\n"); 476 476 return -EINVAL; 477 477 } 478 - if (!(tfo->queue_status)) { 479 - printk(KERN_ERR "Missing tfo->queue_status()\n"); 478 + if (!tfo->queue_status) { 479 + pr_err("Missing tfo->queue_status()\n"); 480 480 return -EINVAL; 481 481 } 482 - if (!(tfo->queue_tm_rsp)) { 483 - 
printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n"); 482 + if (!tfo->queue_tm_rsp) { 483 + pr_err("Missing tfo->queue_tm_rsp()\n"); 484 484 return -EINVAL; 485 485 } 486 - if (!(tfo->set_fabric_sense_len)) { 487 - printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n"); 486 + if (!tfo->set_fabric_sense_len) { 487 + pr_err("Missing tfo->set_fabric_sense_len()\n"); 488 488 return -EINVAL; 489 489 } 490 - if (!(tfo->get_fabric_sense_len)) { 491 - printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n"); 490 + if (!tfo->get_fabric_sense_len) { 491 + pr_err("Missing tfo->get_fabric_sense_len()\n"); 492 492 return -EINVAL; 493 493 } 494 - if (!(tfo->is_state_remove)) { 495 - printk(KERN_ERR "Missing tfo->is_state_remove()\n"); 494 + if (!tfo->is_state_remove) { 495 + pr_err("Missing tfo->is_state_remove()\n"); 496 496 return -EINVAL; 497 497 } 498 498 /* ··· 500 500 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in 501 501 * target_core_fabric_configfs.c WWN+TPG group context code. 502 502 */ 503 - if (!(tfo->fabric_make_wwn)) { 504 - printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n"); 503 + if (!tfo->fabric_make_wwn) { 504 + pr_err("Missing tfo->fabric_make_wwn()\n"); 505 505 return -EINVAL; 506 506 } 507 - if (!(tfo->fabric_drop_wwn)) { 508 - printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n"); 507 + if (!tfo->fabric_drop_wwn) { 508 + pr_err("Missing tfo->fabric_drop_wwn()\n"); 509 509 return -EINVAL; 510 510 } 511 - if (!(tfo->fabric_make_tpg)) { 512 - printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n"); 511 + if (!tfo->fabric_make_tpg) { 512 + pr_err("Missing tfo->fabric_make_tpg()\n"); 513 513 return -EINVAL; 514 514 } 515 - if (!(tfo->fabric_drop_tpg)) { 516 - printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n"); 515 + if (!tfo->fabric_drop_tpg) { 516 + pr_err("Missing tfo->fabric_drop_tpg()\n"); 517 517 return -EINVAL; 518 518 } 519 519 ··· 533 533 { 534 534 int ret; 535 535 536 - if (!(tf)) { 537 - printk(KERN_ERR "Unable to locate target_fabric_configfs" 536 
+ if (!tf) { 537 + pr_err("Unable to locate target_fabric_configfs" 538 538 " pointer\n"); 539 539 return -EINVAL; 540 540 } 541 - if (!(tf->tf_subsys)) { 542 - printk(KERN_ERR "Unable to target struct config_subsystem" 541 + if (!tf->tf_subsys) { 542 + pr_err("Unable to target struct config_subsystem" 543 543 " pointer\n"); 544 544 return -EINVAL; 545 545 } ··· 547 547 if (ret < 0) 548 548 return ret; 549 549 550 - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" 550 + pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" 551 551 ">>>>>>>>>>\n"); 552 552 return 0; 553 553 } ··· 558 558 { 559 559 struct configfs_subsystem *su; 560 560 561 - if (!(tf)) { 562 - printk(KERN_ERR "Unable to locate passed target_fabric_" 561 + if (!tf) { 562 + pr_err("Unable to locate passed target_fabric_" 563 563 "configfs\n"); 564 564 return; 565 565 } 566 566 su = tf->tf_subsys; 567 - if (!(su)) { 568 - printk(KERN_ERR "Unable to locate passed tf->tf_subsys" 567 + if (!su) { 568 + pr_err("Unable to locate passed tf->tf_subsys" 569 569 " pointer\n"); 570 570 return; 571 571 } 572 - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" 572 + pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" 573 573 ">>>>>>>>>>>>\n"); 574 574 mutex_lock(&g_tf_lock); 575 575 if (atomic_read(&tf->tf_access_cnt)) { 576 576 mutex_unlock(&g_tf_lock); 577 - printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n", 577 + pr_err("Non zero tf->tf_access_cnt for fabric %s\n", 578 578 tf->tf_name); 579 579 BUG(); 580 580 } 581 581 list_del(&tf->tf_list); 582 582 mutex_unlock(&g_tf_lock); 583 583 584 - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" 584 + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" 585 585 " %s\n", tf->tf_name); 586 586 tf->tf_module = NULL; 587 587 tf->tf_subsys = NULL; 588 588 kfree(tf); 589 589 590 - printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" 590 + 
pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" 591 591 ">>>>>\n"); 592 592 } 593 593 EXPORT_SYMBOL(target_fabric_configfs_deregister); ··· 609 609 \ 610 610 spin_lock(&se_dev->se_dev_lock); \ 611 611 dev = se_dev->se_dev_ptr; \ 612 - if (!(dev)) { \ 612 + if (!dev) { \ 613 613 spin_unlock(&se_dev->se_dev_lock); \ 614 614 return -ENODEV; \ 615 615 } \ ··· 633 633 \ 634 634 spin_lock(&se_dev->se_dev_lock); \ 635 635 dev = se_dev->se_dev_ptr; \ 636 - if (!(dev)) { \ 636 + if (!dev) { \ 637 637 spin_unlock(&se_dev->se_dev_lock); \ 638 638 return -ENODEV; \ 639 639 } \ 640 640 ret = strict_strtoul(page, 0, &val); \ 641 641 if (ret < 0) { \ 642 642 spin_unlock(&se_dev->se_dev_lock); \ 643 - printk(KERN_ERR "strict_strtoul() failed with" \ 643 + pr_err("strict_strtoul() failed with" \ 644 644 " ret: %d\n", ret); \ 645 645 return -EINVAL; \ 646 646 } \ ··· 806 806 struct se_device *dev; 807 807 808 808 dev = se_dev->se_dev_ptr; 809 - if (!(dev)) 809 + if (!dev) 810 810 return -ENODEV; 811 811 812 812 return sprintf(page, "T10 VPD Unit Serial Number: %s\n", ··· 833 833 * VPD Unit Serial Number that OS dependent multipath can depend on. 834 834 */ 835 835 if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { 836 - printk(KERN_ERR "Underlying SCSI device firmware provided VPD" 836 + pr_err("Underlying SCSI device firmware provided VPD" 837 837 " Unit Serial, ignoring request\n"); 838 838 return -EOPNOTSUPP; 839 839 } 840 840 841 841 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { 842 - printk(KERN_ERR "Emulated VPD Unit Serial exceeds" 842 + pr_err("Emulated VPD Unit Serial exceeds" 843 843 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); 844 844 return -EOVERFLOW; 845 845 } ··· 850 850 * could cause negative effects. 
851 851 */ 852 852 dev = su_dev->se_dev_ptr; 853 - if ((dev)) { 853 + if (dev) { 854 854 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 855 - printk(KERN_ERR "Unable to set VPD Unit Serial while" 855 + pr_err("Unable to set VPD Unit Serial while" 856 856 " active %d $FABRIC_MOD exports exist\n", 857 857 atomic_read(&dev->dev_export_obj.obj_access_count)); 858 858 return -EINVAL; ··· 870 870 "%s", strstrip(buf)); 871 871 su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; 872 872 873 - printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:" 873 + pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" 874 874 " %s\n", su_dev->t10_wwn.unit_serial); 875 875 876 876 return count; ··· 892 892 ssize_t len = 0; 893 893 894 894 dev = se_dev->se_dev_ptr; 895 - if (!(dev)) 895 + if (!dev) 896 896 return -ENODEV; 897 897 898 898 memset(buf, 0, VPD_TMP_BUF_SIZE); 899 899 900 900 spin_lock(&t10_wwn->t10_vpd_lock); 901 901 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { 902 - if (!(vpd->protocol_identifier_set)) 902 + if (!vpd->protocol_identifier_set) 903 903 continue; 904 904 905 905 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); 906 906 907 - if ((len + strlen(buf) >= PAGE_SIZE)) 907 + if (len + strlen(buf) >= PAGE_SIZE) 908 908 break; 909 909 910 910 len += sprintf(page+len, "%s", buf); ··· 939 939 ssize_t len = 0; \ 940 940 \ 941 941 dev = se_dev->se_dev_ptr; \ 942 - if (!(dev)) \ 942 + if (!dev) \ 943 943 return -ENODEV; \ 944 944 \ 945 945 spin_lock(&t10_wwn->t10_vpd_lock); \ ··· 949 949 \ 950 950 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 951 951 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ 952 - if ((len + strlen(buf) >= PAGE_SIZE)) \ 952 + if (len + strlen(buf) >= PAGE_SIZE) \ 953 953 break; \ 954 954 len += sprintf(page+len, "%s", buf); \ 955 955 \ 956 956 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 957 957 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ 958 - if ((len + strlen(buf) >= PAGE_SIZE)) \ 
958 + if (len + strlen(buf) >= PAGE_SIZE) \ 959 959 break; \ 960 960 len += sprintf(page+len, "%s", buf); \ 961 961 \ 962 962 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 963 963 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ 964 - if ((len + strlen(buf) >= PAGE_SIZE)) \ 964 + if (len + strlen(buf) >= PAGE_SIZE) \ 965 965 break; \ 966 966 len += sprintf(page+len, "%s", buf); \ 967 967 } \ ··· 1070 1070 1071 1071 spin_lock(&dev->dev_reservation_lock); 1072 1072 pr_reg = dev->dev_pr_res_holder; 1073 - if (!(pr_reg)) { 1073 + if (!pr_reg) { 1074 1074 *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); 1075 1075 spin_unlock(&dev->dev_reservation_lock); 1076 1076 return *len; ··· 1096 1096 1097 1097 spin_lock(&dev->dev_reservation_lock); 1098 1098 se_nacl = dev->dev_reserved_node_acl; 1099 - if (!(se_nacl)) { 1099 + if (!se_nacl) { 1100 1100 *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); 1101 1101 spin_unlock(&dev->dev_reservation_lock); 1102 1102 return *len; ··· 1115 1115 { 1116 1116 ssize_t len = 0; 1117 1117 1118 - if (!(su_dev->se_dev_ptr)) 1118 + if (!su_dev->se_dev_ptr) 1119 1119 return -ENODEV; 1120 1120 1121 1121 switch (su_dev->t10_pr.res_type) { ··· 1152 1152 ssize_t len = 0; 1153 1153 1154 1154 dev = su_dev->se_dev_ptr; 1155 - if (!(dev)) 1155 + if (!dev) 1156 1156 return -ENODEV; 1157 1157 1158 1158 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) ··· 1160 1160 1161 1161 spin_lock(&dev->dev_reservation_lock); 1162 1162 pr_reg = dev->dev_pr_res_holder; 1163 - if (!(pr_reg)) { 1163 + if (!pr_reg) { 1164 1164 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1165 1165 spin_unlock(&dev->dev_reservation_lock); 1166 1166 return len; ··· 1189 1189 struct se_subsystem_dev *su_dev, 1190 1190 char *page) 1191 1191 { 1192 - if (!(su_dev->se_dev_ptr)) 1192 + if (!su_dev->se_dev_ptr) 1193 1193 return -ENODEV; 1194 1194 1195 1195 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) ··· 1216 1216 ssize_t len = 0; 1217 
1217 1218 1218 dev = su_dev->se_dev_ptr; 1219 - if (!(dev)) 1219 + if (!dev) 1220 1220 return -ENODEV; 1221 1221 1222 1222 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) ··· 1224 1224 1225 1225 spin_lock(&dev->dev_reservation_lock); 1226 1226 pr_reg = dev->dev_pr_res_holder; 1227 - if (!(pr_reg)) { 1227 + if (!pr_reg) { 1228 1228 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1229 1229 spin_unlock(&dev->dev_reservation_lock); 1230 1230 return len; ··· 1263 1263 ssize_t len = 0; 1264 1264 int reg_count = 0, prf_isid; 1265 1265 1266 - if (!(su_dev->se_dev_ptr)) 1266 + if (!su_dev->se_dev_ptr) 1267 1267 return -ENODEV; 1268 1268 1269 1269 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) ··· 1286 1286 &i_buf[0] : "", pr_reg->pr_res_key, 1287 1287 pr_reg->pr_res_generation); 1288 1288 1289 - if ((len + strlen(buf) >= PAGE_SIZE)) 1289 + if (len + strlen(buf) >= PAGE_SIZE) 1290 1290 break; 1291 1291 1292 1292 len += sprintf(page+len, "%s", buf); ··· 1294 1294 } 1295 1295 spin_unlock(&su_dev->t10_pr.registration_lock); 1296 1296 1297 - if (!(reg_count)) 1297 + if (!reg_count) 1298 1298 len += sprintf(page+len, "None\n"); 1299 1299 1300 1300 return len; ··· 1314 1314 ssize_t len = 0; 1315 1315 1316 1316 dev = su_dev->se_dev_ptr; 1317 - if (!(dev)) 1317 + if (!dev) 1318 1318 return -ENODEV; 1319 1319 1320 1320 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) ··· 1322 1322 1323 1323 spin_lock(&dev->dev_reservation_lock); 1324 1324 pr_reg = dev->dev_pr_res_holder; 1325 - if (!(pr_reg)) { 1325 + if (!pr_reg) { 1326 1326 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1327 1327 spin_unlock(&dev->dev_reservation_lock); 1328 1328 return len; ··· 1345 1345 { 1346 1346 ssize_t len = 0; 1347 1347 1348 - if (!(su_dev->se_dev_ptr)) 1348 + if (!su_dev->se_dev_ptr) 1349 1349 return -ENODEV; 1350 1350 1351 1351 switch (su_dev->t10_pr.res_type) { ··· 1376 1376 struct se_subsystem_dev *su_dev, 1377 1377 char *page) 1378 1378 { 1379 - if 
(!(su_dev->se_dev_ptr)) 1379 + if (!su_dev->se_dev_ptr) 1380 1380 return -ENODEV; 1381 1381 1382 1382 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) ··· 1395 1395 struct se_subsystem_dev *su_dev, 1396 1396 char *page) 1397 1397 { 1398 - if (!(su_dev->se_dev_ptr)) 1398 + if (!su_dev->se_dev_ptr) 1399 1399 return -ENODEV; 1400 1400 1401 1401 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) ··· 1447 1447 u8 type = 0, scope; 1448 1448 1449 1449 dev = su_dev->se_dev_ptr; 1450 - if (!(dev)) 1450 + if (!dev) 1451 1451 return -ENODEV; 1452 1452 1453 1453 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1454 1454 return 0; 1455 1455 1456 1456 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1457 - printk(KERN_INFO "Unable to process APTPL metadata while" 1457 + pr_debug("Unable to process APTPL metadata while" 1458 1458 " active fabric exports exist\n"); 1459 1459 return -EINVAL; 1460 1460 } ··· 1484 1484 goto out; 1485 1485 } 1486 1486 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { 1487 - printk(KERN_ERR "APTPL metadata initiator_node=" 1487 + pr_err("APTPL metadata initiator_node=" 1488 1488 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", 1489 1489 PR_APTPL_MAX_IPORT_LEN); 1490 1490 ret = -EINVAL; ··· 1498 1498 goto out; 1499 1499 } 1500 1500 if (strlen(isid) >= PR_REG_ISID_LEN) { 1501 - printk(KERN_ERR "APTPL metadata initiator_isid" 1501 + pr_err("APTPL metadata initiator_isid" 1502 1502 "= exceeds PR_REG_ISID_LEN: %d\n", 1503 1503 PR_REG_ISID_LEN); 1504 1504 ret = -EINVAL; ··· 1513 1513 } 1514 1514 ret = strict_strtoull(arg_p, 0, &tmp_ll); 1515 1515 if (ret < 0) { 1516 - printk(KERN_ERR "strict_strtoull() failed for" 1516 + pr_err("strict_strtoull() failed for" 1517 1517 " sa_res_key=\n"); 1518 1518 goto out; 1519 1519 } ··· 1559 1559 goto out; 1560 1560 } 1561 1561 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { 1562 - printk(KERN_ERR "APTPL metadata target_node=" 1562 + pr_err("APTPL metadata target_node=" 1563 1563 " exceeds 
PR_APTPL_MAX_TPORT_LEN: %d\n", 1564 1564 PR_APTPL_MAX_TPORT_LEN); 1565 1565 ret = -EINVAL; ··· 1583 1583 } 1584 1584 } 1585 1585 1586 - if (!(i_port) || !(t_port) || !(sa_res_key)) { 1587 - printk(KERN_ERR "Illegal parameters for APTPL registration\n"); 1586 + if (!i_port || !t_port || !sa_res_key) { 1587 + pr_err("Illegal parameters for APTPL registration\n"); 1588 1588 ret = -EINVAL; 1589 1589 goto out; 1590 1590 } 1591 1591 1592 1592 if (res_holder && !(type)) { 1593 - printk(KERN_ERR "Illegal PR type: 0x%02x for reservation" 1593 + pr_err("Illegal PR type: 0x%02x for reservation" 1594 1594 " holder\n", type); 1595 1595 ret = -EINVAL; 1596 1596 goto out; ··· 1649 1649 int bl = 0; 1650 1650 ssize_t read_bytes = 0; 1651 1651 1652 - if (!(se_dev->se_dev_ptr)) 1652 + if (!se_dev->se_dev_ptr) 1653 1653 return -ENODEV; 1654 1654 1655 1655 transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); ··· 1675 1675 struct se_hba *hba = se_dev->se_dev_hba; 1676 1676 struct se_subsystem_api *t = hba->transport; 1677 1677 1678 - if (!(se_dev->se_dev_su_ptr)) { 1679 - printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se" 1678 + if (!se_dev->se_dev_su_ptr) { 1679 + pr_err("Unable to locate struct se_subsystem_dev>se" 1680 1680 "_dev_su_ptr\n"); 1681 1681 return -EINVAL; 1682 1682 } ··· 1712 1712 ssize_t read_bytes; 1713 1713 1714 1714 if (count > (SE_DEV_ALIAS_LEN-1)) { 1715 - printk(KERN_ERR "alias count: %d exceeds" 1715 + pr_err("alias count: %d exceeds" 1716 1716 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, 1717 1717 SE_DEV_ALIAS_LEN-1); 1718 1718 return -EINVAL; ··· 1722 1722 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1723 1723 "%s", page); 1724 1724 1725 - printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n", 1725 + pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 1726 1726 config_item_name(&hba->hba_group.cg_item), 1727 1727 config_item_name(&se_dev->se_dev_group.cg_item), 1728 1728 se_dev->se_dev_alias); ··· 1758 1758 ssize_t 
read_bytes; 1759 1759 1760 1760 if (count > (SE_UDEV_PATH_LEN-1)) { 1761 - printk(KERN_ERR "udev_path count: %d exceeds" 1761 + pr_err("udev_path count: %d exceeds" 1762 1762 " SE_UDEV_PATH_LEN-1: %u\n", (int)count, 1763 1763 SE_UDEV_PATH_LEN-1); 1764 1764 return -EINVAL; ··· 1768 1768 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1769 1769 "%s", page); 1770 1770 1771 - printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1771 + pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1772 1772 config_item_name(&hba->hba_group.cg_item), 1773 1773 config_item_name(&se_dev->se_dev_group.cg_item), 1774 1774 se_dev->se_dev_udev_path); ··· 1796 1796 char *ptr; 1797 1797 1798 1798 ptr = strstr(page, "1"); 1799 - if (!(ptr)) { 1800 - printk(KERN_ERR "For dev_enable ops, only valid value" 1799 + if (!ptr) { 1800 + pr_err("For dev_enable ops, only valid value" 1801 1801 " is \"1\"\n"); 1802 1802 return -EINVAL; 1803 1803 } 1804 - if ((se_dev->se_dev_ptr)) { 1805 - printk(KERN_ERR "se_dev->se_dev_ptr already set for storage" 1804 + if (se_dev->se_dev_ptr) { 1805 + pr_err("se_dev->se_dev_ptr already set for storage" 1806 1806 " object\n"); 1807 1807 return -EEXIST; 1808 1808 } ··· 1817 1817 return -EINVAL; 1818 1818 1819 1819 se_dev->se_dev_ptr = dev; 1820 - printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" 1820 + pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" 1821 1821 " %p\n", se_dev->se_dev_ptr); 1822 1822 1823 1823 return count; ··· 1841 1841 ssize_t len = 0; 1842 1842 1843 1843 dev = su_dev->se_dev_ptr; 1844 - if (!(dev)) 1844 + if (!dev) 1845 1845 return -ENODEV; 1846 1846 1847 1847 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) 1848 1848 return len; 1849 1849 1850 1850 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1851 - if (!(lu_gp_mem)) { 1852 - printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" 1851 + if (!lu_gp_mem) { 1852 + pr_err("NULL struct se_device->dev_alua_lu_gp_mem" 
1853 1853 " pointer\n"); 1854 1854 return -EINVAL; 1855 1855 } 1856 1856 1857 1857 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1858 1858 lu_gp = lu_gp_mem->lu_gp; 1859 - if ((lu_gp)) { 1859 + if (lu_gp) { 1860 1860 lu_ci = &lu_gp->lu_gp_group.cg_item; 1861 1861 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", 1862 1862 config_item_name(lu_ci), lu_gp->lu_gp_id); ··· 1880 1880 int move = 0; 1881 1881 1882 1882 dev = su_dev->se_dev_ptr; 1883 - if (!(dev)) 1883 + if (!dev) 1884 1884 return -ENODEV; 1885 1885 1886 1886 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1887 - printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", 1887 + pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n", 1888 1888 config_item_name(&hba->hba_group.cg_item), 1889 1889 config_item_name(&su_dev->se_dev_group.cg_item)); 1890 1890 return -EINVAL; 1891 1891 } 1892 1892 if (count > LU_GROUP_NAME_BUF) { 1893 - printk(KERN_ERR "ALUA LU Group Alias too large!\n"); 1893 + pr_err("ALUA LU Group Alias too large!\n"); 1894 1894 return -EINVAL; 1895 1895 } 1896 1896 memset(buf, 0, LU_GROUP_NAME_BUF); ··· 1906 1906 * core_alua_get_lu_gp_by_name below(). 
1907 1907 */ 1908 1908 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); 1909 - if (!(lu_gp_new)) 1909 + if (!lu_gp_new) 1910 1910 return -ENODEV; 1911 1911 } 1912 1912 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1913 - if (!(lu_gp_mem)) { 1913 + if (!lu_gp_mem) { 1914 1914 if (lu_gp_new) 1915 1915 core_alua_put_lu_gp_from_name(lu_gp_new); 1916 - printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" 1916 + pr_err("NULL struct se_device->dev_alua_lu_gp_mem" 1917 1917 " pointer\n"); 1918 1918 return -EINVAL; 1919 1919 } 1920 1920 1921 1921 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1922 1922 lu_gp = lu_gp_mem->lu_gp; 1923 - if ((lu_gp)) { 1923 + if (lu_gp) { 1924 1924 /* 1925 1925 * Clearing an existing lu_gp association, and replacing 1926 1926 * with NULL 1927 1927 */ 1928 - if (!(lu_gp_new)) { 1929 - printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s" 1928 + if (!lu_gp_new) { 1929 + pr_debug("Target_Core_ConfigFS: Releasing %s/%s" 1930 1930 " from ALUA LU Group: core/alua/lu_gps/%s, ID:" 1931 1931 " %hu\n", 1932 1932 config_item_name(&hba->hba_group.cg_item), ··· 1951 1951 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); 1952 1952 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1953 1953 1954 - printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" 1954 + pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" 1955 1955 " core/alua/lu_gps/%s, ID: %hu\n", 1956 1956 (move) ? "Moving" : "Adding", 1957 1957 config_item_name(&hba->hba_group.cg_item), ··· 1995 1995 *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` 1996 1996 */ 1997 1997 if (se_dev->se_dev_ptr) { 1998 - printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" 1998 + pr_debug("Target_Core_ConfigFS: Calling se_free_" 1999 1999 "virtual_device() for se_dev_ptr: %p\n", 2000 2000 se_dev->se_dev_ptr); 2001 2001 ··· 2004 2004 /* 2005 2005 * Release struct se_subsystem_dev->se_dev_su_ptr.. 
2006 2006 */ 2007 - printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" 2007 + pr_debug("Target_Core_ConfigFS: Calling t->free_" 2008 2008 "device() for se_dev_su_ptr: %p\n", 2009 2009 se_dev->se_dev_su_ptr); 2010 2010 2011 2011 t->free_device(se_dev->se_dev_su_ptr); 2012 2012 } 2013 2013 2014 - printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" 2014 + pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem" 2015 2015 "_dev_t: %p\n", se_dev); 2016 2016 kfree(se_dev); 2017 2017 } ··· 2026 2026 struct target_core_configfs_attribute *tc_attr = container_of( 2027 2027 attr, struct target_core_configfs_attribute, attr); 2028 2028 2029 - if (!(tc_attr->show)) 2029 + if (!tc_attr->show) 2030 2030 return -EINVAL; 2031 2031 2032 2032 return tc_attr->show(se_dev, page); ··· 2042 2042 struct target_core_configfs_attribute *tc_attr = container_of( 2043 2043 attr, struct target_core_configfs_attribute, attr); 2044 2044 2045 - if (!(tc_attr->store)) 2045 + if (!tc_attr->store) 2046 2046 return -EINVAL; 2047 2047 2048 2048 return tc_attr->store(se_dev, page, count); ··· 2085 2085 struct t10_alua_lu_gp *lu_gp, 2086 2086 char *page) 2087 2087 { 2088 - if (!(lu_gp->lu_gp_valid_id)) 2088 + if (!lu_gp->lu_gp_valid_id) 2089 2089 return 0; 2090 2090 2091 2091 return sprintf(page, "%hu\n", lu_gp->lu_gp_id); ··· 2102 2102 2103 2103 ret = strict_strtoul(page, 0, &lu_gp_id); 2104 2104 if (ret < 0) { 2105 - printk(KERN_ERR "strict_strtoul() returned %d for" 2105 + pr_err("strict_strtoul() returned %d for" 2106 2106 " lu_gp_id\n", ret); 2107 2107 return -EINVAL; 2108 2108 } 2109 2109 if (lu_gp_id > 0x0000ffff) { 2110 - printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:" 2110 + pr_err("ALUA lu_gp_id: %lu exceeds maximum:" 2111 2111 " 0x0000ffff\n", lu_gp_id); 2112 2112 return -EINVAL; 2113 2113 } ··· 2116 2116 if (ret < 0) 2117 2117 return -EINVAL; 2118 2118 2119 - printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit" 2119 + pr_debug("Target_Core_ConfigFS: 
Set ALUA Logical Unit" 2120 2120 " Group: core/alua/lu_gps/%s to ID: %hu\n", 2121 2121 config_item_name(&alua_lu_gp_cg->cg_item), 2122 2122 lu_gp->lu_gp_id); ··· 2154 2154 cur_len++; /* Extra byte for NULL terminator */ 2155 2155 2156 2156 if ((cur_len + len) > PAGE_SIZE) { 2157 - printk(KERN_WARNING "Ran out of lu_gp_show_attr" 2157 + pr_warn("Ran out of lu_gp_show_attr" 2158 2158 "_members buffer\n"); 2159 2159 break; 2160 2160 } ··· 2218 2218 config_group_init_type_name(alua_lu_gp_cg, name, 2219 2219 &target_core_alua_lu_gp_cit); 2220 2220 2221 - printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit" 2221 + pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" 2222 2222 " Group: core/alua/lu_gps/%s\n", 2223 2223 config_item_name(alua_lu_gp_ci)); 2224 2224 ··· 2233 2233 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2234 2234 struct t10_alua_lu_gp, lu_gp_group); 2235 2235 2236 - printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2236 + pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2237 2237 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2238 2238 config_item_name(item), lu_gp->lu_gp_id); 2239 2239 /* ··· 2292 2292 unsigned long tmp; 2293 2293 int new_state, ret; 2294 2294 2295 - if (!(tg_pt_gp->tg_pt_gp_valid_id)) { 2296 - printk(KERN_ERR "Unable to do implict ALUA on non valid" 2295 + if (!tg_pt_gp->tg_pt_gp_valid_id) { 2296 + pr_err("Unable to do implict ALUA on non valid" 2297 2297 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); 2298 2298 return -EINVAL; 2299 2299 } 2300 2300 2301 2301 ret = strict_strtoul(page, 0, &tmp); 2302 2302 if (ret < 0) { 2303 - printk("Unable to extract new ALUA access state from" 2303 + pr_err("Unable to extract new ALUA access state from" 2304 2304 " %s\n", page); 2305 2305 return -EINVAL; 2306 2306 } 2307 2307 new_state = (int)tmp; 2308 2308 2309 2309 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { 2310 - printk(KERN_ERR "Unable to process 
implict configfs ALUA" 2310 + pr_err("Unable to process implict configfs ALUA" 2311 2311 " transition while TPGS_IMPLICT_ALUA is diabled\n"); 2312 2312 return -EINVAL; 2313 2313 } ··· 2338 2338 unsigned long tmp; 2339 2339 int new_status, ret; 2340 2340 2341 - if (!(tg_pt_gp->tg_pt_gp_valid_id)) { 2342 - printk(KERN_ERR "Unable to do set ALUA access status on non" 2341 + if (!tg_pt_gp->tg_pt_gp_valid_id) { 2342 + pr_err("Unable to do set ALUA access status on non" 2343 2343 " valid tg_pt_gp ID: %hu\n", 2344 2344 tg_pt_gp->tg_pt_gp_valid_id); 2345 2345 return -EINVAL; ··· 2347 2347 2348 2348 ret = strict_strtoul(page, 0, &tmp); 2349 2349 if (ret < 0) { 2350 - printk(KERN_ERR "Unable to extract new ALUA access status" 2350 + pr_err("Unable to extract new ALUA access status" 2351 2351 " from %s\n", page); 2352 2352 return -EINVAL; 2353 2353 } ··· 2356 2356 if ((new_status != ALUA_STATUS_NONE) && 2357 2357 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 2358 2358 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 2359 - printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n", 2359 + pr_err("Illegal ALUA access status: 0x%02x\n", 2360 2360 new_status); 2361 2361 return -EINVAL; 2362 2362 } ··· 2407 2407 2408 2408 ret = strict_strtoul(page, 0, &tmp); 2409 2409 if (ret < 0) { 2410 - printk(KERN_ERR "Unable to extract alua_write_metadata\n"); 2410 + pr_err("Unable to extract alua_write_metadata\n"); 2411 2411 return -EINVAL; 2412 2412 } 2413 2413 2414 2414 if ((tmp != 0) && (tmp != 1)) { 2415 - printk(KERN_ERR "Illegal value for alua_write_metadata:" 2415 + pr_err("Illegal value for alua_write_metadata:" 2416 2416 " %lu\n", tmp); 2417 2417 return -EINVAL; 2418 2418 } ··· 2494 2494 struct t10_alua_tg_pt_gp *tg_pt_gp, 2495 2495 char *page) 2496 2496 { 2497 - if (!(tg_pt_gp->tg_pt_gp_valid_id)) 2497 + if (!tg_pt_gp->tg_pt_gp_valid_id) 2498 2498 return 0; 2499 2499 2500 2500 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); ··· 2511 2511 2512 2512 ret = 
strict_strtoul(page, 0, &tg_pt_gp_id); 2513 2513 if (ret < 0) { 2514 - printk(KERN_ERR "strict_strtoul() returned %d for" 2514 + pr_err("strict_strtoul() returned %d for" 2515 2515 " tg_pt_gp_id\n", ret); 2516 2516 return -EINVAL; 2517 2517 } 2518 2518 if (tg_pt_gp_id > 0x0000ffff) { 2519 - printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:" 2519 + pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" 2520 2520 " 0x0000ffff\n", tg_pt_gp_id); 2521 2521 return -EINVAL; 2522 2522 } ··· 2525 2525 if (ret < 0) 2526 2526 return -EINVAL; 2527 2527 2528 - printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: " 2528 + pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " 2529 2529 "core/alua/tg_pt_gps/%s to ID: %hu\n", 2530 2530 config_item_name(&alua_tg_pt_gp_cg->cg_item), 2531 2531 tg_pt_gp->tg_pt_gp_id); ··· 2566 2566 cur_len++; /* Extra byte for NULL terminator */ 2567 2567 2568 2568 if ((cur_len + len) > PAGE_SIZE) { 2569 - printk(KERN_WARNING "Ran out of lu_gp_show_attr" 2569 + pr_warn("Ran out of lu_gp_show_attr" 2570 2570 "_members buffer\n"); 2571 2571 break; 2572 2572 } ··· 2632 2632 struct config_item *alua_tg_pt_gp_ci = NULL; 2633 2633 2634 2634 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); 2635 - if (!(tg_pt_gp)) 2635 + if (!tg_pt_gp) 2636 2636 return NULL; 2637 2637 2638 2638 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; ··· 2641 2641 config_group_init_type_name(alua_tg_pt_gp_cg, name, 2642 2642 &target_core_alua_tg_pt_gp_cit); 2643 2643 2644 - printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port" 2644 + pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port" 2645 2645 " Group: alua/tg_pt_gps/%s\n", 2646 2646 config_item_name(alua_tg_pt_gp_ci)); 2647 2647 ··· 2655 2655 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), 2656 2656 struct t10_alua_tg_pt_gp, tg_pt_gp_group); 2657 2657 2658 - printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" 2658 + 
pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port" 2659 2659 " Group: alua/tg_pt_gps/%s, ID: %hu\n", 2660 2660 config_item_name(item), tg_pt_gp->tg_pt_gp_id); 2661 2661 /* ··· 2746 2746 2747 2747 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 2748 2748 if (!se_dev) { 2749 - printk(KERN_ERR "Unable to allocate memory for" 2749 + pr_err("Unable to allocate memory for" 2750 2750 " struct se_subsystem_dev\n"); 2751 2751 goto unlock; 2752 2752 } ··· 2770 2770 2771 2771 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, 2772 2772 GFP_KERNEL); 2773 - if (!(dev_cg->default_groups)) 2773 + if (!dev_cg->default_groups) 2774 2774 goto out; 2775 2775 /* 2776 2776 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr ··· 2781 2781 * configfs tree for device object's struct config_group. 2782 2782 */ 2783 2783 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); 2784 - if (!(se_dev->se_dev_su_ptr)) { 2785 - printk(KERN_ERR "Unable to locate subsystem dependent pointer" 2784 + if (!se_dev->se_dev_su_ptr) { 2785 + pr_err("Unable to locate subsystem dependent pointer" 2786 2786 " from allocate_virtdevice()\n"); 2787 2787 goto out; 2788 2788 } ··· 2813 2813 * Add core/$HBA/$DEV/alua/default_tg_pt_gp 2814 2814 */ 2815 2815 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); 2816 - if (!(tg_pt_gp)) 2816 + if (!tg_pt_gp) 2817 2817 goto out; 2818 2818 2819 2819 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2820 2820 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 2821 2821 GFP_KERNEL); 2822 - if (!(tg_pt_gp_cg->default_groups)) { 2823 - printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->" 2822 + if (!tg_pt_gp_cg->default_groups) { 2823 + pr_err("Unable to allocate tg_pt_gp_cg->" 2824 2824 "default_groups\n"); 2825 2825 goto out; 2826 2826 } ··· 2837 2837 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, 2838 2838 GFP_KERNEL); 2839 2839 if 
(!dev_stat_grp->default_groups) { 2840 - printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n"); 2840 + pr_err("Unable to allocate dev_stat_grp->default_groups\n"); 2841 2841 goto out; 2842 2842 } 2843 2843 target_stat_setup_dev_default_groups(se_dev); 2844 2844 2845 - printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" 2845 + pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" 2846 2846 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); 2847 2847 2848 2848 mutex_unlock(&hba->hba_access_mutex); ··· 2975 2975 2976 2976 ret = strict_strtoul(page, 0, &mode_flag); 2977 2977 if (ret < 0) { 2978 - printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret); 2978 + pr_err("Unable to extract hba mode flag: %d\n", ret); 2979 2979 return -EINVAL; 2980 2980 } 2981 2981 2982 2982 spin_lock(&hba->device_lock); 2983 - if (!(list_empty(&hba->hba_dev_list))) { 2984 - printk(KERN_ERR "Unable to set hba_mode with active devices\n"); 2983 + if (!list_empty(&hba->hba_dev_list)) { 2984 + pr_err("Unable to set hba_mode with active devices\n"); 2985 2985 spin_unlock(&hba->device_lock); 2986 2986 return -EINVAL; 2987 2987 } ··· 3040 3040 3041 3041 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); 3042 3042 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { 3043 - printk(KERN_ERR "Passed *name strlen(): %d exceeds" 3043 + pr_err("Passed *name strlen(): %d exceeds" 3044 3044 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), 3045 3045 TARGET_CORE_NAME_MAX_LEN); 3046 3046 return ERR_PTR(-ENAMETOOLONG); ··· 3048 3048 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); 3049 3049 3050 3050 str = strstr(buf, "_"); 3051 - if (!(str)) { 3052 - printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); 3051 + if (!str) { 3052 + pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); 3053 3053 return ERR_PTR(-EINVAL); 3054 3054 } 3055 3055 se_plugin_str = buf; ··· 3058 3058 * Namely rd_direct and rd_mcp.. 
3059 3059 */ 3060 3060 str2 = strstr(str+1, "_"); 3061 - if ((str2)) { 3061 + if (str2) { 3062 3062 *str2 = '\0'; /* Terminate for *se_plugin_str */ 3063 3063 str2++; /* Skip to start of plugin dependent ID */ 3064 3064 str = str2; ··· 3069 3069 3070 3070 ret = strict_strtoul(str, 0, &plugin_dep_id); 3071 3071 if (ret < 0) { 3072 - printk(KERN_ERR "strict_strtoul() returned %d for" 3072 + pr_err("strict_strtoul() returned %d for" 3073 3073 " plugin_dep_id\n", ret); 3074 3074 return ERR_PTR(-EINVAL); 3075 3075 } ··· 3122 3122 struct t10_alua_lu_gp *lu_gp; 3123 3123 int ret; 3124 3124 3125 - printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage" 3125 + pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" 3126 3126 " Engine: %s on %s/%s on "UTS_RELEASE"\n", 3127 3127 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); 3128 3128 ··· 3142 3142 target_cg = &subsys->su_group; 3143 3143 target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3144 3144 GFP_KERNEL); 3145 - if (!(target_cg->default_groups)) { 3146 - printk(KERN_ERR "Unable to allocate target_cg->default_groups\n"); 3145 + if (!target_cg->default_groups) { 3146 + pr_err("Unable to allocate target_cg->default_groups\n"); 3147 3147 goto out_global; 3148 3148 } 3149 3149 ··· 3157 3157 hba_cg = &target_core_hbagroup; 3158 3158 hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3159 3159 GFP_KERNEL); 3160 - if (!(hba_cg->default_groups)) { 3161 - printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); 3160 + if (!hba_cg->default_groups) { 3161 + pr_err("Unable to allocate hba_cg->default_groups\n"); 3162 3162 goto out_global; 3163 3163 } 3164 3164 config_group_init_type_name(&alua_group, ··· 3172 3172 alua_cg = &alua_group; 3173 3173 alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3174 3174 GFP_KERNEL); 3175 - if (!(alua_cg->default_groups)) { 3176 - printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n"); 3175 + if 
(!alua_cg->default_groups) { 3176 + pr_err("Unable to allocate alua_cg->default_groups\n"); 3177 3177 goto out_global; 3178 3178 } 3179 3179 ··· 3191 3191 lu_gp_cg = &alua_lu_gps_group; 3192 3192 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3193 3193 GFP_KERNEL); 3194 - if (!(lu_gp_cg->default_groups)) { 3195 - printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n"); 3194 + if (!lu_gp_cg->default_groups) { 3195 + pr_err("Unable to allocate lu_gp_cg->default_groups\n"); 3196 3196 goto out_global; 3197 3197 } 3198 3198 ··· 3206 3206 */ 3207 3207 ret = configfs_register_subsystem(subsys); 3208 3208 if (ret < 0) { 3209 - printk(KERN_ERR "Error %d while registering subsystem %s\n", 3209 + pr_err("Error %d while registering subsystem %s\n", 3210 3210 ret, subsys->su_group.cg_item.ci_namebuf); 3211 3211 goto out_global; 3212 3212 } 3213 - printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric" 3213 + pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" 3214 3214 " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" 3215 3215 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); 3216 3216 /* ··· 3290 3290 core_alua_free_lu_gp(default_lu_gp); 3291 3291 default_lu_gp = NULL; 3292 3292 3293 - printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" 3293 + pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric" 3294 3294 " Infrastructure\n"); 3295 3295 3296 3296 core_dev_release_virtual_lun0();
+107 -107
drivers/target/target_core_device.c
··· 84 84 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { 85 85 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; 86 86 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 87 - printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 87 + pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 88 88 " Access for 0x%08x\n", 89 89 se_cmd->se_tfo->get_fabric_name(), 90 90 unpacked_lun); ··· 117 117 if (unpacked_lun != 0) { 118 118 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 119 119 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 120 - printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 120 + pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 121 121 " Access for 0x%08x\n", 122 122 se_cmd->se_tfo->get_fabric_name(), 123 123 unpacked_lun); ··· 204 204 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 205 205 206 206 if (!se_lun) { 207 - printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 207 + pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 208 208 " Access for 0x%08x\n", 209 209 se_cmd->se_tfo->get_fabric_name(), 210 210 unpacked_lun); ··· 255 255 continue; 256 256 257 257 lun = deve->se_lun; 258 - if (!(lun)) { 259 - printk(KERN_ERR "%s device entries device pointer is" 258 + if (!lun) { 259 + pr_err("%s device entries device pointer is" 260 260 " NULL, but Initiator has access.\n", 261 261 tpg->se_tpg_tfo->get_fabric_name()); 262 262 continue; 263 263 } 264 264 port = lun->lun_sep; 265 - if (!(port)) { 266 - printk(KERN_ERR "%s device entries device pointer is" 265 + if (!port) { 266 + pr_err("%s device entries device pointer is" 267 267 " NULL, but Initiator has access.\n", 268 268 tpg->se_tpg_tfo->get_fabric_name()); 269 269 continue; ··· 301 301 continue; 302 302 303 303 if (!deve->se_lun) { 304 - printk(KERN_ERR "%s device entries device pointer is" 304 + pr_err("%s device entries device pointer is" 305 305 " NULL, but Initiator has access.\n", 306 306 tpg->se_tpg_tfo->get_fabric_name()); 307 307 continue; ··· 372 372 * 
struct se_dev_entry pointers below as logic in 373 373 * core_alua_do_transition_tg_pt() depends on these being present. 374 374 */ 375 - if (!(enable)) { 375 + if (!enable) { 376 376 /* 377 377 * deve->se_lun_acl will be NULL for demo-mode created LUNs 378 378 * that have not been explicitly concerted to MappedLUNs -> ··· 395 395 */ 396 396 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 397 397 if (deve->se_lun_acl != NULL) { 398 - printk(KERN_ERR "struct se_dev_entry->se_lun_acl" 398 + pr_err("struct se_dev_entry->se_lun_acl" 399 399 " already set for demo mode -> explict" 400 400 " LUN ACL transition\n"); 401 401 spin_unlock_irq(&nacl->device_list_lock); 402 402 return -EINVAL; 403 403 } 404 404 if (deve->se_lun != lun) { 405 - printk(KERN_ERR "struct se_dev_entry->se_lun does" 405 + pr_err("struct se_dev_entry->se_lun does" 406 406 " match passed struct se_lun for demo mode" 407 407 " -> explict LUN ACL transition\n"); 408 408 spin_unlock_irq(&nacl->device_list_lock); ··· 501 501 struct se_port *port, *port_tmp; 502 502 503 503 port = kzalloc(sizeof(struct se_port), GFP_KERNEL); 504 - if (!(port)) { 505 - printk(KERN_ERR "Unable to allocate struct se_port\n"); 504 + if (!port) { 505 + pr_err("Unable to allocate struct se_port\n"); 506 506 return ERR_PTR(-ENOMEM); 507 507 } 508 508 INIT_LIST_HEAD(&port->sep_alua_list); ··· 513 513 514 514 spin_lock(&dev->se_port_lock); 515 515 if (dev->dev_port_count == 0x0000ffff) { 516 - printk(KERN_WARNING "Reached dev->dev_port_count ==" 516 + pr_warn("Reached dev->dev_port_count ==" 517 517 " 0x0000ffff\n"); 518 518 spin_unlock(&dev->se_port_lock); 519 519 return ERR_PTR(-ENOSPC); ··· 532 532 * 3h to FFFFh Relative port 3 through 65 535 533 533 */ 534 534 port->sep_rtpi = dev->dev_rpti_counter++; 535 - if (!(port->sep_rtpi)) 535 + if (!port->sep_rtpi) 536 536 goto again; 537 537 538 538 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { ··· 570 570 if (su_dev->t10_alua.alua_type == 
SPC3_ALUA_EMULATED) { 571 571 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 572 572 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 573 - printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" 573 + pr_err("Unable to allocate t10_alua_tg_pt" 574 574 "_gp_member_t\n"); 575 575 return; 576 576 } ··· 578 578 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 579 579 su_dev->t10_alua.default_tg_pt_gp); 580 580 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 581 - printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" 581 + pr_debug("%s/%s: Adding to default ALUA Target Port" 582 582 " Group: alua/default_tg_pt_gp\n", 583 583 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); 584 584 } ··· 663 663 list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) 664 664 break; 665 665 666 - if (!(se_task)) { 667 - printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); 666 + if (!se_task) { 667 + pr_err("Unable to locate struct se_task for struct se_cmd\n"); 668 668 return PYX_TRANSPORT_LU_COMM_FAILURE; 669 669 } 670 670 ··· 675 675 * coming via a target_core_mod PASSTHROUGH op, and not through 676 676 * a $FABRIC_MOD. In that case, report LUN=0 only. 
677 677 */ 678 - if (!(se_sess)) { 678 + if (!se_sess) { 679 679 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); 680 680 lun_count = 1; 681 681 goto done; ··· 893 893 int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) 894 894 { 895 895 if (task_timeout > DA_TASK_TIMEOUT_MAX) { 896 - printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" 896 + pr_err("dev[%p]: Passed task_timeout: %u larger then" 897 897 " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); 898 898 return -EINVAL; 899 899 } else { 900 900 dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; 901 - printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", 901 + pr_debug("dev[%p]: Set SE Device task_timeout: %u\n", 902 902 dev, task_timeout); 903 903 } 904 904 ··· 910 910 u32 max_unmap_lba_count) 911 911 { 912 912 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; 913 - printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", 913 + pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", 914 914 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); 915 915 return 0; 916 916 } ··· 921 921 { 922 922 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 923 923 max_unmap_block_desc_count; 924 - printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", 924 + pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", 925 925 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); 926 926 return 0; 927 927 } ··· 931 931 u32 unmap_granularity) 932 932 { 933 933 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; 934 - printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", 934 + pr_debug("dev[%p]: Set unmap_granularity: %u\n", 935 935 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); 936 936 return 0; 937 937 } ··· 941 941 u32 unmap_granularity_alignment) 942 942 { 943 943 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; 944 - printk(KERN_INFO "dev[%p]: Set 
unmap_granularity_alignment: %u\n", 944 + pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", 945 945 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); 946 946 return 0; 947 947 } ··· 949 949 int se_dev_set_emulate_dpo(struct se_device *dev, int flag) 950 950 { 951 951 if ((flag != 0) && (flag != 1)) { 952 - printk(KERN_ERR "Illegal value %d\n", flag); 952 + pr_err("Illegal value %d\n", flag); 953 953 return -EINVAL; 954 954 } 955 955 if (dev->transport->dpo_emulated == NULL) { 956 - printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n"); 956 + pr_err("dev->transport->dpo_emulated is NULL\n"); 957 957 return -EINVAL; 958 958 } 959 959 if (dev->transport->dpo_emulated(dev) == 0) { 960 - printk(KERN_ERR "dev->transport->dpo_emulated not supported\n"); 960 + pr_err("dev->transport->dpo_emulated not supported\n"); 961 961 return -EINVAL; 962 962 } 963 963 dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; 964 - printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" 964 + pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation" 965 965 " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); 966 966 return 0; 967 967 } ··· 969 969 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 970 970 { 971 971 if ((flag != 0) && (flag != 1)) { 972 - printk(KERN_ERR "Illegal value %d\n", flag); 972 + pr_err("Illegal value %d\n", flag); 973 973 return -EINVAL; 974 974 } 975 975 if (dev->transport->fua_write_emulated == NULL) { 976 - printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n"); 976 + pr_err("dev->transport->fua_write_emulated is NULL\n"); 977 977 return -EINVAL; 978 978 } 979 979 if (dev->transport->fua_write_emulated(dev) == 0) { 980 - printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n"); 980 + pr_err("dev->transport->fua_write_emulated not supported\n"); 981 981 return -EINVAL; 982 982 } 983 983 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; 984 - printk(KERN_INFO "dev[%p]: SE Device 
Forced Unit Access WRITEs: %d\n", 984 + pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 985 985 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); 986 986 return 0; 987 987 } ··· 989 989 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) 990 990 { 991 991 if ((flag != 0) && (flag != 1)) { 992 - printk(KERN_ERR "Illegal value %d\n", flag); 992 + pr_err("Illegal value %d\n", flag); 993 993 return -EINVAL; 994 994 } 995 995 if (dev->transport->fua_read_emulated == NULL) { 996 - printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n"); 996 + pr_err("dev->transport->fua_read_emulated is NULL\n"); 997 997 return -EINVAL; 998 998 } 999 999 if (dev->transport->fua_read_emulated(dev) == 0) { 1000 - printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n"); 1000 + pr_err("dev->transport->fua_read_emulated not supported\n"); 1001 1001 return -EINVAL; 1002 1002 } 1003 1003 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; 1004 - printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", 1004 + pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n", 1005 1005 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); 1006 1006 return 0; 1007 1007 } ··· 1009 1009 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 1010 1010 { 1011 1011 if ((flag != 0) && (flag != 1)) { 1012 - printk(KERN_ERR "Illegal value %d\n", flag); 1012 + pr_err("Illegal value %d\n", flag); 1013 1013 return -EINVAL; 1014 1014 } 1015 1015 if (dev->transport->write_cache_emulated == NULL) { 1016 - printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n"); 1016 + pr_err("dev->transport->write_cache_emulated is NULL\n"); 1017 1017 return -EINVAL; 1018 1018 } 1019 1019 if (dev->transport->write_cache_emulated(dev) == 0) { 1020 - printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n"); 1020 + pr_err("dev->transport->write_cache_emulated not supported\n"); 1021 1021 return -EINVAL; 1022 1022 } 1023 1023 
dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; 1024 - printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 1024 + pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 1025 1025 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); 1026 1026 return 0; 1027 1027 } ··· 1029 1029 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) 1030 1030 { 1031 1031 if ((flag != 0) && (flag != 1) && (flag != 2)) { 1032 - printk(KERN_ERR "Illegal value %d\n", flag); 1032 + pr_err("Illegal value %d\n", flag); 1033 1033 return -EINVAL; 1034 1034 } 1035 1035 1036 1036 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1037 - printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1037 + pr_err("dev[%p]: Unable to change SE Device" 1038 1038 " UA_INTRLCK_CTRL while dev_export_obj: %d count" 1039 1039 " exists\n", dev, 1040 1040 atomic_read(&dev->dev_export_obj.obj_access_count)); 1041 1041 return -EINVAL; 1042 1042 } 1043 1043 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; 1044 - printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 1044 + pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 1045 1045 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); 1046 1046 1047 1047 return 0; ··· 1050 1050 int se_dev_set_emulate_tas(struct se_device *dev, int flag) 1051 1051 { 1052 1052 if ((flag != 0) && (flag != 1)) { 1053 - printk(KERN_ERR "Illegal value %d\n", flag); 1053 + pr_err("Illegal value %d\n", flag); 1054 1054 return -EINVAL; 1055 1055 } 1056 1056 1057 1057 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1058 - printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" 1058 + pr_err("dev[%p]: Unable to change SE Device TAS while" 1059 1059 " dev_export_obj: %d count exists\n", dev, 1060 1060 atomic_read(&dev->dev_export_obj.obj_access_count)); 1061 1061 return -EINVAL; 1062 1062 } 1063 1063 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; 1064 - 
printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 1064 + pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 1065 1065 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); 1066 1066 1067 1067 return 0; ··· 1070 1070 int se_dev_set_emulate_tpu(struct se_device *dev, int flag) 1071 1071 { 1072 1072 if ((flag != 0) && (flag != 1)) { 1073 - printk(KERN_ERR "Illegal value %d\n", flag); 1073 + pr_err("Illegal value %d\n", flag); 1074 1074 return -EINVAL; 1075 1075 } 1076 1076 /* 1077 1077 * We expect this value to be non-zero when generic Block Layer 1078 1078 * Discard supported is detected iblock_create_virtdevice(). 1079 1079 */ 1080 - if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { 1081 - printk(KERN_ERR "Generic Block Discard not supported\n"); 1080 + if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1081 + pr_err("Generic Block Discard not supported\n"); 1082 1082 return -ENOSYS; 1083 1083 } 1084 1084 1085 1085 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; 1086 - printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 1086 + pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 1087 1087 dev, flag); 1088 1088 return 0; 1089 1089 } ··· 1091 1091 int se_dev_set_emulate_tpws(struct se_device *dev, int flag) 1092 1092 { 1093 1093 if ((flag != 0) && (flag != 1)) { 1094 - printk(KERN_ERR "Illegal value %d\n", flag); 1094 + pr_err("Illegal value %d\n", flag); 1095 1095 return -EINVAL; 1096 1096 } 1097 1097 /* 1098 1098 * We expect this value to be non-zero when generic Block Layer 1099 1099 * Discard supported is detected iblock_create_virtdevice(). 
1100 1100 */ 1101 - if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { 1102 - printk(KERN_ERR "Generic Block Discard not supported\n"); 1101 + if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1102 + pr_err("Generic Block Discard not supported\n"); 1103 1103 return -ENOSYS; 1104 1104 } 1105 1105 1106 1106 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; 1107 - printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 1107 + pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 1108 1108 dev, flag); 1109 1109 return 0; 1110 1110 } ··· 1112 1112 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1113 1113 { 1114 1114 if ((flag != 0) && (flag != 1)) { 1115 - printk(KERN_ERR "Illegal value %d\n", flag); 1115 + pr_err("Illegal value %d\n", flag); 1116 1116 return -EINVAL; 1117 1117 } 1118 1118 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; 1119 - printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 1119 + pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 1120 1120 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? 
"Enabled" : "Disabled"); 1121 1121 return 0; 1122 1122 } ··· 1141 1141 u32 orig_queue_depth = dev->queue_depth; 1142 1142 1143 1143 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1144 - printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" 1144 + pr_err("dev[%p]: Unable to change SE Device TCQ while" 1145 1145 " dev_export_obj: %d count exists\n", dev, 1146 1146 atomic_read(&dev->dev_export_obj.obj_access_count)); 1147 1147 return -EINVAL; 1148 1148 } 1149 - if (!(queue_depth)) { 1150 - printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" 1149 + if (!queue_depth) { 1150 + pr_err("dev[%p]: Illegal ZERO value for queue" 1151 1151 "_depth\n", dev); 1152 1152 return -EINVAL; 1153 1153 } 1154 1154 1155 1155 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1156 1156 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1157 - printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" 1157 + pr_err("dev[%p]: Passed queue_depth: %u" 1158 1158 " exceeds TCM/SE_Device TCQ: %u\n", 1159 1159 dev, queue_depth, 1160 1160 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); ··· 1163 1163 } else { 1164 1164 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { 1165 1165 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1166 - printk(KERN_ERR "dev[%p]: Passed queue_depth:" 1166 + pr_err("dev[%p]: Passed queue_depth:" 1167 1167 " %u exceeds TCM/SE_Device MAX" 1168 1168 " TCQ: %u\n", dev, queue_depth, 1169 1169 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); ··· 1178 1178 else if (queue_depth < orig_queue_depth) 1179 1179 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); 1180 1180 1181 - printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", 1181 + pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1182 1182 dev, queue_depth); 1183 1183 return 0; 1184 1184 } ··· 1188 1188 int force = 0; /* Force setting for VDEVS */ 1189 1189 1190 1190 if 
(atomic_read(&dev->dev_export_obj.obj_access_count)) { 1191 - printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1191 + pr_err("dev[%p]: Unable to change SE Device" 1192 1192 " max_sectors while dev_export_obj: %d count exists\n", 1193 1193 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1194 1194 return -EINVAL; 1195 1195 } 1196 - if (!(max_sectors)) { 1197 - printk(KERN_ERR "dev[%p]: Illegal ZERO value for" 1196 + if (!max_sectors) { 1197 + pr_err("dev[%p]: Illegal ZERO value for" 1198 1198 " max_sectors\n", dev); 1199 1199 return -EINVAL; 1200 1200 } 1201 1201 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1202 - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" 1202 + pr_err("dev[%p]: Passed max_sectors: %u less than" 1203 1203 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, 1204 1204 DA_STATUS_MAX_SECTORS_MIN); 1205 1205 return -EINVAL; 1206 1206 } 1207 1207 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1208 1208 if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { 1209 - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1209 + pr_err("dev[%p]: Passed max_sectors: %u" 1210 1210 " greater than TCM/SE_Device max_sectors:" 1211 1211 " %u\n", dev, max_sectors, 1212 1212 dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1213 1213 return -EINVAL; 1214 1214 } 1215 1215 } else { 1216 - if (!(force) && (max_sectors > 1216 + if (!force && (max_sectors > 1217 1217 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { 1218 - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1218 + pr_err("dev[%p]: Passed max_sectors: %u" 1219 1219 " greater than TCM/SE_Device max_sectors" 1220 1220 ": %u, use force=1 to override.\n", dev, 1221 1221 max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1222 1222 return -EINVAL; 1223 1223 } 1224 1224 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1225 - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1225 + pr_err("dev[%p]: Passed max_sectors: %u" 1226 1226 " greater than 
DA_STATUS_MAX_SECTORS_MAX:" 1227 1227 " %u\n", dev, max_sectors, 1228 1228 DA_STATUS_MAX_SECTORS_MAX); ··· 1231 1231 } 1232 1232 1233 1233 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1234 - printk("dev[%p]: SE Device max_sectors changed to %u\n", 1234 + pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1235 1235 dev, max_sectors); 1236 1236 return 0; 1237 1237 } ··· 1239 1239 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1240 1240 { 1241 1241 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1242 - printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1242 + pr_err("dev[%p]: Unable to change SE Device" 1243 1243 " optimal_sectors while dev_export_obj: %d count exists\n", 1244 1244 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1245 1245 return -EINVAL; 1246 1246 } 1247 1247 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1248 - printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" 1248 + pr_err("dev[%p]: Passed optimal_sectors cannot be" 1249 1249 " changed for TCM/pSCSI\n", dev); 1250 1250 return -EINVAL; 1251 1251 } 1252 1252 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { 1253 - printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" 1253 + pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1254 1254 " greater than max_sectors: %u\n", dev, 1255 1255 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 1256 1256 return -EINVAL; 1257 1257 } 1258 1258 1259 1259 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; 1260 - printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", 1260 + pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", 1261 1261 dev, optimal_sectors); 1262 1262 return 0; 1263 1263 } ··· 1265 1265 int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1266 1266 { 1267 1267 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1268 - printk(KERN_ERR "dev[%p]: Unable 
to change SE Device block_size" 1268 + pr_err("dev[%p]: Unable to change SE Device block_size" 1269 1269 " while dev_export_obj: %d count exists\n", dev, 1270 1270 atomic_read(&dev->dev_export_obj.obj_access_count)); 1271 1271 return -EINVAL; ··· 1275 1275 (block_size != 1024) && 1276 1276 (block_size != 2048) && 1277 1277 (block_size != 4096)) { 1278 - printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" 1278 + pr_err("dev[%p]: Illegal value for block_device: %u" 1279 1279 " for SE device, must be 512, 1024, 2048 or 4096\n", 1280 1280 dev, block_size); 1281 1281 return -EINVAL; 1282 1282 } 1283 1283 1284 1284 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1285 - printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" 1285 + pr_err("dev[%p]: Not allowed to change block_size for" 1286 1286 " Physical Device, use for Linux/SCSI to change" 1287 1287 " block_size for underlying hardware\n", dev); 1288 1288 return -EINVAL; 1289 1289 } 1290 1290 1291 1291 dev->se_sub_dev->se_dev_attrib.block_size = block_size; 1292 - printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", 1292 + pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1293 1293 dev, block_size); 1294 1294 return 0; 1295 1295 } ··· 1304 1304 u32 lun_access = 0; 1305 1305 1306 1306 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { 1307 - printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", 1307 + pr_err("Unable to export struct se_device while dev_access_obj: %d\n", 1308 1308 atomic_read(&dev->dev_access_obj.obj_access_count)); 1309 1309 return NULL; 1310 1310 } 1311 1311 1312 1312 lun_p = core_tpg_pre_addlun(tpg, lun); 1313 - if ((IS_ERR(lun_p)) || !(lun_p)) 1313 + if ((IS_ERR(lun_p)) || !lun_p) 1314 1314 return NULL; 1315 1315 1316 1316 if (dev->dev_flags & DF_READ_ONLY) ··· 1321 1321 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) 1322 1322 return NULL; 1323 1323 1324 - printk(KERN_INFO 
"%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1324 + pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1325 1325 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1326 1326 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, 1327 1327 tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); ··· 1357 1357 int ret = 0; 1358 1358 1359 1359 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); 1360 - if (!(lun)) 1360 + if (!lun) 1361 1361 return ret; 1362 1362 1363 1363 core_tpg_post_dellun(tpg, lun); 1364 1364 1365 - printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" 1365 + pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" 1366 1366 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 1367 1367 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, 1368 1368 tpg->se_tpg_tfo->get_fabric_name()); ··· 1376 1376 1377 1377 spin_lock(&tpg->tpg_lun_lock); 1378 1378 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1379 - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" 1379 + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" 1380 1380 "_PER_TPG-1: %u for Target Portal Group: %hu\n", 1381 1381 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1382 1382 TRANSPORT_MAX_LUNS_PER_TPG-1, ··· 1387 1387 lun = &tpg->tpg_lun_list[unpacked_lun]; 1388 1388 1389 1389 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { 1390 - printk(KERN_ERR "%s Logical Unit Number: %u is not free on" 1390 + pr_err("%s Logical Unit Number: %u is not free on" 1391 1391 " Target Portal Group: %hu, ignoring request.\n", 1392 1392 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1393 1393 tpg->se_tpg_tfo->tpg_get_tag(tpg)); ··· 1409 1409 1410 1410 spin_lock(&tpg->tpg_lun_lock); 1411 1411 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1412 - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" 1412 + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" 1413 1413 "_TPG-1: %u for Target Portal Group: %hu\n", 1414 1414 
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1415 1415 TRANSPORT_MAX_LUNS_PER_TPG-1, ··· 1420 1420 lun = &tpg->tpg_lun_list[unpacked_lun]; 1421 1421 1422 1422 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 1423 - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 1423 + pr_err("%s Logical Unit Number: %u is not active on" 1424 1424 " Target Portal Group: %hu, ignoring request.\n", 1425 1425 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1426 1426 tpg->se_tpg_tfo->tpg_get_tag(tpg)); ··· 1442 1442 struct se_node_acl *nacl; 1443 1443 1444 1444 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { 1445 - printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", 1445 + pr_err("%s InitiatorName exceeds maximum size.\n", 1446 1446 tpg->se_tpg_tfo->get_fabric_name()); 1447 1447 *ret = -EOVERFLOW; 1448 1448 return NULL; 1449 1449 } 1450 1450 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 1451 - if (!(nacl)) { 1451 + if (!nacl) { 1452 1452 *ret = -EINVAL; 1453 1453 return NULL; 1454 1454 } 1455 1455 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); 1456 - if (!(lacl)) { 1457 - printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); 1456 + if (!lacl) { 1457 + pr_err("Unable to allocate memory for struct se_lun_acl.\n"); 1458 1458 *ret = -ENOMEM; 1459 1459 return NULL; 1460 1460 } ··· 1477 1477 struct se_node_acl *nacl; 1478 1478 1479 1479 lun = core_dev_get_lun(tpg, unpacked_lun); 1480 - if (!(lun)) { 1481 - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 1480 + if (!lun) { 1481 + pr_err("%s Logical Unit Number: %u is not active on" 1482 1482 " Target Portal Group: %hu, ignoring request.\n", 1483 1483 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1484 1484 tpg->se_tpg_tfo->tpg_get_tag(tpg)); ··· 1486 1486 } 1487 1487 1488 1488 nacl = lacl->se_lun_nacl; 1489 - if (!(nacl)) 1489 + if (!nacl) 1490 1490 return -EINVAL; 1491 1491 1492 1492 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && ··· 1505 
1505 smp_mb__after_atomic_inc(); 1506 1506 spin_unlock(&lun->lun_acl_lock); 1507 1507 1508 - printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1508 + pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1509 1509 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 1510 1510 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, 1511 1511 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", ··· 1530 1530 struct se_node_acl *nacl; 1531 1531 1532 1532 nacl = lacl->se_lun_nacl; 1533 - if (!(nacl)) 1533 + if (!nacl) 1534 1534 return -EINVAL; 1535 1535 1536 1536 spin_lock(&lun->lun_acl_lock); ··· 1544 1544 1545 1545 lacl->se_lun = NULL; 1546 1546 1547 - printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" 1547 + pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" 1548 1548 " InitiatorNode: %s Mapped LUN: %u\n", 1549 1549 tpg->se_tpg_tfo->get_fabric_name(), 1550 1550 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, ··· 1557 1557 struct se_portal_group *tpg, 1558 1558 struct se_lun_acl *lacl) 1559 1559 { 1560 - printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 1560 + pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 1561 1561 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1562 1562 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1563 1563 tpg->se_tpg_tfo->get_fabric_name(), ··· 1575 1575 char buf[16]; 1576 1576 int ret; 1577 1577 1578 - hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); 1578 + hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); 1579 1579 if (IS_ERR(hba)) 1580 1580 return PTR_ERR(hba); 1581 1581 ··· 1583 1583 t = hba->transport; 1584 1584 1585 1585 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 1586 - if (!(se_dev)) { 1587 - printk(KERN_ERR "Unable to allocate memory for" 1586 + if (!se_dev) { 1587 + pr_err("Unable to allocate memory for" 1588 1588 " struct se_subsystem_dev\n"); 1589 1589 ret = -ENOMEM; 1590 1590 goto out; ··· 1606 1606 se_dev->se_dev_hba = hba; 
1607 1607 1608 1608 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); 1609 - if (!(se_dev->se_dev_su_ptr)) { 1610 - printk(KERN_ERR "Unable to locate subsystem dependent pointer" 1609 + if (!se_dev->se_dev_su_ptr) { 1610 + pr_err("Unable to locate subsystem dependent pointer" 1611 1611 " from allocate_virtdevice()\n"); 1612 1612 ret = -ENOMEM; 1613 1613 goto out; ··· 1643 1643 struct se_hba *hba = lun0_hba; 1644 1644 struct se_subsystem_dev *su_dev = lun0_su_dev; 1645 1645 1646 - if (!(hba)) 1646 + if (!hba) 1647 1647 return; 1648 1648 1649 1649 if (g_lun0_dev)
+43 -67
drivers/target/target_core_fabric_configfs.c
··· 60 60 cit->ct_group_ops = _group_ops; \ 61 61 cit->ct_attrs = _attrs; \ 62 62 cit->ct_owner = tf->tf_module; \ 63 - printk("Setup generic %s\n", __stringify(_name)); \ 63 + pr_debug("Setup generic %s\n", __stringify(_name)); \ 64 64 } 65 65 66 66 /* Start of tfc_tpg_mappedlun_cit */ ··· 80 80 /* 81 81 * Ensure that the source port exists 82 82 */ 83 - if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) { 84 - printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep" 83 + if (!lun->lun_sep || !lun->lun_sep->sep_tpg) { 84 + pr_err("Source se_lun->lun_sep or lun->lun_sep->sep" 85 85 "_tpg does not exist\n"); 86 86 return -EINVAL; 87 87 } ··· 96 96 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT 97 97 */ 98 98 if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { 99 - printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n", 99 + pr_err("Illegal Initiator ACL SymLink outside of %s\n", 100 100 config_item_name(wwn_ci)); 101 101 return -EINVAL; 102 102 } 103 103 if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { 104 - printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s" 104 + pr_err("Illegal Initiator ACL Symlink outside of %s" 105 105 " TPGT: %s\n", config_item_name(wwn_ci), 106 106 config_item_name(tpg_ci)); 107 107 return -EINVAL; ··· 147 147 /* 148 148 * Determine if the underlying MappedLUN has already been released.. 149 149 */ 150 - if (!(deve->se_lun)) 150 + if (!deve->se_lun) 151 151 return 0; 152 152 153 153 lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); ··· 202 202 TRANSPORT_LUNFLAGS_READ_WRITE, 203 203 lacl->se_lun_nacl); 204 204 205 - printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" 205 + pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" 206 206 " Mapped LUN: %u Write Protect bit to %s\n", 207 207 se_tpg->se_tpg_tfo->get_fabric_name(), 208 208 lacl->initiatorname, lacl->mapped_lun, (op) ? 
"ON" : "OFF"); ··· 327 327 int ret = 0; 328 328 329 329 acl_ci = &group->cg_item; 330 - if (!(acl_ci)) { 331 - printk(KERN_ERR "Unable to locatel acl_ci\n"); 330 + if (!acl_ci) { 331 + pr_err("Unable to locatel acl_ci\n"); 332 332 return NULL; 333 333 } 334 334 335 335 buf = kzalloc(strlen(name) + 1, GFP_KERNEL); 336 - if (!(buf)) { 337 - printk(KERN_ERR "Unable to allocate memory for name buf\n"); 336 + if (!buf) { 337 + pr_err("Unable to allocate memory for name buf\n"); 338 338 return ERR_PTR(-ENOMEM); 339 339 } 340 340 snprintf(buf, strlen(name) + 1, "%s", name); ··· 342 342 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. 343 343 */ 344 344 if (strstr(buf, "lun_") != buf) { 345 - printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s" 345 + pr_err("Unable to locate \"lun_\" from buf: %s" 346 346 " name: %s\n", buf, name); 347 347 ret = -EINVAL; 348 348 goto out; ··· 358 358 359 359 lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, 360 360 config_item_name(acl_ci), &ret); 361 - if (!(lacl)) { 361 + if (!lacl) { 362 362 ret = -EINVAL; 363 363 goto out; 364 364 } ··· 367 367 lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 368 368 GFP_KERNEL); 369 369 if (!lacl_cg->default_groups) { 370 - printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n"); 370 + pr_err("Unable to allocate lacl_cg->default_groups\n"); 371 371 ret = -ENOMEM; 372 372 goto out; 373 373 } ··· 383 383 ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 384 384 GFP_KERNEL); 385 385 if (!ml_stat_grp->default_groups) { 386 - printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n"); 386 + pr_err("Unable to allocate ml_stat_grp->default_groups\n"); 387 387 ret = -ENOMEM; 388 388 goto out; 389 389 } ··· 474 474 struct se_node_acl *se_nacl; 475 475 struct config_group *nacl_cg; 476 476 477 - if (!(tf->tf_ops.fabric_make_nodeacl)) { 478 - printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n"); 477 + if 
(!tf->tf_ops.fabric_make_nodeacl) { 478 + pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n"); 479 479 return ERR_PTR(-ENOSYS); 480 480 } 481 481 ··· 572 572 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 573 573 struct se_tpg_np *se_tpg_np; 574 574 575 - if (!(tf->tf_ops.fabric_make_np)) { 576 - printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n"); 575 + if (!tf->tf_ops.fabric_make_np) { 576 + pr_err("tf->tf_ops.fabric_make_np is NULL\n"); 577 577 return ERR_PTR(-ENOSYS); 578 578 } 579 579 580 580 se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); 581 - if (!(se_tpg_np) || IS_ERR(se_tpg_np)) 581 + if (!se_tpg_np || IS_ERR(se_tpg_np)) 582 582 return ERR_PTR(-EINVAL); 583 583 584 584 se_tpg_np->tpg_np_parent = se_tpg; ··· 627 627 struct se_lun *lun, 628 628 char *page) 629 629 { 630 - if (!(lun)) 631 - return -ENODEV; 632 - 633 - if (!(lun->lun_sep)) 630 + if (!lun || !lun->lun_sep) 634 631 return -ENODEV; 635 632 636 633 return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); ··· 638 641 const char *page, 639 642 size_t count) 640 643 { 641 - if (!(lun)) 642 - return -ENODEV; 643 - 644 - if (!(lun->lun_sep)) 644 + if (!lun || !lun->lun_sep) 645 645 return -ENODEV; 646 646 647 647 return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); ··· 653 659 struct se_lun *lun, 654 660 char *page) 655 661 { 656 - if (!(lun)) 657 - return -ENODEV; 658 - 659 - if (!(lun->lun_sep)) 662 + if (!lun || !lun->lun_sep) 660 663 return -ENODEV; 661 664 662 665 return core_alua_show_offline_bit(lun, page); ··· 664 673 const char *page, 665 674 size_t count) 666 675 { 667 - if (!(lun)) 668 - return -ENODEV; 669 - 670 - if (!(lun->lun_sep)) 676 + if (!lun || !lun->lun_sep) 671 677 return -ENODEV; 672 678 673 679 return core_alua_store_offline_bit(lun, page, count); ··· 679 691 struct se_lun *lun, 680 692 char *page) 681 693 { 682 - if (!(lun)) 683 - return -ENODEV; 684 - 685 - if (!(lun->lun_sep)) 694 + if (!lun || !lun->lun_sep) 686 695 return -ENODEV; 
687 696 688 697 return core_alua_show_secondary_status(lun, page); ··· 690 705 const char *page, 691 706 size_t count) 692 707 { 693 - if (!(lun)) 694 - return -ENODEV; 695 - 696 - if (!(lun->lun_sep)) 708 + if (!lun || !lun->lun_sep) 697 709 return -ENODEV; 698 710 699 711 return core_alua_store_secondary_status(lun, page, count); ··· 705 723 struct se_lun *lun, 706 724 char *page) 707 725 { 708 - if (!(lun)) 709 - return -ENODEV; 710 - 711 - if (!(lun->lun_sep)) 726 + if (!lun || !lun->lun_sep) 712 727 return -ENODEV; 713 728 714 729 return core_alua_show_secondary_write_metadata(lun, page); ··· 716 737 const char *page, 717 738 size_t count) 718 739 { 719 - if (!(lun)) 720 - return -ENODEV; 721 - 722 - if (!(lun->lun_sep)) 740 + if (!lun || !lun->lun_sep) 723 741 return -ENODEV; 724 742 725 743 return core_alua_store_secondary_write_metadata(lun, page, count); ··· 757 781 tf = se_tpg->se_tpg_wwn->wwn_tf; 758 782 759 783 if (lun->lun_se_dev != NULL) { 760 - printk(KERN_ERR "Port Symlink already exists\n"); 784 + pr_err("Port Symlink already exists\n"); 761 785 return -EEXIST; 762 786 } 763 787 764 788 dev = se_dev->se_dev_ptr; 765 - if (!(dev)) { 766 - printk(KERN_ERR "Unable to locate struct se_device pointer from" 789 + if (!dev) { 790 + pr_err("Unable to locate struct se_device pointer from" 767 791 " %s\n", config_item_name(se_dev_ci)); 768 792 ret = -ENODEV; 769 793 goto out; ··· 771 795 772 796 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, 773 797 lun->unpacked_lun); 774 - if ((IS_ERR(lun_p)) || !(lun_p)) { 775 - printk(KERN_ERR "core_dev_add_lun() failed\n"); 798 + if (IS_ERR(lun_p) || !lun_p) { 799 + pr_err("core_dev_add_lun() failed\n"); 776 800 ret = -EINVAL; 777 801 goto out; 778 802 } ··· 864 888 int errno; 865 889 866 890 if (strstr(name, "lun_") != name) { 867 - printk(KERN_ERR "Unable to locate \'_\" in" 891 + pr_err("Unable to locate \'_\" in" 868 892 " \"lun_$LUN_NUMBER\"\n"); 869 893 return ERR_PTR(-EINVAL); 870 894 } ··· 872 896 return 
ERR_PTR(-EINVAL); 873 897 874 898 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); 875 - if (!(lun)) 899 + if (!lun) 876 900 return ERR_PTR(-EINVAL); 877 901 878 902 lun_cg = &lun->lun_group; 879 903 lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 880 904 GFP_KERNEL); 881 905 if (!lun_cg->default_groups) { 882 - printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n"); 906 + pr_err("Unable to allocate lun_cg->default_groups\n"); 883 907 return ERR_PTR(-ENOMEM); 884 908 } 885 909 ··· 894 918 port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 895 919 GFP_KERNEL); 896 920 if (!port_stat_grp->default_groups) { 897 - printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n"); 921 + pr_err("Unable to allocate port_stat_grp->default_groups\n"); 898 922 errno = -ENOMEM; 899 923 goto out; 900 924 } ··· 1007 1031 struct target_fabric_configfs *tf = wwn->wwn_tf; 1008 1032 struct se_portal_group *se_tpg; 1009 1033 1010 - if (!(tf->tf_ops.fabric_make_tpg)) { 1011 - printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n"); 1034 + if (!tf->tf_ops.fabric_make_tpg) { 1035 + pr_err("tf->tf_ops.fabric_make_tpg is NULL\n"); 1012 1036 return ERR_PTR(-ENOSYS); 1013 1037 } 1014 1038 1015 1039 se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); 1016 - if (!(se_tpg) || IS_ERR(se_tpg)) 1040 + if (!se_tpg || IS_ERR(se_tpg)) 1017 1041 return ERR_PTR(-EINVAL); 1018 1042 /* 1019 1043 * Setup default groups from pre-allocated se_tpg->tpg_default_groups ··· 1106 1130 struct target_fabric_configfs, tf_group); 1107 1131 struct se_wwn *wwn; 1108 1132 1109 - if (!(tf->tf_ops.fabric_make_wwn)) { 1110 - printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n"); 1133 + if (!tf->tf_ops.fabric_make_wwn) { 1134 + pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); 1111 1135 return ERR_PTR(-ENOSYS); 1112 1136 } 1113 1137 1114 1138 wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); 1115 - if (!(wwn) || IS_ERR(wwn)) 1139 + if (!wwn || IS_ERR(wwn)) 1116 
1140 return ERR_PTR(-EINVAL); 1117 1141 1118 1142 wwn->wwn_tf = tf;
+5 -5
drivers/target/target_core_fabric_lib.c
··· 172 172 ptr = &se_nacl->initiatorname[0]; 173 173 174 174 for (i = 0; i < 24; ) { 175 - if (!(strncmp(&ptr[i], ":", 1))) { 175 + if (!strncmp(&ptr[i], ":", 1)) { 176 176 i++; 177 177 continue; 178 178 } ··· 386 386 * Reserved 387 387 */ 388 388 if ((format_code != 0x00) && (format_code != 0x40)) { 389 - printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" 389 + pr_err("Illegal format code: 0x%02x for iSCSI" 390 390 " Initiator Transport ID\n", format_code); 391 391 return NULL; 392 392 } ··· 406 406 tid_len += padding; 407 407 408 408 if ((add_len + 4) != tid_len) { 409 - printk(KERN_INFO "LIO-Target Extracted add_len: %hu " 409 + pr_debug("LIO-Target Extracted add_len: %hu " 410 410 "does not match calculated tid_len: %u," 411 411 " using tid_len instead\n", add_len+4, tid_len); 412 412 *out_tid_len = tid_len; ··· 420 420 */ 421 421 if (format_code == 0x40) { 422 422 p = strstr((char *)&buf[4], ",i,0x"); 423 - if (!(p)) { 424 - printk(KERN_ERR "Unable to locate \",i,0x\" seperator" 423 + if (!p) { 424 + pr_err("Unable to locate \",i,0x\" seperator" 425 425 " for Initiator port identifier: %s\n", 426 426 (char *)&buf[4]); 427 427 return NULL;
+44 -56
drivers/target/target_core_file.c
··· 42 42 43 43 #include "target_core_file.h" 44 44 45 - #if 1 46 - #define DEBUG_FD_CACHE(x...) printk(x) 47 - #else 48 - #define DEBUG_FD_CACHE(x...) 49 - #endif 50 - 51 - #if 1 52 - #define DEBUG_FD_FUA(x...) printk(x) 53 - #else 54 - #define DEBUG_FD_FUA(x...) 55 - #endif 56 - 57 45 static struct se_subsystem_api fileio_template; 58 46 59 47 /* fd_attach_hba(): (Part of se_subsystem_api_t template) ··· 53 65 struct fd_host *fd_host; 54 66 55 67 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); 56 - if (!(fd_host)) { 57 - printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); 68 + if (!fd_host) { 69 + pr_err("Unable to allocate memory for struct fd_host\n"); 58 70 return -ENOMEM; 59 71 } 60 72 ··· 62 74 63 75 hba->hba_ptr = fd_host; 64 76 65 - printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 77 + pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 66 78 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 67 79 TARGET_CORE_MOD_VERSION); 68 - printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 80 + pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 69 81 " MaxSectors: %u\n", 70 82 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); 71 83 ··· 76 88 { 77 89 struct fd_host *fd_host = hba->hba_ptr; 78 90 79 - printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" 91 + pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" 80 92 " Target Core\n", hba->hba_id, fd_host->fd_host_id); 81 93 82 94 kfree(fd_host); ··· 89 101 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; 90 102 91 103 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); 92 - if (!(fd_dev)) { 93 - printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n"); 104 + if (!fd_dev) { 105 + pr_err("Unable to allocate memory for struct fd_dev\n"); 94 106 return NULL; 95 107 } 96 108 97 109 fd_dev->fd_host = fd_host; 98 110 99 - printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name); 111 + 
pr_debug("FILEIO: Allocated fd_dev for %p\n", name); 100 112 101 113 return fd_dev; 102 114 } ··· 129 141 set_fs(old_fs); 130 142 131 143 if (IS_ERR(dev_p)) { 132 - printk(KERN_ERR "getname(%s) failed: %lu\n", 144 + pr_err("getname(%s) failed: %lu\n", 133 145 fd_dev->fd_dev_name, IS_ERR(dev_p)); 134 146 ret = PTR_ERR(dev_p); 135 147 goto fail; ··· 152 164 153 165 file = filp_open(dev_p, flags, 0600); 154 166 if (IS_ERR(file)) { 155 - printk(KERN_ERR "filp_open(%s) failed\n", dev_p); 167 + pr_err("filp_open(%s) failed\n", dev_p); 156 168 ret = PTR_ERR(file); 157 169 goto fail; 158 170 } 159 171 if (!file || !file->f_dentry) { 160 - printk(KERN_ERR "filp_open(%s) failed\n", dev_p); 172 + pr_err("filp_open(%s) failed\n", dev_p); 161 173 goto fail; 162 174 } 163 175 fd_dev->fd_file = file; ··· 187 199 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - 188 200 fd_dev->fd_block_size); 189 201 190 - printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct" 202 + pr_debug("FILEIO: Using size: %llu bytes from struct" 191 203 " block_device blocks: %llu logical_block_size: %d\n", 192 204 fd_dev->fd_dev_size, 193 205 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), 194 206 fd_dev->fd_block_size); 195 207 } else { 196 208 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 197 - printk(KERN_ERR "FILEIO: Missing fd_dev_size=" 209 + pr_err("FILEIO: Missing fd_dev_size=" 198 210 " parameter, and no backing struct" 199 211 " block_device\n"); 200 212 goto fail; ··· 213 225 dev = transport_add_device_to_core_hba(hba, &fileio_template, 214 226 se_dev, dev_flags, fd_dev, 215 227 &dev_limits, "FILEIO", FD_VERSION); 216 - if (!(dev)) 228 + if (!dev) 217 229 goto fail; 218 230 219 231 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 220 232 fd_dev->fd_queue_depth = dev->queue_depth; 221 233 222 - printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 234 + pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 223 235 " %llu total bytes\n", 
fd_host->fd_host_id, fd_dev->fd_dev_id, 224 236 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 225 237 ··· 257 269 258 270 259 271 static struct se_task * 260 - fd_alloc_task(struct se_cmd *cmd) 272 + fd_alloc_task(unsigned char *cdb) 261 273 { 262 274 struct fd_request *fd_req; 263 275 264 276 fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); 265 - if (!(fd_req)) { 266 - printk(KERN_ERR "Unable to allocate struct fd_request\n"); 277 + if (!fd_req) { 278 + pr_err("Unable to allocate struct fd_request\n"); 267 279 return NULL; 268 280 } 269 - 270 - fd_req->fd_dev = cmd->se_dev->dev_ptr; 271 281 272 282 return &fd_req->fd_task; 273 283 } ··· 273 287 static int fd_do_readv(struct se_task *task) 274 288 { 275 289 struct fd_request *req = FILE_REQ(task); 276 - struct file *fd = req->fd_dev->fd_file; 290 + struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; 291 + struct file *fd = dev->fd_file; 277 292 struct scatterlist *sg = task->task_sg; 278 293 struct iovec *iov; 279 294 mm_segment_t old_fs; ··· 282 295 task->se_dev->se_sub_dev->se_dev_attrib.block_size); 283 296 int ret = 0, i; 284 297 285 - iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); 286 - if (!(iov)) { 287 - printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); 298 + iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); 299 + if (!iov) { 300 + pr_err("Unable to allocate fd_do_readv iov[]\n"); 288 301 return -ENOMEM; 289 302 } 290 303 291 - for (i = 0; i < task->task_sg_num; i++) { 304 + for (i = 0; i < task->task_sg_nents; i++) { 292 305 iov[i].iov_len = sg[i].length; 293 306 iov[i].iov_base = sg_virt(&sg[i]); 294 307 } 295 308 296 309 old_fs = get_fs(); 297 310 set_fs(get_ds()); 298 - ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos); 311 + ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); 299 312 set_fs(old_fs); 300 313 301 314 kfree(iov); ··· 306 319 */ 307 320 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { 308 321 if (ret < 0 || ret != 
task->task_size) { 309 - printk(KERN_ERR "vfs_readv() returned %d," 322 + pr_err("vfs_readv() returned %d," 310 323 " expecting %d for S_ISBLK\n", ret, 311 324 (int)task->task_size); 312 325 return (ret < 0 ? ret : -EINVAL); 313 326 } 314 327 } else { 315 328 if (ret < 0) { 316 - printk(KERN_ERR "vfs_readv() returned %d for non" 329 + pr_err("vfs_readv() returned %d for non" 317 330 " S_ISBLK\n", ret); 318 331 return ret; 319 332 } ··· 325 338 static int fd_do_writev(struct se_task *task) 326 339 { 327 340 struct fd_request *req = FILE_REQ(task); 328 - struct file *fd = req->fd_dev->fd_file; 341 + struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; 342 + struct file *fd = dev->fd_file; 329 343 struct scatterlist *sg = task->task_sg; 330 344 struct iovec *iov; 331 345 mm_segment_t old_fs; ··· 334 346 task->se_dev->se_sub_dev->se_dev_attrib.block_size); 335 347 int ret, i = 0; 336 348 337 - iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); 338 - if (!(iov)) { 339 - printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); 349 + iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); 350 + if (!iov) { 351 + pr_err("Unable to allocate fd_do_writev iov[]\n"); 340 352 return -ENOMEM; 341 353 } 342 354 343 - for (i = 0; i < task->task_sg_num; i++) { 355 + for (i = 0; i < task->task_sg_nents; i++) { 344 356 iov[i].iov_len = sg[i].length; 345 357 iov[i].iov_base = sg_virt(&sg[i]); 346 358 } 347 359 348 360 old_fs = get_fs(); 349 361 set_fs(get_ds()); 350 - ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos); 362 + ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); 351 363 set_fs(old_fs); 352 364 353 365 kfree(iov); 354 366 355 367 if (ret < 0 || ret != task->task_size) { 356 - printk(KERN_ERR "vfs_writev() returned %d\n", ret); 368 + pr_err("vfs_writev() returned %d\n", ret); 357 369 return (ret < 0 ? 
ret : -EINVAL); 358 370 } 359 371 ··· 392 404 393 405 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 394 406 if (ret != 0) 395 - printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); 407 + pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 396 408 397 409 if (!immed) 398 410 transport_complete_sync_cache(cmd, ret == 0); ··· 437 449 loff_t end = start + task->task_size; 438 450 int ret; 439 451 440 - DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", 452 + pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", 441 453 task->task_lba, task->task_size); 442 454 443 455 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 444 456 if (ret != 0) 445 - printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); 457 + pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 446 458 } 447 459 448 460 static int fd_do_task(struct se_task *task) ··· 536 548 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, 537 549 "%s", arg_p); 538 550 kfree(arg_p); 539 - printk(KERN_INFO "FILEIO: Referencing Path: %s\n", 551 + pr_debug("FILEIO: Referencing Path: %s\n", 540 552 fd_dev->fd_dev_name); 541 553 fd_dev->fbd_flags |= FBDF_HAS_PATH; 542 554 break; ··· 549 561 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); 550 562 kfree(arg_p); 551 563 if (ret < 0) { 552 - printk(KERN_ERR "strict_strtoull() failed for" 564 + pr_err("strict_strtoull() failed for" 553 565 " fd_dev_size=\n"); 554 566 goto out; 555 567 } 556 - printk(KERN_INFO "FILEIO: Referencing Size: %llu" 568 + pr_debug("FILEIO: Referencing Size: %llu" 557 569 " bytes\n", fd_dev->fd_dev_size); 558 570 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 559 571 break; 560 572 case Opt_fd_buffered_io: 561 573 match_int(args, &arg); 562 574 if (arg != 1) { 563 - printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg); 575 + pr_err("bogus fd_buffered_io=%d value\n", arg); 564 576 ret = -EINVAL; 565 577 goto out; 566 578 } 567 579 568 - printk(KERN_INFO "FILEIO: Using buffered I/O" 580 + pr_debug("FILEIO: Using 
buffered I/O" 569 581 " operations for struct fd_dev\n"); 570 582 571 583 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; ··· 585 597 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; 586 598 587 599 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 588 - printk(KERN_ERR "Missing fd_dev_name=\n"); 600 + pr_err("Missing fd_dev_name=\n"); 589 601 return -EINVAL; 590 602 } 591 603
-2
drivers/target/target_core_file.h
··· 16 16 struct se_task fd_task; 17 17 /* SCSI CDB from iSCSI Command PDU */ 18 18 unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; 19 - /* FILEIO device */ 20 - struct fd_dev *fd_dev; 21 19 } ____cacheline_aligned; 22 20 23 21 #define FBDF_HAS_PATH 0x01
+6 -6
drivers/target/target_core_hba.c
··· 58 58 59 59 mutex_lock(&subsystem_mutex); 60 60 list_for_each_entry(s, &subsystem_list, sub_api_list) { 61 - if (!(strcmp(s->name, sub_api->name))) { 62 - printk(KERN_ERR "%p is already registered with" 61 + if (!strcmp(s->name, sub_api->name)) { 62 + pr_err("%p is already registered with" 63 63 " duplicate name %s, unable to process" 64 64 " request\n", s, s->name); 65 65 mutex_unlock(&subsystem_mutex); ··· 69 69 list_add_tail(&sub_api->sub_api_list, &subsystem_list); 70 70 mutex_unlock(&subsystem_mutex); 71 71 72 - printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" 72 + pr_debug("TCM: Registered subsystem plugin: %s struct module:" 73 73 " %p\n", sub_api->name, sub_api->owner); 74 74 return 0; 75 75 } ··· 109 109 110 110 hba = kzalloc(sizeof(*hba), GFP_KERNEL); 111 111 if (!hba) { 112 - printk(KERN_ERR "Unable to allocate struct se_hba\n"); 112 + pr_err("Unable to allocate struct se_hba\n"); 113 113 return ERR_PTR(-ENOMEM); 114 114 } 115 115 ··· 135 135 list_add_tail(&hba->hba_node, &hba_list); 136 136 spin_unlock(&hba_lock); 137 137 138 - printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" 138 + pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target" 139 139 " Core\n", hba->hba_id); 140 140 141 141 return hba; ··· 161 161 list_del(&hba->hba_node); 162 162 spin_unlock(&hba_lock); 163 163 164 - printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" 164 + pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" 165 165 " Core\n", hba->hba_id); 166 166 167 167 if (hba->transport->owner)
+55 -61
drivers/target/target_core_iblock.c
··· 47 47 48 48 #include "target_core_iblock.h" 49 49 50 - #if 0 51 - #define DEBUG_IBLOCK(x...) printk(x) 52 - #else 53 - #define DEBUG_IBLOCK(x...) 54 - #endif 55 - 56 50 static struct se_subsystem_api iblock_template; 57 51 58 52 static void iblock_bio_done(struct bio *, int); ··· 60 66 struct iblock_hba *ib_host; 61 67 62 68 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); 63 - if (!(ib_host)) { 64 - printk(KERN_ERR "Unable to allocate memory for" 69 + if (!ib_host) { 70 + pr_err("Unable to allocate memory for" 65 71 " struct iblock_hba\n"); 66 72 return -ENOMEM; 67 73 } ··· 70 76 71 77 hba->hba_ptr = ib_host; 72 78 73 - printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" 79 + pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" 74 80 " Generic Target Core Stack %s\n", hba->hba_id, 75 81 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); 76 82 77 - printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", 83 + pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", 78 84 hba->hba_id, ib_host->iblock_host_id); 79 85 80 86 return 0; ··· 84 90 { 85 91 struct iblock_hba *ib_host = hba->hba_ptr; 86 92 87 - printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" 93 + pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" 88 94 " Target Core\n", hba->hba_id, ib_host->iblock_host_id); 89 95 90 96 kfree(ib_host); ··· 97 103 struct iblock_hba *ib_host = hba->hba_ptr; 98 104 99 105 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); 100 - if (!(ib_dev)) { 101 - printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); 106 + if (!ib_dev) { 107 + pr_err("Unable to allocate struct iblock_dev\n"); 102 108 return NULL; 103 109 } 104 110 ib_dev->ibd_host = ib_host; 105 111 106 - printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); 112 + pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); 107 113 108 114 return ib_dev; 109 115 } ··· 122 128 u32 dev_flags = 0; 123 129 int ret = -EINVAL; 124 130 125 - if 
(!(ib_dev)) { 126 - printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); 131 + if (!ib_dev) { 132 + pr_err("Unable to locate struct iblock_dev parameter\n"); 127 133 return ERR_PTR(ret); 128 134 } 129 135 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); ··· 131 137 * These settings need to be made tunable.. 132 138 */ 133 139 ib_dev->ibd_bio_set = bioset_create(32, 64); 134 - if (!(ib_dev->ibd_bio_set)) { 135 - printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); 140 + if (!ib_dev->ibd_bio_set) { 141 + pr_err("IBLOCK: Unable to create bioset()\n"); 136 142 return ERR_PTR(-ENOMEM); 137 143 } 138 - printk(KERN_INFO "IBLOCK: Created bio_set()\n"); 144 + pr_debug("IBLOCK: Created bio_set()\n"); 139 145 /* 140 146 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 141 147 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 142 148 */ 143 - printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", 149 + pr_debug( "IBLOCK: Claiming struct block_device: %s\n", 144 150 ib_dev->ibd_udev_path); 145 151 146 152 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, ··· 166 172 dev = transport_add_device_to_core_hba(hba, 167 173 &iblock_template, se_dev, dev_flags, ib_dev, 168 174 &dev_limits, "IBLOCK", IBLOCK_VERSION); 169 - if (!(dev)) 175 + if (!dev) 170 176 goto failed; 171 177 172 178 /* ··· 186 192 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 187 193 q->limits.discard_alignment; 188 194 189 - printk(KERN_INFO "IBLOCK: BLOCK Discard support available," 195 + pr_debug("IBLOCK: BLOCK Discard support available," 190 196 " disabled by default\n"); 191 197 } 192 198 ··· 221 227 } 222 228 223 229 static struct se_task * 224 - iblock_alloc_task(struct se_cmd *cmd) 230 + iblock_alloc_task(unsigned char *cdb) 225 231 { 226 232 struct iblock_req *ib_req; 227 233 228 234 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 229 - if (!(ib_req)) { 230 - printk(KERN_ERR "Unable to allocate memory for 
struct iblock_req\n"); 235 + if (!ib_req) { 236 + pr_err("Unable to allocate memory for struct iblock_req\n"); 231 237 return NULL; 232 238 } 233 239 234 - ib_req->ib_dev = cmd->se_dev->dev_ptr; 235 240 atomic_set(&ib_req->ib_bio_cnt, 0); 236 241 return &ib_req->ib_task; 237 242 } ··· 338 345 */ 339 346 ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); 340 347 if (ret != 0) { 341 - printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d " 348 + pr_err("IBLOCK: block_issue_flush() failed: %d " 342 349 " error_sector: %llu\n", ret, 343 350 (unsigned long long)error_sector); 344 351 } ··· 402 409 while (bio) { 403 410 nbio = bio->bi_next; 404 411 bio->bi_next = NULL; 405 - DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" 406 - " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); 412 + pr_debug("Calling submit_bio() task: %p bio: %p" 413 + " bio->bi_sector: %llu\n", task, bio, 414 + (unsigned long long)bio->bi_sector); 407 415 408 416 submit_bio(rw, bio); 409 417 bio = nbio; ··· 474 480 switch (token) { 475 481 case Opt_udev_path: 476 482 if (ib_dev->ibd_bd) { 477 - printk(KERN_ERR "Unable to set udev_path= while" 483 + pr_err("Unable to set udev_path= while" 478 484 " ib_dev->ibd_bd exists\n"); 479 485 ret = -EEXIST; 480 486 goto out; ··· 487 493 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, 488 494 "%s", arg_p); 489 495 kfree(arg_p); 490 - printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", 496 + pr_debug("IBLOCK: Referencing UDEV path: %s\n", 491 497 ib_dev->ibd_udev_path); 492 498 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; 493 499 break; ··· 510 516 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 511 517 512 518 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { 513 - printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); 519 + pr_err("Missing udev_path= parameters for IBLOCK\n"); 514 520 return -EINVAL; 515 521 } 516 522 ··· 568 574 struct bio *bio; 569 575 570 576 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 571 
- if (!(bio)) { 572 - printk(KERN_ERR "Unable to allocate memory for bio\n"); 577 + if (!bio) { 578 + pr_err("Unable to allocate memory for bio\n"); 573 579 *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 574 580 return NULL; 575 581 } 576 582 577 - DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" 578 - " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); 579 - DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); 583 + pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" 584 + " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); 585 + pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size); 580 586 581 587 bio->bi_bdev = ib_dev->ibd_bd; 582 588 bio->bi_private = task; ··· 585 591 bio->bi_sector = lba; 586 592 atomic_inc(&ib_req->ib_bio_cnt); 587 593 588 - DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); 589 - DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", 594 + pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); 595 + pr_debug("Set ib_req->ib_bio_cnt: %d\n", 590 596 atomic_read(&ib_req->ib_bio_cnt)); 591 597 return bio; 592 598 } ··· 600 606 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; 601 607 struct scatterlist *sg; 602 608 int ret = 0; 603 - u32 i, sg_num = task->task_sg_num; 609 + u32 i, sg_num = task->task_sg_nents; 604 610 sector_t block_lba; 605 611 /* 606 612 * Do starting conversion up from non 512-byte blocksize with ··· 615 621 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) 616 622 block_lba = task->task_lba; 617 623 else { 618 - printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" 624 + pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 619 625 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 620 626 return PYX_TRANSPORT_LU_COMM_FAILURE; 621 627 } 622 628 623 629 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); 624 - if (!(bio)) 630 + if (!bio) 625 631 return ret; 626 632 627 633 ib_req->ib_bio = bio; ··· 
630 636 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist 631 637 * from task->task_sg -> struct scatterlist memory. 632 638 */ 633 - for_each_sg(task->task_sg, sg, task->task_sg_num, i) { 634 - DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" 639 + for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { 640 + pr_debug("task: %p bio: %p Calling bio_add_page(): page:" 635 641 " %p len: %u offset: %u\n", task, bio, sg_page(sg), 636 642 sg->length, sg->offset); 637 643 again: 638 644 ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); 639 645 if (ret != sg->length) { 640 646 641 - DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", 642 - bio->bi_sector); 643 - DEBUG_IBLOCK("** task->task_size: %u\n", 647 + pr_debug("*** Set bio->bi_sector: %llu\n", 648 + (unsigned long long)bio->bi_sector); 649 + pr_debug("** task->task_size: %u\n", 644 650 task->task_size); 645 - DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", 651 + pr_debug("*** bio->bi_max_vecs: %u\n", 646 652 bio->bi_max_vecs); 647 - DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", 653 + pr_debug("*** bio->bi_vcnt: %u\n", 648 654 bio->bi_vcnt); 649 655 650 656 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, 651 657 block_lba, sg_num); 652 - if (!(bio)) 658 + if (!bio) 653 659 goto fail; 654 660 655 661 tbio = tbio->bi_next = bio; 656 - DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" 662 + pr_debug("-----------------> Added +1 bio: %p to" 657 663 " list, Going to again\n", bio); 658 664 goto again; 659 665 } 660 666 /* Always in 512 byte units for Linux/Block */ 661 667 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 662 668 sg_num--; 663 - DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented" 669 + pr_debug("task: %p bio-add_page() passed!, decremented" 664 670 " sg_num to %u\n", task, sg_num); 665 - DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" 666 - " to %llu\n", task, block_lba); 667 - DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" 671 + pr_debug("task: %p 
bio_add_page() passed!, increased lba" 672 + " to %llu\n", task, (unsigned long long)block_lba); 673 + pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" 668 674 " %u\n", task, bio->bi_vcnt); 669 675 } 670 676 ··· 710 716 /* 711 717 * Set -EIO if !BIO_UPTODATE and the passed is still err=0 712 718 */ 713 - if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) 719 + if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) 714 720 err = -EIO; 715 721 716 722 if (err != 0) { 717 - printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," 723 + pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," 718 724 " err: %d\n", bio, err); 719 725 /* 720 726 * Bump the ib_bio_err_cnt and release bio. ··· 725 731 /* 726 732 * Wait to complete the task until the last bio as completed. 727 733 */ 728 - if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) 734 + if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) 729 735 return; 730 736 731 737 ibr->ib_bio = NULL; 732 738 transport_complete_task(task, 0); 733 739 return; 734 740 } 735 - DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", 736 - task, bio, task->task_lba, bio->bi_sector, err); 741 + pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", 742 + task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); 737 743 /* 738 744 * bio_put() will call iblock_bio_destructor() to release the bio back 739 745 * to ibr->ib_bio_set. ··· 742 748 /* 743 749 * Wait to complete the task until the last bio as completed. 744 750 */ 745 - if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) 751 + if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) 746 752 return; 747 753 /* 748 754 * Return GOOD status for task if zero ib_bio_err_cnt exists.
-1
drivers/target/target_core_iblock.h
··· 12 12 atomic_t ib_bio_cnt; 13 13 atomic_t ib_bio_err_cnt; 14 14 struct bio *ib_bio; 15 - struct iblock_dev *ib_dev; 16 15 } ____cacheline_aligned; 17 16 18 17 #define IBDF_HAS_UDEV_PATH 0x01
+227 -227
drivers/target/target_core_pr.c
··· 62 62 char *buf, 63 63 u32 size) 64 64 { 65 - if (!(pr_reg->isid_present_at_reg)) 65 + if (!pr_reg->isid_present_at_reg) 66 66 return 0; 67 67 68 68 snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); ··· 95 95 struct se_session *sess = cmd->se_sess; 96 96 int ret; 97 97 98 - if (!(sess)) 98 + if (!sess) 99 99 return 0; 100 100 101 101 spin_lock(&dev->dev_reservation_lock); ··· 123 123 struct se_session *sess = cmd->se_sess; 124 124 struct se_portal_group *tpg = sess->se_tpg; 125 125 126 - if (!(sess) || !(tpg)) 126 + if (!sess || !tpg) 127 127 return 0; 128 128 129 129 spin_lock(&dev->dev_reservation_lock); ··· 142 142 dev->dev_res_bin_isid = 0; 143 143 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; 144 144 } 145 - printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" 145 + pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" 146 146 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 147 147 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, 148 148 sess->se_node_acl->initiatorname); ··· 159 159 160 160 if ((cmd->t_task_cdb[1] & 0x01) && 161 161 (cmd->t_task_cdb[1] & 0x02)) { 162 - printk(KERN_ERR "LongIO and Obselete Bits set, returning" 162 + pr_err("LongIO and Obselete Bits set, returning" 163 163 " ILLEGAL_REQUEST\n"); 164 164 return PYX_TRANSPORT_ILLEGAL_REQUEST; 165 165 } ··· 167 167 * This is currently the case for target_core_mod passthrough struct se_cmd 168 168 * ops 169 169 */ 170 - if (!(sess) || !(tpg)) 170 + if (!sess || !tpg) 171 171 return 0; 172 172 173 173 spin_lock(&dev->dev_reservation_lock); 174 174 if (dev->dev_reserved_node_acl && 175 175 (dev->dev_reserved_node_acl != sess->se_node_acl)) { 176 - printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n", 176 + pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", 177 177 tpg->se_tpg_tfo->get_fabric_name()); 178 - printk(KERN_ERR "Original reserver LUN: %u %s\n", 178 + pr_err("Original reserver LUN: %u %s\n", 179 179 cmd->se_lun->unpacked_lun, 
180 180 dev->dev_reserved_node_acl->initiatorname); 181 - printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" 181 + pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u" 182 182 " from %s \n", cmd->se_lun->unpacked_lun, 183 183 cmd->se_deve->mapped_lun, 184 184 sess->se_node_acl->initiatorname); ··· 192 192 dev->dev_res_bin_isid = sess->sess_bin_isid; 193 193 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; 194 194 } 195 - printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" 195 + pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" 196 196 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 197 197 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, 198 198 sess->se_node_acl->initiatorname); ··· 220 220 int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); 221 221 int conflict = 0; 222 222 223 - if (!(se_sess)) 223 + if (!se_sess) 224 224 return 0; 225 225 226 - if (!(crh)) 226 + if (!crh) 227 227 goto after_crh; 228 228 229 229 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, ··· 280 280 } 281 281 282 282 if (conflict) { 283 - printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE" 283 + pr_err("Received legacy SPC-2 RESERVE/RELEASE" 284 284 " while active SPC-3 registrations exist," 285 285 " returning RESERVATION_CONFLICT\n"); 286 286 return PYX_TRANSPORT_RESERVATION_CONFLICT; ··· 412 412 ret = (registered_nexus) ? 0 : 1; 413 413 break; 414 414 default: 415 - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" 415 + pr_err("Unknown PERSISTENT_RESERVE_OUT service" 416 416 " action: 0x%02x\n", cdb[1] & 0x1f); 417 417 return -EINVAL; 418 418 } ··· 459 459 ret = 0; /* Allowed */ 460 460 break; 461 461 default: 462 - printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", 462 + pr_err("Unknown MI Service Action: 0x%02x\n", 463 463 (cdb[1] & 0x1f)); 464 464 return -EINVAL; 465 465 } ··· 481 481 * Case where the CDB is explicitly allowed in the above switch 482 482 * statement. 
483 483 */ 484 - if (!(ret) && !(other_cdb)) { 484 + if (!ret && !other_cdb) { 485 485 #if 0 486 - printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s" 486 + pr_debug("Allowing explict CDB: 0x%02x for %s" 487 487 " reservation holder\n", cdb[0], 488 488 core_scsi3_pr_dump_type(pr_reg_type)); 489 489 #endif ··· 498 498 /* 499 499 * Conflict for write exclusive 500 500 */ 501 - printk(KERN_INFO "%s Conflict for unregistered nexus" 501 + pr_debug("%s Conflict for unregistered nexus" 502 502 " %s CDB: 0x%02x to %s reservation\n", 503 503 transport_dump_cmd_direction(cmd), 504 504 se_sess->se_node_acl->initiatorname, cdb[0], ··· 515 515 * nexuses to issue CDBs. 516 516 */ 517 517 #if 0 518 - if (!(registered_nexus)) { 519 - printk(KERN_INFO "Allowing implict CDB: 0x%02x" 518 + if (!registered_nexus) { 519 + pr_debug("Allowing implict CDB: 0x%02x" 520 520 " for %s reservation on unregistered" 521 521 " nexus\n", cdb[0], 522 522 core_scsi3_pr_dump_type(pr_reg_type)); ··· 531 531 * allow commands from registered nexuses. 532 532 */ 533 533 #if 0 534 - printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s" 534 + pr_debug("Allowing implict CDB: 0x%02x for %s" 535 535 " reservation\n", cdb[0], 536 536 core_scsi3_pr_dump_type(pr_reg_type)); 537 537 #endif 538 538 return 0; 539 539 } 540 540 } 541 - printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x" 541 + pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" 542 542 " for %s reservation\n", transport_dump_cmd_direction(cmd), 543 543 (registered_nexus) ? "" : "un", 544 544 se_sess->se_node_acl->initiatorname, cdb[0], ··· 575 575 struct se_session *sess = cmd->se_sess; 576 576 int ret; 577 577 578 - if (!(sess)) 578 + if (!sess) 579 579 return 0; 580 580 /* 581 581 * A legacy SPC-2 reservation is being held. 
··· 584 584 return core_scsi2_reservation_check(cmd, pr_reg_type); 585 585 586 586 spin_lock(&dev->dev_reservation_lock); 587 - if (!(dev->dev_pr_res_holder)) { 587 + if (!dev->dev_pr_res_holder) { 588 588 spin_unlock(&dev->dev_reservation_lock); 589 589 return 0; 590 590 } ··· 594 594 spin_unlock(&dev->dev_reservation_lock); 595 595 return -EINVAL; 596 596 } 597 - if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { 597 + if (!dev->dev_pr_res_holder->isid_present_at_reg) { 598 598 spin_unlock(&dev->dev_reservation_lock); 599 599 return 0; 600 600 } ··· 624 624 struct t10_pr_registration *pr_reg; 625 625 626 626 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); 627 - if (!(pr_reg)) { 628 - printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); 627 + if (!pr_reg) { 628 + pr_err("Unable to allocate struct t10_pr_registration\n"); 629 629 return NULL; 630 630 } 631 631 632 632 pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, 633 633 GFP_ATOMIC); 634 - if (!(pr_reg->pr_aptpl_buf)) { 635 - printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); 634 + if (!pr_reg->pr_aptpl_buf) { 635 + pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n"); 636 636 kmem_cache_free(t10_pr_reg_cache, pr_reg); 637 637 return NULL; 638 638 } ··· 692 692 */ 693 693 pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, 694 694 sa_res_key, all_tg_pt, aptpl); 695 - if (!(pr_reg)) 695 + if (!pr_reg) 696 696 return NULL; 697 697 /* 698 698 * Return pointer to pr_reg for ALL_TG_PT=0 699 699 */ 700 - if (!(all_tg_pt)) 700 + if (!all_tg_pt) 701 701 return pr_reg; 702 702 /* 703 703 * Create list of matching SCSI Initiator Port registrations ··· 717 717 * that have not been make explict via a ConfigFS 718 718 * MappedLUN group for the SCSI Initiator Node ACL. 
719 719 */ 720 - if (!(deve_tmp->se_lun_acl)) 720 + if (!deve_tmp->se_lun_acl) 721 721 continue; 722 722 723 723 nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; ··· 751 751 */ 752 752 ret = core_scsi3_lunacl_depend_item(deve_tmp); 753 753 if (ret < 0) { 754 - printk(KERN_ERR "core_scsi3_lunacl_depend" 754 + pr_err("core_scsi3_lunacl_depend" 755 755 "_item() failed\n"); 756 756 atomic_dec(&port->sep_tg_pt_ref_cnt); 757 757 smp_mb__after_atomic_dec(); ··· 769 769 pr_reg_atp = __core_scsi3_do_alloc_registration(dev, 770 770 nacl_tmp, deve_tmp, NULL, 771 771 sa_res_key, all_tg_pt, aptpl); 772 - if (!(pr_reg_atp)) { 772 + if (!pr_reg_atp) { 773 773 atomic_dec(&port->sep_tg_pt_ref_cnt); 774 774 smp_mb__after_atomic_dec(); 775 775 atomic_dec(&deve_tmp->pr_ref_count); ··· 817 817 { 818 818 struct t10_pr_registration *pr_reg; 819 819 820 - if (!(i_port) || !(t_port) || !(sa_res_key)) { 821 - printk(KERN_ERR "Illegal parameters for APTPL registration\n"); 820 + if (!i_port || !t_port || !sa_res_key) { 821 + pr_err("Illegal parameters for APTPL registration\n"); 822 822 return -EINVAL; 823 823 } 824 824 825 825 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); 826 - if (!(pr_reg)) { 827 - printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); 826 + if (!pr_reg) { 827 + pr_err("Unable to allocate struct t10_pr_registration\n"); 828 828 return -ENOMEM; 829 829 } 830 830 pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); ··· 869 869 pr_reg->pr_res_holder = res_holder; 870 870 871 871 list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); 872 - printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from" 872 + pr_debug("SPC-3 PR APTPL Successfully added registration%s from" 873 873 " metadata\n", (res_holder) ? 
"+reservation" : ""); 874 874 return 0; 875 875 } ··· 891 891 dev->dev_pr_res_holder = pr_reg; 892 892 spin_unlock(&dev->dev_reservation_lock); 893 893 894 - printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" 894 + pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created" 895 895 " new reservation holder TYPE: %s ALL_TG_PT: %d\n", 896 896 tpg->se_tpg_tfo->get_fabric_name(), 897 897 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 898 898 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 899 - printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", 899 + pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", 900 900 tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, 901 901 (prf_isid) ? &i_buf[0] : ""); 902 902 } ··· 936 936 spin_lock(&pr_tmpl->aptpl_reg_lock); 937 937 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, 938 938 pr_reg_aptpl_list) { 939 - if (!(strcmp(pr_reg->pr_iport, i_port)) && 939 + if (!strcmp(pr_reg->pr_iport, i_port) && 940 940 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && 941 941 !(strcmp(pr_reg->pr_tport, t_port)) && 942 942 (pr_reg->pr_reg_tpgt == tpgt) && ··· 1006 1006 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 1007 1007 PR_REG_ISID_ID_LEN); 1008 1008 1009 - printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator" 1009 + pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" 1010 1010 " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? 1011 1011 "_AND_MOVE" : (register_type == 1) ? 1012 1012 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, 1013 1013 (prf_isid) ? 
i_buf : ""); 1014 - printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", 1014 + pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", 1015 1015 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), 1016 1016 tfo->tpg_get_tag(se_tpg)); 1017 - printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1017 + pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1018 1018 " Port(s)\n", tfo->get_fabric_name(), 1019 1019 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", 1020 1020 dev->transport->name); 1021 - printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1021 + pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1022 1022 " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), 1023 1023 pr_reg->pr_res_key, pr_reg->pr_res_generation, 1024 1024 pr_reg->pr_reg_aptpl); ··· 1062 1062 /* 1063 1063 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. 1064 1064 */ 1065 - if (!(pr_reg->pr_reg_all_tg_pt) || (register_move)) 1065 + if (!pr_reg->pr_reg_all_tg_pt || register_move) 1066 1066 return; 1067 1067 /* 1068 1068 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 ··· 1106 1106 1107 1107 pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, 1108 1108 sa_res_key, all_tg_pt, aptpl); 1109 - if (!(pr_reg)) 1109 + if (!pr_reg) 1110 1110 return -EPERM; 1111 1111 1112 1112 __core_scsi3_add_registration(dev, nacl, pr_reg, ··· 1137 1137 * If this registration does NOT contain a fabric provided 1138 1138 * ISID, then we have found a match. 1139 1139 */ 1140 - if (!(pr_reg->isid_present_at_reg)) { 1140 + if (!pr_reg->isid_present_at_reg) { 1141 1141 /* 1142 1142 * Determine if this SCSI device server requires that 1143 1143 * SCSI Intiatior TransportID w/ ISIDs is enforced ··· 1157 1157 * SCSI Initiator Port TransportIDs, then we expect a valid 1158 1158 * matching ISID to be provided by the local SCSI Initiator Port. 
1159 1159 */ 1160 - if (!(isid)) 1160 + if (!isid) 1161 1161 continue; 1162 1162 if (strcmp(isid, pr_reg->pr_reg_isid)) 1163 1163 continue; ··· 1206 1206 1207 1207 spin_lock(&dev->dev_reservation_lock); 1208 1208 pr_res_holder = dev->dev_pr_res_holder; 1209 - if (!(pr_res_holder)) { 1209 + if (!pr_res_holder) { 1210 1210 spin_unlock(&dev->dev_reservation_lock); 1211 1211 return ret; 1212 1212 } ··· 1236 1236 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, 1237 1237 pr_reg->pr_reg_nacl->initiatorname)) && 1238 1238 (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { 1239 - printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1" 1239 + pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1" 1240 1240 " UNREGISTER while existing reservation with matching" 1241 1241 " key 0x%016Lx is present from another SCSI Initiator" 1242 1242 " Port\n", pr_reg->pr_res_key); ··· 1283 1283 */ 1284 1284 while (atomic_read(&pr_reg->pr_res_holders) != 0) { 1285 1285 spin_unlock(&pr_tmpl->registration_lock); 1286 - printk("SPC-3 PR [%s] waiting for pr_res_holders\n", 1286 + pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n", 1287 1287 tfo->get_fabric_name()); 1288 1288 cpu_relax(); 1289 1289 spin_lock(&pr_tmpl->registration_lock); 1290 1290 } 1291 1291 1292 - printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator" 1292 + pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" 1293 1293 " Node: %s%s\n", tfo->get_fabric_name(), 1294 1294 pr_reg->pr_reg_nacl->initiatorname, 1295 1295 (prf_isid) ? &i_buf[0] : ""); 1296 - printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1296 + pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1297 1297 " Port(s)\n", tfo->get_fabric_name(), 1298 1298 (pr_reg->pr_reg_all_tg_pt) ? 
"ALL" : "SINGLE", 1299 1299 dev->transport->name); 1300 - printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1300 + pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1301 1301 " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, 1302 1302 pr_reg->pr_res_generation); 1303 1303 1304 - if (!(preempt_and_abort_list)) { 1304 + if (!preempt_and_abort_list) { 1305 1305 pr_reg->pr_reg_deve = NULL; 1306 1306 pr_reg->pr_reg_nacl = NULL; 1307 1307 kfree(pr_reg->pr_aptpl_buf); ··· 1430 1430 /* 1431 1431 * For nacl->dynamic_node_acl=1 1432 1432 */ 1433 - if (!(lun_acl)) 1433 + if (!lun_acl) 1434 1434 return 0; 1435 1435 1436 1436 nacl = lun_acl->se_lun_nacl; ··· 1448 1448 /* 1449 1449 * For nacl->dynamic_node_acl=1 1450 1450 */ 1451 - if (!(lun_acl)) { 1451 + if (!lun_acl) { 1452 1452 atomic_dec(&se_deve->pr_ref_count); 1453 1453 smp_mb__after_atomic_dec(); 1454 1454 return; ··· 1500 1500 * processing in the loop of tid_dest_list below. 1501 1501 */ 1502 1502 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); 1503 - if (!(tidh_new)) { 1504 - printk(KERN_ERR "Unable to allocate tidh_new\n"); 1503 + if (!tidh_new) { 1504 + pr_err("Unable to allocate tidh_new\n"); 1505 1505 return PYX_TRANSPORT_LU_COMM_FAILURE; 1506 1506 } 1507 1507 INIT_LIST_HEAD(&tidh_new->dest_list); ··· 1512 1512 local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, 1513 1513 se_sess->se_node_acl, local_se_deve, l_isid, 1514 1514 sa_res_key, all_tg_pt, aptpl); 1515 - if (!(local_pr_reg)) { 1515 + if (!local_pr_reg) { 1516 1516 kfree(tidh_new); 1517 1517 return PYX_TRANSPORT_LU_COMM_FAILURE; 1518 1518 } ··· 1537 1537 tpdl |= buf[27] & 0xff; 1538 1538 1539 1539 if ((tpdl + 28) != cmd->data_length) { 1540 - printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header" 1540 + pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" 1541 1541 " does not equal CDB data_length: %u\n", tpdl, 1542 1542 cmd->data_length); 1543 1543 ret = 
PYX_TRANSPORT_INVALID_PARAMETER_LIST; ··· 1557 1557 spin_lock(&dev->se_port_lock); 1558 1558 list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { 1559 1559 tmp_tpg = tmp_port->sep_tpg; 1560 - if (!(tmp_tpg)) 1560 + if (!tmp_tpg) 1561 1561 continue; 1562 1562 tmp_tf_ops = tmp_tpg->se_tpg_tfo; 1563 - if (!(tmp_tf_ops)) 1563 + if (!tmp_tf_ops) 1564 1564 continue; 1565 - if (!(tmp_tf_ops->get_fabric_proto_ident) || 1566 - !(tmp_tf_ops->tpg_parse_pr_out_transport_id)) 1565 + if (!tmp_tf_ops->get_fabric_proto_ident || 1566 + !tmp_tf_ops->tpg_parse_pr_out_transport_id) 1567 1567 continue; 1568 1568 /* 1569 1569 * Look for the matching proto_ident provided by ··· 1577 1577 i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( 1578 1578 tmp_tpg, (const char *)ptr, &tid_len, 1579 1579 &iport_ptr); 1580 - if (!(i_str)) 1580 + if (!i_str) 1581 1581 continue; 1582 1582 1583 1583 atomic_inc(&tmp_tpg->tpg_pr_ref_count); ··· 1586 1586 1587 1587 ret = core_scsi3_tpg_depend_item(tmp_tpg); 1588 1588 if (ret != 0) { 1589 - printk(KERN_ERR " core_scsi3_tpg_depend_item()" 1589 + pr_err(" core_scsi3_tpg_depend_item()" 1590 1590 " for tmp_tpg\n"); 1591 1591 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1592 1592 smp_mb__after_atomic_dec(); ··· 1607 1607 } 1608 1608 spin_unlock_bh(&tmp_tpg->acl_node_lock); 1609 1609 1610 - if (!(dest_node_acl)) { 1610 + if (!dest_node_acl) { 1611 1611 core_scsi3_tpg_undepend_item(tmp_tpg); 1612 1612 spin_lock(&dev->se_port_lock); 1613 1613 continue; ··· 1615 1615 1616 1616 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 1617 1617 if (ret != 0) { 1618 - printk(KERN_ERR "configfs_depend_item() failed" 1618 + pr_err("configfs_depend_item() failed" 1619 1619 " for dest_node_acl->acl_group\n"); 1620 1620 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1621 1621 smp_mb__after_atomic_dec(); ··· 1625 1625 } 1626 1626 1627 1627 dest_tpg = tmp_tpg; 1628 - printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" 1628 + pr_debug("SPC-3 PR SPEC_I_PT: Located %s 
Node:" 1629 1629 " %s Port RTPI: %hu\n", 1630 1630 dest_tpg->se_tpg_tfo->get_fabric_name(), 1631 1631 dest_node_acl->initiatorname, dest_rtpi); ··· 1635 1635 } 1636 1636 spin_unlock(&dev->se_port_lock); 1637 1637 1638 - if (!(dest_tpg)) { 1639 - printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate" 1638 + if (!dest_tpg) { 1639 + pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" 1640 1640 " dest_tpg\n"); 1641 1641 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1642 1642 goto out; 1643 1643 } 1644 1644 #if 0 1645 - printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" 1645 + pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" 1646 1646 " tid_len: %d for %s + %s\n", 1647 1647 dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, 1648 1648 tpdl, tid_len, i_str, iport_ptr); 1649 1649 #endif 1650 1650 if (tid_len > tpdl) { 1651 - printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:" 1651 + pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:" 1652 1652 " %u for Transport ID: %s\n", tid_len, ptr); 1653 1653 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1654 1654 core_scsi3_tpg_undepend_item(dest_tpg); ··· 1662 1662 */ 1663 1663 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, 1664 1664 dest_rtpi); 1665 - if (!(dest_se_deve)) { 1666 - printk(KERN_ERR "Unable to locate %s dest_se_deve" 1665 + if (!dest_se_deve) { 1666 + pr_err("Unable to locate %s dest_se_deve" 1667 1667 " from destination RTPI: %hu\n", 1668 1668 dest_tpg->se_tpg_tfo->get_fabric_name(), 1669 1669 dest_rtpi); ··· 1676 1676 1677 1677 ret = core_scsi3_lunacl_depend_item(dest_se_deve); 1678 1678 if (ret < 0) { 1679 - printk(KERN_ERR "core_scsi3_lunacl_depend_item()" 1679 + pr_err("core_scsi3_lunacl_depend_item()" 1680 1680 " failed\n"); 1681 1681 atomic_dec(&dest_se_deve->pr_ref_count); 1682 1682 smp_mb__after_atomic_dec(); ··· 1686 1686 goto out; 1687 1687 } 1688 1688 #if 0 1689 - printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" 1689 + pr_debug("SPC-3 PR SPEC_I_PT: Located %s 
Node: %s" 1690 1690 " dest_se_deve mapped_lun: %u\n", 1691 1691 dest_tpg->se_tpg_tfo->get_fabric_name(), 1692 1692 dest_node_acl->initiatorname, dest_se_deve->mapped_lun); ··· 1714 1714 */ 1715 1715 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), 1716 1716 GFP_KERNEL); 1717 - if (!(tidh_new)) { 1718 - printk(KERN_ERR "Unable to allocate tidh_new\n"); 1717 + if (!tidh_new) { 1718 + pr_err("Unable to allocate tidh_new\n"); 1719 1719 core_scsi3_lunacl_undepend_item(dest_se_deve); 1720 1720 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1721 1721 core_scsi3_tpg_undepend_item(dest_tpg); ··· 1746 1746 dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, 1747 1747 dest_node_acl, dest_se_deve, iport_ptr, 1748 1748 sa_res_key, all_tg_pt, aptpl); 1749 - if (!(dest_pr_reg)) { 1749 + if (!dest_pr_reg) { 1750 1750 core_scsi3_lunacl_undepend_item(dest_se_deve); 1751 1751 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1752 1752 core_scsi3_tpg_undepend_item(dest_tpg); ··· 1795 1795 __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, 1796 1796 dest_pr_reg, 0, 0); 1797 1797 1798 - printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" 1798 + pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" 1799 1799 " registered Transport ID for Node: %s%s Mapped LUN:" 1800 1800 " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), 1801 1801 dest_node_acl->initiatorname, (prf_isid) ? 
··· 1923 1923 } 1924 1924 1925 1925 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1926 - printk(KERN_ERR "Unable to update renaming" 1926 + pr_err("Unable to update renaming" 1927 1927 " APTPL metadata\n"); 1928 1928 spin_unlock(&su_dev->t10_pr.registration_lock); 1929 1929 return -EMSGSIZE; ··· 1941 1941 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); 1942 1942 1943 1943 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1944 - printk(KERN_ERR "Unable to update renaming" 1944 + pr_err("Unable to update renaming" 1945 1945 " APTPL metadata\n"); 1946 1946 spin_unlock(&su_dev->t10_pr.registration_lock); 1947 1947 return -EMSGSIZE; ··· 1951 1951 } 1952 1952 spin_unlock(&su_dev->t10_pr.registration_lock); 1953 1953 1954 - if (!(reg_count)) 1954 + if (!reg_count) 1955 1955 len += sprintf(buf+len, "No Registrations or Reservations"); 1956 1956 1957 1957 return 0; ··· 1993 1993 memset(path, 0, 512); 1994 1994 1995 1995 if (strlen(&wwn->unit_serial[0]) >= 512) { 1996 - printk(KERN_ERR "WWN value for struct se_device does not fit" 1996 + pr_err("WWN value for struct se_device does not fit" 1997 1997 " into path buffer\n"); 1998 1998 return -EMSGSIZE; 1999 1999 } ··· 2001 2001 snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); 2002 2002 file = filp_open(path, flags, 0600); 2003 2003 if (IS_ERR(file) || !file || !file->f_dentry) { 2004 - printk(KERN_ERR "filp_open(%s) for APTPL metadata" 2004 + pr_err("filp_open(%s) for APTPL metadata" 2005 2005 " failed\n", path); 2006 2006 return (PTR_ERR(file) < 0 ? 
PTR_ERR(file) : -ENOENT); 2007 2007 } 2008 2008 2009 2009 iov[0].iov_base = &buf[0]; 2010 - if (!(pr_aptpl_buf_len)) 2010 + if (!pr_aptpl_buf_len) 2011 2011 iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ 2012 2012 else 2013 2013 iov[0].iov_len = pr_aptpl_buf_len; ··· 2018 2018 set_fs(old_fs); 2019 2019 2020 2020 if (ret < 0) { 2021 - printk("Error writing APTPL metadata file: %s\n", path); 2021 + pr_debug("Error writing APTPL metadata file: %s\n", path); 2022 2022 filp_close(file, NULL); 2023 2023 return -EIO; 2024 2024 } ··· 2038 2038 /* 2039 2039 * Can be called with a NULL pointer from PROUT service action CLEAR 2040 2040 */ 2041 - if (!(in_buf)) { 2041 + if (!in_buf) { 2042 2042 memset(null_buf, 0, 64); 2043 2043 buf = &null_buf[0]; 2044 2044 /* ··· 2088 2088 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; 2089 2089 int pr_holder = 0, ret = 0, type; 2090 2090 2091 - if (!(se_sess) || !(se_lun)) { 2092 - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2091 + if (!se_sess || !se_lun) { 2092 + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2093 2093 return PYX_TRANSPORT_LU_COMM_FAILURE; 2094 2094 } 2095 2095 se_tpg = se_sess->se_tpg; ··· 2105 2105 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 2106 2106 */ 2107 2107 pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); 2108 - if (!(pr_reg_e)) { 2108 + if (!pr_reg_e) { 2109 2109 if (res_key) { 2110 - printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero" 2110 + pr_warn("SPC-3 PR: Reservation Key non-zero" 2111 2111 " for SA REGISTER, returning CONFLICT\n"); 2112 2112 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2113 2113 } 2114 2114 /* 2115 2115 * Do nothing but return GOOD status. 
2116 2116 */ 2117 - if (!(sa_res_key)) 2117 + if (!sa_res_key) 2118 2118 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2119 2119 2120 - if (!(spec_i_pt)) { 2120 + if (!spec_i_pt) { 2121 2121 /* 2122 2122 * Perform the Service Action REGISTER on the Initiator 2123 2123 * Port Endpoint that the PRO was received from on the ··· 2128 2128 sa_res_key, all_tg_pt, aptpl, 2129 2129 ignore_key, 0); 2130 2130 if (ret != 0) { 2131 - printk(KERN_ERR "Unable to allocate" 2131 + pr_err("Unable to allocate" 2132 2132 " struct t10_pr_registration\n"); 2133 2133 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2134 2134 } ··· 2149 2149 /* 2150 2150 * Nothing left to do for the APTPL=0 case. 2151 2151 */ 2152 - if (!(aptpl)) { 2152 + if (!aptpl) { 2153 2153 pr_tmpl->pr_aptpl_active = 0; 2154 2154 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 2155 - printk("SPC-3 PR: Set APTPL Bit Deactivated for" 2155 + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" 2156 2156 " REGISTER\n"); 2157 2157 return 0; 2158 2158 } ··· 2167 2167 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2168 2168 &pr_reg->pr_aptpl_buf[0], 2169 2169 pr_tmpl->pr_aptpl_buf_len); 2170 - if (!(ret)) { 2170 + if (!ret) { 2171 2171 pr_tmpl->pr_aptpl_active = 1; 2172 - printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); 2172 + pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); 2173 2173 } 2174 2174 2175 2175 core_scsi3_put_pr_reg(pr_reg); ··· 2181 2181 pr_reg = pr_reg_e; 2182 2182 type = pr_reg->pr_res_type; 2183 2183 2184 - if (!(ignore_key)) { 2184 + if (!ignore_key) { 2185 2185 if (res_key != pr_reg->pr_res_key) { 2186 - printk(KERN_ERR "SPC-3 PR REGISTER: Received" 2186 + pr_err("SPC-3 PR REGISTER: Received" 2187 2187 " res_key: 0x%016Lx does not match" 2188 2188 " existing SA REGISTER res_key:" 2189 2189 " 0x%016Lx\n", res_key, ··· 2193 2193 } 2194 2194 } 2195 2195 if (spec_i_pt) { 2196 - printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT" 2196 + pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" 2197 2197 
" set while sa_res_key=0\n"); 2198 2198 core_scsi3_put_pr_reg(pr_reg); 2199 2199 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; ··· 2203 2203 * must also set ALL_TG_PT=1 in the incoming PROUT. 2204 2204 */ 2205 2205 if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { 2206 - printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1" 2206 + pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1" 2207 2207 " registration exists, but ALL_TG_PT=1 bit not" 2208 2208 " present in received PROUT\n"); 2209 2209 core_scsi3_put_pr_reg(pr_reg); ··· 2215 2215 if (aptpl) { 2216 2216 pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, 2217 2217 GFP_KERNEL); 2218 - if (!(pr_aptpl_buf)) { 2219 - printk(KERN_ERR "Unable to allocate" 2218 + if (!pr_aptpl_buf) { 2219 + pr_err("Unable to allocate" 2220 2220 " pr_aptpl_buf\n"); 2221 2221 core_scsi3_put_pr_reg(pr_reg); 2222 2222 return PYX_TRANSPORT_LU_COMM_FAILURE; ··· 2227 2227 * Nexus sa_res_key=1 Change Reservation Key for registered I_T 2228 2228 * Nexus. 2229 2229 */ 2230 - if (!(sa_res_key)) { 2230 + if (!sa_res_key) { 2231 2231 pr_holder = core_scsi3_check_implict_release( 2232 2232 cmd->se_dev, pr_reg); 2233 2233 if (pr_holder < 0) { ··· 2246 2246 &pr_tmpl->registration_list, 2247 2247 pr_reg_list) { 2248 2248 2249 - if (!(pr_reg_p->pr_reg_all_tg_pt)) 2249 + if (!pr_reg_p->pr_reg_all_tg_pt) 2250 2250 continue; 2251 2251 2252 2252 if (pr_reg_p->pr_res_key != res_key) ··· 2295 2295 } 2296 2296 spin_unlock(&pr_tmpl->registration_lock); 2297 2297 2298 - if (!(aptpl)) { 2298 + if (!aptpl) { 2299 2299 pr_tmpl->pr_aptpl_active = 0; 2300 2300 core_scsi3_update_and_write_aptpl(dev, NULL, 0); 2301 - printk("SPC-3 PR: Set APTPL Bit Deactivated" 2301 + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" 2302 2302 " for UNREGISTER\n"); 2303 2303 return 0; 2304 2304 } ··· 2306 2306 ret = core_scsi3_update_and_write_aptpl(dev, 2307 2307 &pr_aptpl_buf[0], 2308 2308 pr_tmpl->pr_aptpl_buf_len); 2309 - if (!(ret)) { 2309 + if (!ret) { 2310 2310 pr_tmpl->pr_aptpl_active = 1; 2311 - 
printk("SPC-3 PR: Set APTPL Bit Activated" 2311 + pr_debug("SPC-3 PR: Set APTPL Bit Activated" 2312 2312 " for UNREGISTER\n"); 2313 2313 } 2314 2314 ··· 2323 2323 pr_reg->pr_res_generation = core_scsi3_pr_generation( 2324 2324 cmd->se_dev); 2325 2325 pr_reg->pr_res_key = sa_res_key; 2326 - printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" 2326 + pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" 2327 2327 " Key for %s to: 0x%016Lx PRgeneration:" 2328 2328 " 0x%08x\n", cmd->se_tfo->get_fabric_name(), 2329 2329 (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", 2330 2330 pr_reg->pr_reg_nacl->initiatorname, 2331 2331 pr_reg->pr_res_key, pr_reg->pr_res_generation); 2332 2332 2333 - if (!(aptpl)) { 2333 + if (!aptpl) { 2334 2334 pr_tmpl->pr_aptpl_active = 0; 2335 2335 core_scsi3_update_and_write_aptpl(dev, NULL, 0); 2336 2336 core_scsi3_put_pr_reg(pr_reg); 2337 - printk("SPC-3 PR: Set APTPL Bit Deactivated" 2337 + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" 2338 2338 " for REGISTER\n"); 2339 2339 return 0; 2340 2340 } ··· 2342 2342 ret = core_scsi3_update_and_write_aptpl(dev, 2343 2343 &pr_aptpl_buf[0], 2344 2344 pr_tmpl->pr_aptpl_buf_len); 2345 - if (!(ret)) { 2345 + if (!ret) { 2346 2346 pr_tmpl->pr_aptpl_active = 1; 2347 - printk("SPC-3 PR: Set APTPL Bit Activated" 2347 + pr_debug("SPC-3 PR: Set APTPL Bit Activated" 2348 2348 " for REGISTER\n"); 2349 2349 } 2350 2350 ··· 2395 2395 2396 2396 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2397 2397 2398 - if (!(se_sess) || !(se_lun)) { 2399 - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2398 + if (!se_sess || !se_lun) { 2399 + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2400 2400 return PYX_TRANSPORT_LU_COMM_FAILURE; 2401 2401 } 2402 2402 se_tpg = se_sess->se_tpg; ··· 2406 2406 */ 2407 2407 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 2408 2408 se_sess); 2409 - if (!(pr_reg)) { 2410 - printk(KERN_ERR "SPC-3 PR: Unable to locate" 2409 + if (!pr_reg) { 2410 + 
pr_err("SPC-3 PR: Unable to locate" 2411 2411 " PR_REGISTERED *pr_reg for RESERVE\n"); 2412 2412 return PYX_TRANSPORT_LU_COMM_FAILURE; 2413 2413 } ··· 2421 2421 * registered with the logical unit for the I_T nexus; and 2422 2422 */ 2423 2423 if (res_key != pr_reg->pr_res_key) { 2424 - printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx" 2424 + pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx" 2425 2425 " does not match existing SA REGISTER res_key:" 2426 2426 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2427 2427 core_scsi3_put_pr_reg(pr_reg); ··· 2438 2438 * and that persistent reservation has a scope of LU_SCOPE. 2439 2439 */ 2440 2440 if (scope != PR_SCOPE_LU_SCOPE) { 2441 - printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2441 + pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2442 2442 core_scsi3_put_pr_reg(pr_reg); 2443 2443 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2444 2444 } ··· 2462 2462 */ 2463 2463 if (pr_res_holder != pr_reg) { 2464 2464 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2465 - printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" 2465 + pr_err("SPC-3 PR: Attempted RESERVE from" 2466 2466 " [%s]: %s while reservation already held by" 2467 2467 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2468 2468 cmd->se_tfo->get_fabric_name(), ··· 2484 2484 if ((pr_res_holder->pr_res_type != type) || 2485 2485 (pr_res_holder->pr_res_scope != scope)) { 2486 2486 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2487 - printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" 2487 + pr_err("SPC-3 PR: Attempted RESERVE from" 2488 2488 " [%s]: %s trying to change TYPE and/or SCOPE," 2489 2489 " while reservation already held by [%s]: %s," 2490 2490 " returning RESERVATION_CONFLICT\n", ··· 2522 2522 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 2523 2523 PR_REG_ISID_ID_LEN); 2524 2524 2525 - printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" 2525 + pr_debug("SPC-3 PR [%s] Service 
Action: RESERVE created new" 2526 2526 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2527 2527 cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), 2528 2528 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2529 - printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", 2529 + pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", 2530 2530 cmd->se_tfo->get_fabric_name(), 2531 2531 se_sess->se_node_acl->initiatorname, 2532 2532 (prf_isid) ? &i_buf[0] : ""); ··· 2536 2536 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2537 2537 &pr_reg->pr_aptpl_buf[0], 2538 2538 pr_tmpl->pr_aptpl_buf_len); 2539 - if (!(ret)) 2540 - printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" 2539 + if (!ret) 2540 + pr_debug("SPC-3 PR: Updated APTPL metadata" 2541 2541 " for RESERVE\n"); 2542 2542 } 2543 2543 ··· 2564 2564 ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); 2565 2565 break; 2566 2566 default: 2567 - printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:" 2567 + pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" 2568 2568 " 0x%02x\n", type); 2569 2569 return PYX_TRANSPORT_INVALID_CDB_FIELD; 2570 2570 } ··· 2593 2593 */ 2594 2594 dev->dev_pr_res_holder = NULL; 2595 2595 2596 - printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2596 + pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2597 2597 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2598 2598 tfo->get_fabric_name(), (explict) ? "explict" : "implict", 2599 2599 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 2600 2600 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2601 - printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n", 2601 + pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", 2602 2602 tfo->get_fabric_name(), se_nacl->initiatorname, 2603 2603 (prf_isid) ? 
&i_buf[0] : ""); 2604 2604 /* ··· 2620 2620 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2621 2621 int ret, all_reg = 0; 2622 2622 2623 - if (!(se_sess) || !(se_lun)) { 2624 - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2623 + if (!se_sess || !se_lun) { 2624 + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2625 2625 return PYX_TRANSPORT_LU_COMM_FAILURE; 2626 2626 } 2627 2627 /* 2628 2628 * Locate the existing *pr_reg via struct se_node_acl pointers 2629 2629 */ 2630 2630 pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); 2631 - if (!(pr_reg)) { 2632 - printk(KERN_ERR "SPC-3 PR: Unable to locate" 2631 + if (!pr_reg) { 2632 + pr_err("SPC-3 PR: Unable to locate" 2633 2633 " PR_REGISTERED *pr_reg for RELEASE\n"); 2634 2634 return PYX_TRANSPORT_LU_COMM_FAILURE; 2635 2635 } ··· 2647 2647 */ 2648 2648 spin_lock(&dev->dev_reservation_lock); 2649 2649 pr_res_holder = dev->dev_pr_res_holder; 2650 - if (!(pr_res_holder)) { 2650 + if (!pr_res_holder) { 2651 2651 /* 2652 2652 * No persistent reservation, return GOOD status. 
2653 2653 */ ··· 2684 2684 * that is registered with the logical unit for the I_T nexus; 2685 2685 */ 2686 2686 if (res_key != pr_reg->pr_res_key) { 2687 - printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx" 2687 + pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx" 2688 2688 " does not match existing SA REGISTER res_key:" 2689 2689 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2690 2690 spin_unlock(&dev->dev_reservation_lock); ··· 2700 2700 if ((pr_res_holder->pr_res_type != type) || 2701 2701 (pr_res_holder->pr_res_scope != scope)) { 2702 2702 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2703 - printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release" 2703 + pr_err("SPC-3 PR RELEASE: Attempted to release" 2704 2704 " reservation from [%s]: %s with different TYPE " 2705 2705 "and/or SCOPE while reservation already held by" 2706 2706 " [%s]: %s, returning RESERVATION_CONFLICT\n", ··· 2767 2767 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2768 2768 &pr_reg->pr_aptpl_buf[0], 2769 2769 pr_tmpl->pr_aptpl_buf_len); 2770 - if (!(ret)) 2771 - printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); 2770 + if (!ret) 2771 + pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); 2772 2772 } 2773 2773 2774 2774 core_scsi3_put_pr_reg(pr_reg); ··· 2791 2791 */ 2792 2792 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, 2793 2793 se_sess->se_node_acl, se_sess); 2794 - if (!(pr_reg_n)) { 2795 - printk(KERN_ERR "SPC-3 PR: Unable to locate" 2794 + if (!pr_reg_n) { 2795 + pr_err("SPC-3 PR: Unable to locate" 2796 2796 " PR_REGISTERED *pr_reg for CLEAR\n"); 2797 2797 return PYX_TRANSPORT_LU_COMM_FAILURE; 2798 2798 } ··· 2808 2808 * that is registered with the logical unit for the I_T nexus. 
2809 2809 */ 2810 2810 if (res_key != pr_reg_n->pr_res_key) { 2811 - printk(KERN_ERR "SPC-3 PR REGISTER: Received" 2811 + pr_err("SPC-3 PR REGISTER: Received" 2812 2812 " res_key: 0x%016Lx does not match" 2813 2813 " existing SA REGISTER res_key:" 2814 2814 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); ··· 2845 2845 * command with CLEAR service action was received, with the 2846 2846 * additional sense code set to RESERVATIONS PREEMPTED. 2847 2847 */ 2848 - if (!(calling_it_nexus)) 2848 + if (!calling_it_nexus) 2849 2849 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 2850 2850 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); 2851 2851 } 2852 2852 spin_unlock(&pr_tmpl->registration_lock); 2853 2853 2854 - printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", 2854 + pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", 2855 2855 cmd->se_tfo->get_fabric_name()); 2856 2856 2857 2857 if (pr_tmpl->pr_aptpl_active) { 2858 2858 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 2859 - printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" 2859 + pr_debug("SPC-3 PR: Updated APTPL metadata" 2860 2860 " for CLEAR\n"); 2861 2861 } 2862 2862 ··· 2895 2895 pr_reg->pr_res_type = type; 2896 2896 pr_reg->pr_res_scope = scope; 2897 2897 2898 - printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new" 2898 + pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" 2899 2899 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2900 2900 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", 2901 2901 core_scsi3_pr_dump_type(type), 2902 2902 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2903 - printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", 2903 + pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", 2904 2904 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", 2905 2905 nacl->initiatorname, (prf_isid) ? 
&i_buf[0] : ""); 2906 2906 /* ··· 2926 2926 if (pr_reg_holder == pr_reg) 2927 2927 continue; 2928 2928 if (pr_reg->pr_res_holder) { 2929 - printk(KERN_WARNING "pr_reg->pr_res_holder still set\n"); 2929 + pr_warn("pr_reg->pr_res_holder still set\n"); 2930 2930 continue; 2931 2931 } 2932 2932 ··· 2971 2971 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 2972 2972 int prh_type = 0, prh_scope = 0, ret; 2973 2973 2974 - if (!(se_sess)) 2974 + if (!se_sess) 2975 2975 return PYX_TRANSPORT_LU_COMM_FAILURE; 2976 2976 2977 2977 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2978 2978 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 2979 2979 se_sess); 2980 - if (!(pr_reg_n)) { 2981 - printk(KERN_ERR "SPC-3 PR: Unable to locate" 2980 + if (!pr_reg_n) { 2981 + pr_err("SPC-3 PR: Unable to locate" 2982 2982 " PR_REGISTERED *pr_reg for PREEMPT%s\n", 2983 2983 (abort) ? "_AND_ABORT" : ""); 2984 2984 return PYX_TRANSPORT_RESERVATION_CONFLICT; ··· 2988 2988 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2989 2989 } 2990 2990 if (scope != PR_SCOPE_LU_SCOPE) { 2991 - printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2991 + pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2992 2992 core_scsi3_put_pr_reg(pr_reg_n); 2993 2993 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2994 2994 } ··· 3001 3001 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) 3002 3002 all_reg = 1; 3003 3003 3004 - if (!(all_reg) && !(sa_res_key)) { 3004 + if (!all_reg && !sa_res_key) { 3005 3005 spin_unlock(&dev->dev_reservation_lock); 3006 3006 core_scsi3_put_pr_reg(pr_reg_n); 3007 3007 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; ··· 3015 3015 * server shall perform a preempt by doing the following in an 3016 3016 * uninterrupted series of actions. (See below..) 
3017 3017 */ 3018 - if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) { 3018 + if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) { 3019 3019 /* 3020 3020 * No existing or SA Reservation Key matching reservations.. 3021 3021 * ··· 3042 3042 * was received, with the additional sense code set 3043 3043 * to REGISTRATIONS PREEMPTED. 3044 3044 */ 3045 - if (!(all_reg)) { 3045 + if (!all_reg) { 3046 3046 if (pr_reg->pr_res_key != sa_res_key) 3047 3047 continue; 3048 3048 ··· 3082 3082 NULL, 0); 3083 3083 released_regs++; 3084 3084 } 3085 - if (!(calling_it_nexus)) 3085 + if (!calling_it_nexus) 3086 3086 core_scsi3_ua_allocate(pr_reg_nacl, 3087 3087 pr_res_mapped_lun, 0x2A, 3088 3088 ASCQ_2AH_RESERVATIONS_PREEMPTED); ··· 3095 3095 * registered reservation key, then the device server shall 3096 3096 * complete the command with RESERVATION CONFLICT status. 3097 3097 */ 3098 - if (!(released_regs)) { 3098 + if (!released_regs) { 3099 3099 spin_unlock(&dev->dev_reservation_lock); 3100 3100 core_scsi3_put_pr_reg(pr_reg_n); 3101 3101 return PYX_TRANSPORT_RESERVATION_CONFLICT; ··· 3120 3120 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3121 3121 &pr_reg_n->pr_aptpl_buf[0], 3122 3122 pr_tmpl->pr_aptpl_buf_len); 3123 - if (!(ret)) 3124 - printk(KERN_INFO "SPC-3 PR: Updated APTPL" 3123 + if (!ret) 3124 + pr_debug("SPC-3 PR: Updated APTPL" 3125 3125 " metadata for PREEMPT%s\n", (abort) ? 3126 3126 "_AND_ABORT" : ""); 3127 3127 } ··· 3256 3256 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3257 3257 &pr_reg_n->pr_aptpl_buf[0], 3258 3258 pr_tmpl->pr_aptpl_buf_len); 3259 - if (!(ret)) 3260 - printk("SPC-3 PR: Updated APTPL metadata for PREEMPT" 3259 + if (!ret) 3260 + pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT" 3261 3261 "%s\n", (abort) ? 
"_AND_ABORT" : ""); 3262 3262 } 3263 3263 ··· 3287 3287 res_key, sa_res_key, abort); 3288 3288 break; 3289 3289 default: 3290 - printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s" 3290 + pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" 3291 3291 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); 3292 3292 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3293 3293 } ··· 3321 3321 unsigned short rtpi; 3322 3322 unsigned char proto_ident; 3323 3323 3324 - if (!(se_sess) || !(se_lun)) { 3325 - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 3324 + if (!se_sess || !se_lun) { 3325 + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 3326 3326 return PYX_TRANSPORT_LU_COMM_FAILURE; 3327 3327 } 3328 3328 memset(dest_iport, 0, 64); ··· 3338 3338 */ 3339 3339 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 3340 3340 se_sess); 3341 - if (!(pr_reg)) { 3342 - printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" 3341 + if (!pr_reg) { 3342 + pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" 3343 3343 " *pr_reg for REGISTER_AND_MOVE\n"); 3344 3344 return PYX_TRANSPORT_LU_COMM_FAILURE; 3345 3345 } ··· 3348 3348 * provided during this initiator's I_T nexus registration. 
3349 3349 */ 3350 3350 if (res_key != pr_reg->pr_res_key) { 3351 - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received" 3351 + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received" 3352 3352 " res_key: 0x%016Lx does not match existing SA REGISTER" 3353 3353 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); 3354 3354 core_scsi3_put_pr_reg(pr_reg); ··· 3357 3357 /* 3358 3358 * The service active reservation key needs to be non zero 3359 3359 */ 3360 - if (!(sa_res_key)) { 3361 - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero" 3360 + if (!sa_res_key) { 3361 + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" 3362 3362 " sa_res_key\n"); 3363 3363 core_scsi3_put_pr_reg(pr_reg); 3364 3364 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; ··· 3380 3380 buf = NULL; 3381 3381 3382 3382 if ((tid_len + 24) != cmd->data_length) { 3383 - printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header" 3383 + pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header" 3384 3384 " does not equal CDB data_length: %u\n", tid_len, 3385 3385 cmd->data_length); 3386 3386 core_scsi3_put_pr_reg(pr_reg); ··· 3392 3392 if (se_port->sep_rtpi != rtpi) 3393 3393 continue; 3394 3394 dest_se_tpg = se_port->sep_tpg; 3395 - if (!(dest_se_tpg)) 3395 + if (!dest_se_tpg) 3396 3396 continue; 3397 3397 dest_tf_ops = dest_se_tpg->se_tpg_tfo; 3398 - if (!(dest_tf_ops)) 3398 + if (!dest_tf_ops) 3399 3399 continue; 3400 3400 3401 3401 atomic_inc(&dest_se_tpg->tpg_pr_ref_count); ··· 3404 3404 3405 3405 ret = core_scsi3_tpg_depend_item(dest_se_tpg); 3406 3406 if (ret != 0) { 3407 - printk(KERN_ERR "core_scsi3_tpg_depend_item() failed" 3407 + pr_err("core_scsi3_tpg_depend_item() failed" 3408 3408 " for dest_se_tpg\n"); 3409 3409 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3410 3410 smp_mb__after_atomic_dec(); ··· 3417 3417 } 3418 3418 spin_unlock(&dev->se_port_lock); 3419 3419 3420 - if (!(dest_se_tpg) || (!dest_tf_ops)) { 3421 - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 
3420 + if (!dest_se_tpg || !dest_tf_ops) { 3421 + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3422 3422 " fabric ops from Relative Target Port Identifier:" 3423 3423 " %hu\n", rtpi); 3424 3424 core_scsi3_put_pr_reg(pr_reg); ··· 3428 3428 buf = transport_kmap_first_data_page(cmd); 3429 3429 proto_ident = (buf[24] & 0x0f); 3430 3430 #if 0 3431 - printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" 3431 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" 3432 3432 " 0x%02x\n", proto_ident); 3433 3433 #endif 3434 3434 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { 3435 - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received" 3435 + pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" 3436 3436 " proto_ident: 0x%02x does not match ident: 0x%02x" 3437 3437 " from fabric: %s\n", proto_ident, 3438 3438 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), ··· 3441 3441 goto out; 3442 3442 } 3443 3443 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { 3444 - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not" 3444 + pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" 3445 3445 " containg a valid tpg_parse_pr_out_transport_id" 3446 3446 " function pointer\n"); 3447 3447 ret = PYX_TRANSPORT_LU_COMM_FAILURE; ··· 3449 3449 } 3450 3450 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, 3451 3451 (const char *)&buf[24], &tmp_tid_len, &iport_ptr); 3452 - if (!(initiator_str)) { 3453 - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3452 + if (!initiator_str) { 3453 + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3454 3454 " initiator_str from Transport ID\n"); 3455 3455 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3456 3456 goto out; ··· 3459 3459 transport_kunmap_first_data_page(cmd); 3460 3460 buf = NULL; 3461 3461 3462 - printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s" 3462 + pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" 
3463 3463 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? 3464 3464 "port" : "device", initiator_str, (iport_ptr != NULL) ? 3465 3465 iport_ptr : ""); ··· 3474 3474 pr_reg_nacl = pr_reg->pr_reg_nacl; 3475 3475 matching_iname = (!strcmp(initiator_str, 3476 3476 pr_reg_nacl->initiatorname)) ? 1 : 0; 3477 - if (!(matching_iname)) 3477 + if (!matching_iname) 3478 3478 goto after_iport_check; 3479 3479 3480 - if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) { 3481 - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" 3480 + if (!iport_ptr || !pr_reg->isid_present_at_reg) { 3481 + pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" 3482 3482 " matches: %s on received I_T Nexus\n", initiator_str, 3483 3483 pr_reg_nacl->initiatorname); 3484 3484 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3485 3485 goto out; 3486 3486 } 3487 - if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) { 3488 - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" 3487 + if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { 3488 + pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" 3489 3489 " matches: %s %s on received I_T Nexus\n", 3490 3490 initiator_str, iport_ptr, pr_reg_nacl->initiatorname, 3491 3491 pr_reg->pr_reg_isid); ··· 3505 3505 } 3506 3506 spin_unlock_bh(&dest_se_tpg->acl_node_lock); 3507 3507 3508 - if (!(dest_node_acl)) { 3509 - printk(KERN_ERR "Unable to locate %s dest_node_acl for" 3508 + if (!dest_node_acl) { 3509 + pr_err("Unable to locate %s dest_node_acl for" 3510 3510 " TransportID%s\n", dest_tf_ops->get_fabric_name(), 3511 3511 initiator_str); 3512 3512 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; ··· 3514 3514 } 3515 3515 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 3516 3516 if (ret != 0) { 3517 - printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for" 3517 + pr_err("core_scsi3_nodeacl_depend_item() for" 3518 3518 " dest_node_acl\n"); 3519 3519 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3520 3520 smp_mb__after_atomic_dec(); ··· 
3523 3523 goto out; 3524 3524 } 3525 3525 #if 0 3526 - printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" 3526 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" 3527 3527 " %s from TransportID\n", dest_tf_ops->get_fabric_name(), 3528 3528 dest_node_acl->initiatorname); 3529 3529 #endif ··· 3532 3532 * PORT IDENTIFIER. 3533 3533 */ 3534 3534 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); 3535 - if (!(dest_se_deve)) { 3536 - printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:" 3535 + if (!dest_se_deve) { 3536 + pr_err("Unable to locate %s dest_se_deve from RTPI:" 3537 3537 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); 3538 3538 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3539 3539 goto out; ··· 3541 3541 3542 3542 ret = core_scsi3_lunacl_depend_item(dest_se_deve); 3543 3543 if (ret < 0) { 3544 - printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n"); 3544 + pr_err("core_scsi3_lunacl_depend_item() failed\n"); 3545 3545 atomic_dec(&dest_se_deve->pr_ref_count); 3546 3546 smp_mb__after_atomic_dec(); 3547 3547 dest_se_deve = NULL; ··· 3549 3549 goto out; 3550 3550 } 3551 3551 #if 0 3552 - printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" 3552 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" 3553 3553 " ACL for dest_se_deve->mapped_lun: %u\n", 3554 3554 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, 3555 3555 dest_se_deve->mapped_lun); ··· 3560 3560 */ 3561 3561 spin_lock(&dev->dev_reservation_lock); 3562 3562 pr_res_holder = dev->dev_pr_res_holder; 3563 - if (!(pr_res_holder)) { 3564 - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation" 3563 + if (!pr_res_holder) { 3564 + pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" 3565 3565 " currently held\n"); 3566 3566 spin_unlock(&dev->dev_reservation_lock); 3567 3567 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; ··· 3574 3574 * Register behaviors for a REGISTER AND MOVE service action 3575 3575 */ 
3576 3576 if (pr_res_holder != pr_reg) { 3577 - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T" 3577 + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" 3578 3578 " Nexus is not reservation holder\n"); 3579 3579 spin_unlock(&dev->dev_reservation_lock); 3580 3580 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; ··· 3591 3591 */ 3592 3592 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 3593 3593 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { 3594 - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move" 3594 + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move" 3595 3595 " reservation for type: %s\n", 3596 3596 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); 3597 3597 spin_unlock(&dev->dev_reservation_lock); ··· 3626 3626 */ 3627 3627 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 3628 3628 iport_ptr); 3629 - if (!(dest_pr_reg)) { 3629 + if (!dest_pr_reg) { 3630 3630 ret = core_scsi3_alloc_registration(cmd->se_dev, 3631 3631 dest_node_acl, dest_se_deve, iport_ptr, 3632 3632 sa_res_key, 0, aptpl, 2, 1); ··· 3659 3659 /* 3660 3660 * Increment PRGeneration for existing registrations.. 
3661 3661 */ 3662 - if (!(new_reg)) 3662 + if (!new_reg) 3663 3663 dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; 3664 3664 spin_unlock(&dev->dev_reservation_lock); 3665 3665 3666 - printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" 3666 + pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" 3667 3667 " created new reservation holder TYPE: %s on object RTPI:" 3668 3668 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), 3669 3669 core_scsi3_pr_dump_type(type), rtpi, 3670 3670 dest_pr_reg->pr_res_generation); 3671 - printk(KERN_INFO "SPC-3 PR Successfully moved reservation from" 3671 + pr_debug("SPC-3 PR Successfully moved reservation from" 3672 3672 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", 3673 3673 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, 3674 3674 (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), ··· 3696 3696 * Clear the APTPL metadata if APTPL has been disabled, otherwise 3697 3697 * write out the updated metadata to struct file for this SCSI device. 3698 3698 */ 3699 - if (!(aptpl)) { 3699 + if (!aptpl) { 3700 3700 pr_tmpl->pr_aptpl_active = 0; 3701 3701 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 3702 - printk("SPC-3 PR: Set APTPL Bit Deactivated for" 3702 + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" 3703 3703 " REGISTER_AND_MOVE\n"); 3704 3704 } else { 3705 3705 pr_tmpl->pr_aptpl_active = 1; 3706 3706 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3707 3707 &dest_pr_reg->pr_aptpl_buf[0], 3708 3708 pr_tmpl->pr_aptpl_buf_len); 3709 - if (!(ret)) 3710 - printk("SPC-3 PR: Set APTPL Bit Activated for" 3709 + if (!ret) 3710 + pr_debug("SPC-3 PR: Set APTPL Bit Activated for" 3711 3711 " REGISTER_AND_MOVE\n"); 3712 3712 } 3713 3713 ··· 3750 3750 * FIXME: A NULL struct se_session pointer means an this is not coming from 3751 3751 * a $FABRIC_MOD's nexus, but from internal passthrough ops. 
3752 3752 */ 3753 - if (!(cmd->se_sess)) 3753 + if (!cmd->se_sess) 3754 3754 return PYX_TRANSPORT_LU_COMM_FAILURE; 3755 3755 3756 3756 if (cmd->data_length < 24) { 3757 - printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list" 3757 + pr_warn("SPC-PR: Received PR OUT parameter list" 3758 3758 " length too small: %u\n", cmd->data_length); 3759 3759 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3760 3760 } ··· 3800 3800 * the sense key set to ILLEGAL REQUEST, and the additional sense 3801 3801 * code set to PARAMETER LIST LENGTH ERROR. 3802 3802 */ 3803 - if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && 3803 + if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && 3804 3804 (cmd->data_length != 24)) { 3805 - printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter" 3805 + pr_warn("SPC-PR: Received PR OUT illegal parameter" 3806 3806 " list length: %u\n", cmd->data_length); 3807 3807 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3808 3808 } ··· 3836 3836 return core_scsi3_emulate_pro_register_and_move(cmd, res_key, 3837 3837 sa_res_key, aptpl, unreg); 3838 3838 default: 3839 - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" 3839 + pr_err("Unknown PERSISTENT_RESERVE_OUT service" 3840 3840 " action: 0x%02x\n", cdb[1] & 0x1f); 3841 3841 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3842 3842 } ··· 3858 3858 u32 add_len = 0, off = 8; 3859 3859 3860 3860 if (cmd->data_length < 8) { 3861 - printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u" 3861 + pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" 3862 3862 " too small\n", cmd->data_length); 3863 3863 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3864 3864 } ··· 3917 3917 u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. 
*/ 3918 3918 3919 3919 if (cmd->data_length < 8) { 3920 - printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u" 3920 + pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" 3921 3921 " too small\n", cmd->data_length); 3922 3922 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3923 3923 } ··· 3999 3999 u16 add_len = 8; /* Hardcoded to 8. */ 4000 4000 4001 4001 if (cmd->data_length < 6) { 4002 - printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:" 4002 + pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" 4003 4003 " %u too small\n", cmd->data_length); 4004 4004 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4005 4005 } ··· 4060 4060 int format_code = 0; 4061 4061 4062 4062 if (cmd->data_length < 8) { 4063 - printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u" 4063 + pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" 4064 4064 " too small\n", cmd->data_length); 4065 4065 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4066 4066 } ··· 4091 4091 se_tpg, se_nacl, pr_reg, &format_code); 4092 4092 4093 4093 if ((exp_desc_len + add_len) > cmd->data_length) { 4094 - printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran" 4094 + pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" 4095 4095 " out of buffer: %d\n", cmd->data_length); 4096 4096 spin_lock(&pr_tmpl->registration_lock); 4097 4097 atomic_dec(&pr_reg->pr_res_holders); ··· 4141 4141 * bit is set to one, the contents of the RELATIVE TARGET PORT 4142 4142 * IDENTIFIER field are not defined by this standard. 
4143 4143 */ 4144 - if (!(pr_reg->pr_reg_all_tg_pt)) { 4144 + if (!pr_reg->pr_reg_all_tg_pt) { 4145 4145 struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; 4146 4146 4147 4147 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); ··· 4203 4203 case PRI_READ_FULL_STATUS: 4204 4204 return core_scsi3_pri_read_full_status(cmd); 4205 4205 default: 4206 - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service" 4206 + pr_err("Unknown PERSISTENT_RESERVE_IN service" 4207 4207 " action: 0x%02x\n", cdb[1] & 0x1f); 4208 4208 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4209 4209 } ··· 4224 4224 * CONFLICT status. 4225 4225 */ 4226 4226 if (dev->dev_flags & DF_SPC2_RESERVATIONS) { 4227 - printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy" 4227 + pr_err("Received PERSISTENT_RESERVE CDB while legacy" 4228 4228 " SPC-2 reservation is held, returning" 4229 4229 " RESERVATION_CONFLICT\n"); 4230 4230 return PYX_TRANSPORT_RESERVATION_CONFLICT; ··· 4263 4263 rest->res_type = SPC_PASSTHROUGH; 4264 4264 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; 4265 4265 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; 4266 - printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" 4266 + pr_debug("%s: Using SPC_PASSTHROUGH, no reservation" 4267 4267 " emulation\n", dev->transport->name); 4268 4268 return 0; 4269 4269 } ··· 4275 4275 rest->res_type = SPC3_PERSISTENT_RESERVATIONS; 4276 4276 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; 4277 4277 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; 4278 - printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" 4278 + pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS" 4279 4279 " emulation\n", dev->transport->name); 4280 4280 } else { 4281 4281 rest->res_type = SPC2_RESERVATIONS; 4282 4282 rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; 4283 4283 rest->pr_ops.t10_seq_non_holder = 4284 4284 &core_scsi2_reservation_seq_non_holder; 4285 - printk(KERN_INFO "%s: 
Using SPC2_RESERVATIONS emulation\n", 4285 + pr_debug("%s: Using SPC2_RESERVATIONS emulation\n", 4286 4286 dev->transport->name); 4287 4287 } 4288 4288
+79 -106
drivers/target/target_core_pscsi.c
··· 65 65 struct pscsi_hba_virt *phv; 66 66 67 67 phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); 68 - if (!(phv)) { 69 - printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); 68 + if (!phv) { 69 + pr_err("Unable to allocate struct pscsi_hba_virt\n"); 70 70 return -ENOMEM; 71 71 } 72 72 phv->phv_host_id = host_id; ··· 74 74 75 75 hba->hba_ptr = phv; 76 76 77 - printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" 77 + pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" 78 78 " Generic Target Core Stack %s\n", hba->hba_id, 79 79 PSCSI_VERSION, TARGET_CORE_MOD_VERSION); 80 - printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n", 80 + pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", 81 81 hba->hba_id); 82 82 83 83 return 0; ··· 91 91 if (scsi_host) { 92 92 scsi_host_put(scsi_host); 93 93 94 - printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" 94 + pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from" 95 95 " Generic Target Core\n", hba->hba_id, 96 96 (scsi_host->hostt->name) ? (scsi_host->hostt->name) : 97 97 "Unknown"); 98 98 } else 99 - printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" 99 + pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA" 100 100 " from Generic Target Core\n", hba->hba_id); 101 101 102 102 kfree(phv); ··· 110 110 /* 111 111 * Release the struct Scsi_Host 112 112 */ 113 - if (!(mode_flag)) { 114 - if (!(sh)) 113 + if (!mode_flag) { 114 + if (!sh) 115 115 return 0; 116 116 117 117 phv->phv_lld_host = NULL; 118 118 phv->phv_mode = PHV_VIRUTAL_HOST_ID; 119 119 120 - printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" 120 + pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" 121 121 " %s\n", hba->hba_id, (sh->hostt->name) ? 
122 122 (sh->hostt->name) : "Unknown"); 123 123 ··· 130 130 */ 131 131 sh = scsi_host_lookup(phv->phv_host_id); 132 132 if (IS_ERR(sh)) { 133 - printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" 133 + pr_err("pSCSI: Unable to locate SCSI Host for" 134 134 " phv_host_id: %d\n", phv->phv_host_id); 135 135 return PTR_ERR(sh); 136 136 } ··· 138 138 phv->phv_lld_host = sh; 139 139 phv->phv_mode = PHV_LLD_SCSI_HOST_NO; 140 140 141 - printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", 141 + pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", 142 142 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); 143 143 144 144 return 1; ··· 257 257 page_83 = &buf[off]; 258 258 ident_len = page_83[3]; 259 259 if (!ident_len) { 260 - printk(KERN_ERR "page_83[3]: identifier" 260 + pr_err("page_83[3]: identifier" 261 261 " length zero!\n"); 262 262 break; 263 263 } 264 - printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len); 264 + pr_debug("T10 VPD Identifer Length: %d\n", ident_len); 265 265 266 266 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); 267 267 if (!vpd) { 268 - printk(KERN_ERR "Unable to allocate memory for" 268 + pr_err("Unable to allocate memory for" 269 269 " struct t10_vpd\n"); 270 270 goto out; 271 271 } ··· 317 317 if (!sd->queue_depth) { 318 318 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; 319 319 320 - printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" 320 + pr_err("Set broken SCSI Device %d:%d:%d" 321 321 " queue_depth to %d\n", sd->channel, sd->id, 322 322 sd->lun, sd->queue_depth); 323 323 } ··· 355 355 dev = transport_add_device_to_core_hba(hba, &pscsi_template, 356 356 se_dev, dev_flags, pdv, 357 357 &dev_limits, NULL, NULL); 358 - if (!(dev)) { 358 + if (!dev) { 359 359 pdv->pdv_sd = NULL; 360 360 return NULL; 361 361 } ··· 385 385 struct pscsi_dev_virt *pdv; 386 386 387 387 pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); 388 - if (!(pdv)) { 389 - printk(KERN_ERR "Unable to allocate memory for 
struct pscsi_dev_virt\n"); 388 + if (!pdv) { 389 + pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); 390 390 return NULL; 391 391 } 392 392 pdv->pdv_se_hba = hba; 393 393 394 - printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); 394 + pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); 395 395 return pdv; 396 396 } 397 397 ··· 412 412 u32 dev_flags = 0; 413 413 414 414 if (scsi_device_get(sd)) { 415 - printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", 415 + pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", 416 416 sh->host_no, sd->channel, sd->id, sd->lun); 417 417 spin_unlock_irq(sh->host_lock); 418 418 return NULL; ··· 425 425 bd = blkdev_get_by_path(se_dev->se_dev_udev_path, 426 426 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); 427 427 if (IS_ERR(bd)) { 428 - printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); 428 + pr_err("pSCSI: blkdev_get_by_path() failed\n"); 429 429 scsi_device_put(sd); 430 430 return NULL; 431 431 } 432 432 pdv->pdv_bd = bd; 433 433 434 434 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 435 - if (!(dev)) { 435 + if (!dev) { 436 436 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 437 437 scsi_device_put(sd); 438 438 return NULL; 439 439 } 440 - printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", 440 + pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", 441 441 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); 442 442 443 443 return dev; ··· 459 459 u32 dev_flags = 0; 460 460 461 461 if (scsi_device_get(sd)) { 462 - printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", 462 + pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", 463 463 sh->host_no, sd->channel, sd->id, sd->lun); 464 464 spin_unlock_irq(sh->host_lock); 465 465 return NULL; ··· 467 467 spin_unlock_irq(sh->host_lock); 468 468 469 469 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 470 - if (!(dev)) { 470 + if (!dev) { 471 471 
scsi_device_put(sd); 472 472 return NULL; 473 473 } 474 - printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 474 + pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 475 475 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 476 476 sd->channel, sd->id, sd->lun); 477 477 ··· 495 495 496 496 spin_unlock_irq(sh->host_lock); 497 497 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 498 - if (!(dev)) 498 + if (!dev) 499 499 return NULL; 500 500 501 - printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 501 + pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 502 502 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 503 503 sd->channel, sd->id, sd->lun); 504 504 ··· 517 517 struct Scsi_Host *sh = phv->phv_lld_host; 518 518 int legacy_mode_enable = 0; 519 519 520 - if (!(pdv)) { 521 - printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" 520 + if (!pdv) { 521 + pr_err("Unable to locate struct pscsi_dev_virt" 522 522 " parameter\n"); 523 523 return ERR_PTR(-EINVAL); 524 524 } ··· 526 526 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the 527 527 * struct Scsi_Host we will need to bring the TCM/pSCSI object online 528 528 */ 529 - if (!(sh)) { 529 + if (!sh) { 530 530 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 531 - printk(KERN_ERR "pSCSI: Unable to locate struct" 531 + pr_err("pSCSI: Unable to locate struct" 532 532 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); 533 533 return ERR_PTR(-ENODEV); 534 534 } ··· 537 537 * reference, we enforce that udev_path has been set 538 538 */ 539 539 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { 540 - printk(KERN_ERR "pSCSI: udev_path attribute has not" 540 + pr_err("pSCSI: udev_path attribute has not" 541 541 " been set before ENABLE=1\n"); 542 542 return ERR_PTR(-EINVAL); 543 543 } ··· 548 548 */ 549 549 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { 550 550 spin_lock(&hba->device_lock); 551 - if (!(list_empty(&hba->hba_dev_list))) 
{ 552 - printk(KERN_ERR "pSCSI: Unable to set hba_mode" 551 + if (!list_empty(&hba->hba_dev_list)) { 552 + pr_err("pSCSI: Unable to set hba_mode" 553 553 " with active devices\n"); 554 554 spin_unlock(&hba->device_lock); 555 555 return ERR_PTR(-EEXIST); ··· 565 565 } else { 566 566 sh = scsi_host_lookup(pdv->pdv_host_id); 567 567 if (IS_ERR(sh)) { 568 - printk(KERN_ERR "pSCSI: Unable to locate" 568 + pr_err("pSCSI: Unable to locate" 569 569 " pdv_host_id: %d\n", pdv->pdv_host_id); 570 570 return (struct se_device *) sh; 571 571 } 572 572 } 573 573 } else { 574 574 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { 575 - printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" 575 + pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while" 576 576 " struct Scsi_Host exists\n"); 577 577 return ERR_PTR(-EEXIST); 578 578 } ··· 601 601 break; 602 602 } 603 603 604 - if (!(dev)) { 604 + if (!dev) { 605 605 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) 606 606 scsi_host_put(sh); 607 607 else if (legacy_mode_enable) { ··· 615 615 } 616 616 spin_unlock_irq(sh->host_lock); 617 617 618 - printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, 618 + pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, 619 619 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); 620 620 621 621 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) ··· 729 729 u32 blocksize; 730 730 731 731 buf = sg_virt(&sg[0]); 732 - if (!(buf)) { 733 - printk(KERN_ERR "Unable to get buf for scatterlist\n"); 732 + if (!buf) { 733 + pr_err("Unable to get buf for scatterlist\n"); 734 734 goto after_mode_select; 735 735 } 736 736 ··· 760 760 } 761 761 762 762 static struct se_task * 763 - pscsi_alloc_task(struct se_cmd *cmd) 763 + pscsi_alloc_task(unsigned char *cdb) 764 764 { 765 765 struct pscsi_plugin_task *pt; 766 - unsigned char *cdb = cmd->t_task_cdb; 767 - 768 - pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); 769 - if (!pt) { 770 - printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); 771 - 
return NULL; 772 - } 773 766 774 767 /* 775 - * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation, 776 - * allocate the extended CDB buffer for per struct se_task context 777 - * pt->pscsi_cdb now. 768 + * Dynamically alloc cdb space, since it may be larger than 769 + * TCM_MAX_COMMAND_SIZE 778 770 */ 779 - if (cmd->t_task_cdb != cmd->__t_task_cdb) { 780 - 781 - pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); 782 - if (!(pt->pscsi_cdb)) { 783 - printk(KERN_ERR "pSCSI: Unable to allocate extended" 784 - " pt->pscsi_cdb\n"); 785 - kfree(pt); 786 - return NULL; 787 - } 788 - } else 789 - pt->pscsi_cdb = &pt->__pscsi_cdb[0]; 771 + pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL); 772 + if (!pt) { 773 + pr_err("Unable to allocate struct pscsi_plugin_task\n"); 774 + return NULL; 775 + } 790 776 791 777 return &pt->pscsi_task; 792 778 } ··· 823 837 pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, 824 838 (task->task_data_direction == DMA_TO_DEVICE), 825 839 GFP_KERNEL); 826 - if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { 827 - printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", 840 + if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { 841 + pr_err("PSCSI: blk_get_request() failed: %ld\n", 828 842 IS_ERR(pt->pscsi_req)); 829 843 return PYX_TRANSPORT_LU_COMM_FAILURE; 830 844 } ··· 869 883 static void pscsi_free_task(struct se_task *task) 870 884 { 871 885 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 872 - struct se_cmd *cmd = task->task_se_cmd; 873 886 874 - /* 875 - * Release the extended CDB allocation from pscsi_alloc_task() 876 - * if one exists. 877 - */ 878 - if (cmd->t_task_cdb != cmd->__t_task_cdb) 879 - kfree(pt->pscsi_cdb); 880 887 /* 881 888 * We do not release the bio(s) here associated with this task, as 882 889 * this is handled by bio_put() and pscsi_bi_endio(). 
··· 915 936 switch (token) { 916 937 case Opt_scsi_host_id: 917 938 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 918 - printk(KERN_ERR "PSCSI[%d]: Unable to accept" 939 + pr_err("PSCSI[%d]: Unable to accept" 919 940 " scsi_host_id while phv_mode ==" 920 941 " PHV_LLD_SCSI_HOST_NO\n", 921 942 phv->phv_host_id); ··· 924 945 } 925 946 match_int(args, &arg); 926 947 pdv->pdv_host_id = arg; 927 - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" 948 + pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" 928 949 " %d\n", phv->phv_host_id, pdv->pdv_host_id); 929 950 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; 930 951 break; 931 952 case Opt_scsi_channel_id: 932 953 match_int(args, &arg); 933 954 pdv->pdv_channel_id = arg; 934 - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" 955 + pr_debug("PSCSI[%d]: Referencing SCSI Channel" 935 956 " ID: %d\n", phv->phv_host_id, 936 957 pdv->pdv_channel_id); 937 958 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; ··· 939 960 case Opt_scsi_target_id: 940 961 match_int(args, &arg); 941 962 pdv->pdv_target_id = arg; 942 - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" 963 + pr_debug("PSCSI[%d]: Referencing SCSI Target" 943 964 " ID: %d\n", phv->phv_host_id, 944 965 pdv->pdv_target_id); 945 966 pdv->pdv_flags |= PDF_HAS_TARGET_ID; ··· 947 968 case Opt_scsi_lun_id: 948 969 match_int(args, &arg); 949 970 pdv->pdv_lun_id = arg; 950 - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" 971 + pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" 951 972 " %d\n", phv->phv_host_id, pdv->pdv_lun_id); 952 973 pdv->pdv_flags |= PDF_HAS_LUN_ID; 953 974 break; ··· 970 991 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || 971 992 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || 972 993 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { 973 - printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" 994 + pr_err("Missing scsi_channel_id=, scsi_target_id= and" 974 995 " scsi_lun_id= parameters\n"); 975 996 return -EINVAL; 976 997 } ··· 1040 1061 * in 
block/blk-core.c:blk_make_request() 1041 1062 */ 1042 1063 bio = bio_kmalloc(GFP_KERNEL, sg_num); 1043 - if (!(bio)) { 1044 - printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); 1064 + if (!bio) { 1065 + pr_err("PSCSI: bio_kmalloc() failed\n"); 1045 1066 return NULL; 1046 1067 } 1047 1068 bio->bi_end_io = pscsi_bi_endio; 1048 1069 1049 1070 return bio; 1050 1071 } 1051 - 1052 - #if 0 1053 - #define DEBUG_PSCSI(x...) printk(x) 1054 - #else 1055 - #define DEBUG_PSCSI(x...) 1056 - #endif 1057 1072 1058 1073 static int __pscsi_map_task_SG( 1059 1074 struct se_task *task, ··· 1079 1106 * is ported to upstream SCSI passthrough functionality that accepts 1080 1107 * struct scatterlist->page_link or struct page as a paraemeter. 1081 1108 */ 1082 - DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); 1109 + pr_debug("PSCSI: nr_pages: %d\n", nr_pages); 1083 1110 1084 1111 for_each_sg(task_sg, sg, task_sg_num, i) { 1085 1112 page = sg_page(sg); 1086 1113 off = sg->offset; 1087 1114 len = sg->length; 1088 1115 1089 - DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, 1116 + pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, 1090 1117 page, len, off); 1091 1118 1092 1119 while (len > 0 && data_len > 0) { 1093 1120 bytes = min_t(unsigned int, len, PAGE_SIZE - off); 1094 1121 bytes = min(bytes, data_len); 1095 1122 1096 - if (!(bio)) { 1123 + if (!bio) { 1097 1124 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); 1098 1125 nr_pages -= nr_vecs; 1099 1126 /* 1100 1127 * Calls bio_kmalloc() and sets bio->bi_end_io() 1101 1128 */ 1102 1129 bio = pscsi_get_bio(nr_vecs); 1103 - if (!(bio)) 1130 + if (!bio) 1104 1131 goto fail; 1105 1132 1106 1133 if (rw) 1107 1134 bio->bi_rw |= REQ_WRITE; 1108 1135 1109 - DEBUG_PSCSI("PSCSI: Allocated bio: %p," 1136 + pr_debug("PSCSI: Allocated bio: %p," 1110 1137 " dir: %s nr_vecs: %d\n", bio, 1111 1138 (rw) ? 
"rw" : "r", nr_vecs); 1112 1139 /* ··· 1121 1148 tbio = tbio->bi_next = bio; 1122 1149 } 1123 1150 1124 - DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" 1151 + pr_debug("PSCSI: Calling bio_add_pc_page() i: %d" 1125 1152 " bio: %p page: %p len: %d off: %d\n", i, bio, 1126 1153 page, len, off); 1127 1154 ··· 1130 1157 if (rc != bytes) 1131 1158 goto fail; 1132 1159 1133 - DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", 1160 + pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", 1134 1161 bio->bi_vcnt, nr_vecs); 1135 1162 1136 1163 if (bio->bi_vcnt > nr_vecs) { 1137 - DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" 1164 + pr_debug("PSCSI: Reached bio->bi_vcnt max:" 1138 1165 " %d i: %d bio: %p, allocating another" 1139 1166 " bio\n", bio->bi_vcnt, i, bio); 1140 1167 /* ··· 1156 1183 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND 1157 1184 * primary SCSI WRITE poayload mapped for struct se_task->task_sg[] 1158 1185 */ 1159 - if (!(bidi_read)) { 1186 + if (!bidi_read) { 1160 1187 /* 1161 1188 * Starting with v2.6.31, call blk_make_request() passing in *hbio to 1162 1189 * allocate the pSCSI task a struct request. 
1163 1190 */ 1164 1191 pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, 1165 1192 hbio, GFP_KERNEL); 1166 - if (!(pt->pscsi_req)) { 1167 - printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); 1193 + if (!pt->pscsi_req) { 1194 + pr_err("pSCSI: blk_make_request() failed\n"); 1168 1195 goto fail; 1169 1196 } 1170 1197 /* ··· 1173 1200 */ 1174 1201 pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); 1175 1202 1176 - return task->task_sg_num; 1203 + return task->task_sg_nents; 1177 1204 } 1178 1205 /* 1179 1206 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND ··· 1181 1208 */ 1182 1209 pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, 1183 1210 hbio, GFP_KERNEL); 1184 - if (!(pt->pscsi_req->next_rq)) { 1185 - printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); 1211 + if (!pt->pscsi_req->next_rq) { 1212 + pr_err("pSCSI: blk_make_request() failed for BIDI\n"); 1186 1213 goto fail; 1187 1214 } 1188 1215 pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); 1189 1216 1190 - return task->task_sg_num; 1217 + return task->task_sg_nents; 1191 1218 fail: 1192 1219 while (hbio) { 1193 1220 bio = hbio; ··· 1206 1233 * Setup the main struct request for the task->task_sg[] payload 1207 1234 */ 1208 1235 1209 - ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); 1236 + ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_nents, 0); 1210 1237 if (ret >= 0 && task->task_sg_bidi) { 1211 1238 /* 1212 1239 * If present, set up the extra BIDI-COMMAND SCSI READ 1213 1240 * struct request and payload. 
1214 1241 */ 1215 1242 ret = __pscsi_map_task_SG(task, task->task_sg_bidi, 1216 - task->task_sg_num, 1); 1243 + task->task_sg_nents, 1); 1217 1244 } 1218 1245 1219 1246 if (ret < 0) ··· 1292 1319 struct pscsi_plugin_task *pt) 1293 1320 { 1294 1321 task->task_scsi_status = status_byte(pt->pscsi_result); 1295 - if ((task->task_scsi_status)) { 1322 + if (task->task_scsi_status) { 1296 1323 task->task_scsi_status <<= 1; 1297 - printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" 1324 + pr_debug("PSCSI Status Byte exception at task: %p CDB:" 1298 1325 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1299 1326 pt->pscsi_result); 1300 1327 } ··· 1304 1331 transport_complete_task(task, (!task->task_scsi_status)); 1305 1332 break; 1306 1333 default: 1307 - printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" 1334 + pr_debug("PSCSI Host Byte exception at task: %p CDB:" 1308 1335 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1309 1336 pt->pscsi_result); 1310 1337 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+1 -2
drivers/target/target_core_pscsi.h
··· 23 23 24 24 struct pscsi_plugin_task { 25 25 struct se_task pscsi_task; 26 - unsigned char *pscsi_cdb; 27 - unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE]; 28 26 unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; 29 27 int pscsi_direction; 30 28 int pscsi_result; 31 29 u32 pscsi_resid; 32 30 struct request *pscsi_req; 31 + unsigned char pscsi_cdb[0]; 33 32 } ____cacheline_aligned; 34 33 35 34 #define PDF_HAS_CHANNEL_ID 0x01
+67 -386
drivers/target/target_core_rd.c
··· 44 44 45 45 #include "target_core_rd.h" 46 46 47 - static struct se_subsystem_api rd_dr_template; 48 47 static struct se_subsystem_api rd_mcp_template; 49 - 50 - /* #define DEBUG_RAMDISK_MCP */ 51 - /* #define DEBUG_RAMDISK_DR */ 52 48 53 49 /* rd_attach_hba(): (Part of se_subsystem_api_t template) 54 50 * ··· 55 59 struct rd_host *rd_host; 56 60 57 61 rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); 58 - if (!(rd_host)) { 59 - printk(KERN_ERR "Unable to allocate memory for struct rd_host\n"); 62 + if (!rd_host) { 63 + pr_err("Unable to allocate memory for struct rd_host\n"); 60 64 return -ENOMEM; 61 65 } 62 66 ··· 64 68 65 69 hba->hba_ptr = rd_host; 66 70 67 - printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" 71 + pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" 68 72 " Generic Target Core Stack %s\n", hba->hba_id, 69 73 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); 70 - printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" 74 + pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" 71 75 " MaxSectors: %u\n", hba->hba_id, 72 76 rd_host->rd_host_id, RD_MAX_SECTORS); 73 77 ··· 78 82 { 79 83 struct rd_host *rd_host = hba->hba_ptr; 80 84 81 - printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from" 85 + pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from" 82 86 " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); 83 87 84 88 kfree(rd_host); ··· 107 111 108 112 for (j = 0; j < sg_per_table; j++) { 109 113 pg = sg_page(&sg[j]); 110 - if ((pg)) { 114 + if (pg) { 111 115 __free_page(pg); 112 116 page_count++; 113 117 } ··· 116 120 kfree(sg); 117 121 } 118 122 119 - printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk" 123 + pr_debug("CORE_RD[%u] - Released device space for Ramdisk" 120 124 " Device ID: %u, pages %u in %u tables total bytes %lu\n", 121 125 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, 122 126 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); ··· 141 
145 struct scatterlist *sg; 142 146 143 147 if (rd_dev->rd_page_count <= 0) { 144 - printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", 148 + pr_err("Illegal page count: %u for Ramdisk device\n", 145 149 rd_dev->rd_page_count); 146 150 return -EINVAL; 147 151 } ··· 150 154 sg_tables = (total_sg_needed / max_sg_per_table) + 1; 151 155 152 156 sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); 153 - if (!(sg_table)) { 154 - printk(KERN_ERR "Unable to allocate memory for Ramdisk" 157 + if (!sg_table) { 158 + pr_err("Unable to allocate memory for Ramdisk" 155 159 " scatterlist tables\n"); 156 160 return -ENOMEM; 157 161 } ··· 165 169 166 170 sg = kzalloc(sg_per_table * sizeof(struct scatterlist), 167 171 GFP_KERNEL); 168 - if (!(sg)) { 169 - printk(KERN_ERR "Unable to allocate scatterlist array" 172 + if (!sg) { 173 + pr_err("Unable to allocate scatterlist array" 170 174 " for struct rd_dev\n"); 171 175 return -ENOMEM; 172 176 } 173 177 174 - sg_init_table((struct scatterlist *)&sg[0], sg_per_table); 178 + sg_init_table(sg, sg_per_table); 175 179 176 180 sg_table[i].sg_table = sg; 177 181 sg_table[i].rd_sg_count = sg_per_table; ··· 181 185 182 186 for (j = 0; j < sg_per_table; j++) { 183 187 pg = alloc_pages(GFP_KERNEL, 0); 184 - if (!(pg)) { 185 - printk(KERN_ERR "Unable to allocate scatterlist" 188 + if (!pg) { 189 + pr_err("Unable to allocate scatterlist" 186 190 " pages for struct rd_dev_sg_table\n"); 187 191 return -ENOMEM; 188 192 } ··· 194 198 total_sg_needed -= sg_per_table; 195 199 } 196 200 197 - printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of" 201 + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" 198 202 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, 199 203 rd_dev->rd_dev_id, rd_dev->rd_page_count, 200 204 rd_dev->sg_table_count); ··· 211 215 struct rd_host *rd_host = hba->hba_ptr; 212 216 213 217 rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); 214 - if (!(rd_dev)) { 215 - 
printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n"); 218 + if (!rd_dev) { 219 + pr_err("Unable to allocate memory for struct rd_dev\n"); 216 220 return NULL; 217 221 } 218 222 ··· 220 224 rd_dev->rd_direct = rd_direct; 221 225 222 226 return rd_dev; 223 - } 224 - 225 - static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name) 226 - { 227 - return rd_allocate_virtdevice(hba, name, 1); 228 227 } 229 228 230 229 static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) ··· 261 270 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; 262 271 263 272 dev = transport_add_device_to_core_hba(hba, 264 - (rd_dev->rd_direct) ? &rd_dr_template : 265 273 &rd_mcp_template, se_dev, dev_flags, rd_dev, 266 274 &dev_limits, prod, rev); 267 - if (!(dev)) 275 + if (!dev) 268 276 goto fail; 269 277 270 278 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 271 279 rd_dev->rd_queue_depth = dev->queue_depth; 272 280 273 - printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" 281 + pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" 274 282 " %u pages in %u tables, %lu total bytes\n", 275 283 rd_host->rd_host_id, (!rd_dev->rd_direct) ? 
"MEMCPY" : 276 284 "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, ··· 281 291 fail: 282 292 rd_release_device_space(rd_dev); 283 293 return ERR_PTR(ret); 284 - } 285 - 286 - static struct se_device *rd_DIRECT_create_virtdevice( 287 - struct se_hba *hba, 288 - struct se_subsystem_dev *se_dev, 289 - void *p) 290 - { 291 - return rd_create_virtdevice(hba, se_dev, p, 1); 292 294 } 293 295 294 296 static struct se_device *rd_MEMCPY_create_virtdevice( ··· 309 327 } 310 328 311 329 static struct se_task * 312 - rd_alloc_task(struct se_cmd *cmd) 330 + rd_alloc_task(unsigned char *cdb) 313 331 { 314 332 struct rd_request *rd_req; 315 333 316 334 rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); 317 335 if (!rd_req) { 318 - printk(KERN_ERR "Unable to allocate struct rd_request\n"); 336 + pr_err("Unable to allocate struct rd_request\n"); 319 337 return NULL; 320 338 } 321 - rd_req->rd_dev = cmd->se_dev->dev_ptr; 322 339 323 340 return &rd_req->rd_task; 324 341 } ··· 338 357 return sg_table; 339 358 } 340 359 341 - printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n", 360 + pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n", 342 361 page); 343 362 344 363 return NULL; ··· 351 370 static int rd_MEMCPY_read(struct rd_request *req) 352 371 { 353 372 struct se_task *task = &req->rd_task; 354 - struct rd_dev *dev = req->rd_dev; 373 + struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; 355 374 struct rd_dev_sg_table *table; 356 375 struct scatterlist *sg_d, *sg_s; 357 376 void *dst, *src; ··· 360 379 u32 rd_offset = req->rd_offset; 361 380 362 381 table = rd_get_sg_table(dev, req->rd_page); 363 - if (!(table)) 382 + if (!table) 364 383 return -EINVAL; 365 384 366 385 table_sg_end = (table->page_end_offset - req->rd_page); 367 386 sg_d = task->task_sg; 368 387 sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; 369 - #ifdef DEBUG_RAMDISK_MCP 370 - printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" 388 + 389 
+ pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" 371 390 " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, 372 391 req->rd_page, req->rd_offset); 373 - #endif 392 + 374 393 src_offset = rd_offset; 375 394 376 395 while (req->rd_size) { 377 396 if ((sg_d[i].length - dst_offset) < 378 397 (sg_s[j].length - src_offset)) { 379 398 length = (sg_d[i].length - dst_offset); 380 - #ifdef DEBUG_RAMDISK_MCP 381 - printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d" 399 + 400 + pr_debug("Step 1 - sg_d[%d]: %p length: %d" 382 401 " offset: %u sg_s[%d].length: %u\n", i, 383 402 &sg_d[i], sg_d[i].length, sg_d[i].offset, j, 384 403 sg_s[j].length); 385 - printk(KERN_INFO "Step 1 - length: %u dst_offset: %u" 404 + pr_debug("Step 1 - length: %u dst_offset: %u" 386 405 " src_offset: %u\n", length, dst_offset, 387 406 src_offset); 388 - #endif 407 + 389 408 if (length > req->rd_size) 390 409 length = req->rd_size; 391 410 ··· 402 421 page_end = 0; 403 422 } else { 404 423 length = (sg_s[j].length - src_offset); 405 - #ifdef DEBUG_RAMDISK_MCP 406 - printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d" 424 + 425 + pr_debug("Step 2 - sg_d[%d]: %p length: %d" 407 426 " offset: %u sg_s[%d].length: %u\n", i, 408 427 &sg_d[i], sg_d[i].length, sg_d[i].offset, 409 428 j, sg_s[j].length); 410 - printk(KERN_INFO "Step 2 - length: %u dst_offset: %u" 429 + pr_debug("Step 2 - length: %u dst_offset: %u" 411 430 " src_offset: %u\n", length, dst_offset, 412 431 src_offset); 413 - #endif 432 + 414 433 if (length > req->rd_size) 415 434 length = req->rd_size; 416 435 ··· 434 453 435 454 memcpy(dst, src, length); 436 455 437 - #ifdef DEBUG_RAMDISK_MCP 438 - printk(KERN_INFO "page: %u, remaining size: %u, length: %u," 456 + pr_debug("page: %u, remaining size: %u, length: %u," 439 457 " i: %u, j: %u\n", req->rd_page, 440 458 (req->rd_size - length), length, i, j); 441 - #endif 459 + 442 460 req->rd_size -= length; 443 - if (!(req->rd_size)) 461 + if (!req->rd_size) 444 462 return 0; 445 
463 446 464 if (!page_end) 447 465 continue; 448 466 449 467 if (++req->rd_page <= table->page_end_offset) { 450 - #ifdef DEBUG_RAMDISK_MCP 451 - printk(KERN_INFO "page: %u in same page table\n", 468 + pr_debug("page: %u in same page table\n", 452 469 req->rd_page); 453 - #endif 454 470 continue; 455 471 } 456 - #ifdef DEBUG_RAMDISK_MCP 457 - printk(KERN_INFO "getting new page table for page: %u\n", 472 + 473 + pr_debug("getting new page table for page: %u\n", 458 474 req->rd_page); 459 - #endif 475 + 460 476 table = rd_get_sg_table(dev, req->rd_page); 461 - if (!(table)) 477 + if (!table) 462 478 return -EINVAL; 463 479 464 480 sg_s = &table->sg_table[j = 0]; ··· 471 493 static int rd_MEMCPY_write(struct rd_request *req) 472 494 { 473 495 struct se_task *task = &req->rd_task; 474 - struct rd_dev *dev = req->rd_dev; 496 + struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; 475 497 struct rd_dev_sg_table *table; 476 498 struct scatterlist *sg_d, *sg_s; 477 499 void *dst, *src; ··· 480 502 u32 rd_offset = req->rd_offset; 481 503 482 504 table = rd_get_sg_table(dev, req->rd_page); 483 - if (!(table)) 505 + if (!table) 484 506 return -EINVAL; 485 507 486 508 table_sg_end = (table->page_end_offset - req->rd_page); 487 509 sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; 488 510 sg_s = task->task_sg; 489 - #ifdef DEBUG_RAMDISK_MCP 490 - printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u," 511 + 512 + pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u," 491 513 " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, 492 514 req->rd_page, req->rd_offset); 493 - #endif 515 + 494 516 dst_offset = rd_offset; 495 517 496 518 while (req->rd_size) { 497 519 if ((sg_s[i].length - src_offset) < 498 520 (sg_d[j].length - dst_offset)) { 499 521 length = (sg_s[i].length - src_offset); 500 - #ifdef DEBUG_RAMDISK_MCP 501 - printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d" 522 + 523 + pr_debug("Step 1 - sg_s[%d]: %p length: %d" 502 524 " offset: %d 
sg_d[%d].length: %u\n", i, 503 525 &sg_s[i], sg_s[i].length, sg_s[i].offset, 504 526 j, sg_d[j].length); 505 - printk(KERN_INFO "Step 1 - length: %u src_offset: %u" 527 + pr_debug("Step 1 - length: %u src_offset: %u" 506 528 " dst_offset: %u\n", length, src_offset, 507 529 dst_offset); 508 - #endif 530 + 509 531 if (length > req->rd_size) 510 532 length = req->rd_size; 511 533 ··· 522 544 page_end = 0; 523 545 } else { 524 546 length = (sg_d[j].length - dst_offset); 525 - #ifdef DEBUG_RAMDISK_MCP 526 - printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d" 547 + 548 + pr_debug("Step 2 - sg_s[%d]: %p length: %d" 527 549 " offset: %d sg_d[%d].length: %u\n", i, 528 550 &sg_s[i], sg_s[i].length, sg_s[i].offset, 529 551 j, sg_d[j].length); 530 - printk(KERN_INFO "Step 2 - length: %u src_offset: %u" 552 + pr_debug("Step 2 - length: %u src_offset: %u" 531 553 " dst_offset: %u\n", length, src_offset, 532 554 dst_offset); 533 - #endif 555 + 534 556 if (length > req->rd_size) 535 557 length = req->rd_size; 536 558 ··· 554 576 555 577 memcpy(dst, src, length); 556 578 557 - #ifdef DEBUG_RAMDISK_MCP 558 - printk(KERN_INFO "page: %u, remaining size: %u, length: %u," 579 + pr_debug("page: %u, remaining size: %u, length: %u," 559 580 " i: %u, j: %u\n", req->rd_page, 560 581 (req->rd_size - length), length, i, j); 561 - #endif 582 + 562 583 req->rd_size -= length; 563 - if (!(req->rd_size)) 584 + if (!req->rd_size) 564 585 return 0; 565 586 566 587 if (!page_end) 567 588 continue; 568 589 569 590 if (++req->rd_page <= table->page_end_offset) { 570 - #ifdef DEBUG_RAMDISK_MCP 571 - printk(KERN_INFO "page: %u in same page table\n", 591 + pr_debug("page: %u in same page table\n", 572 592 req->rd_page); 573 - #endif 574 593 continue; 575 594 } 576 - #ifdef DEBUG_RAMDISK_MCP 577 - printk(KERN_INFO "getting new page table for page: %u\n", 595 + 596 + pr_debug("getting new page table for page: %u\n", 578 597 req->rd_page); 579 - #endif 598 + 580 599 table = rd_get_sg_table(dev, 
req->rd_page); 581 - if (!(table)) 600 + if (!table) 582 601 return -EINVAL; 583 602 584 603 sg_d = &table->sg_table[j = 0]; ··· 610 635 if (ret != 0) 611 636 return ret; 612 637 613 - task->task_scsi_status = GOOD; 614 - transport_complete_task(task, 1); 615 - 616 - return PYX_TRANSPORT_SENT_TO_TRANSPORT; 617 - } 618 - 619 - /* rd_DIRECT_with_offset(): 620 - * 621 - * 622 - */ 623 - static int rd_DIRECT_with_offset( 624 - struct se_task *task, 625 - struct list_head *se_mem_list, 626 - u32 *se_mem_cnt, 627 - u32 *task_offset) 628 - { 629 - struct rd_request *req = RD_REQ(task); 630 - struct rd_dev *dev = req->rd_dev; 631 - struct rd_dev_sg_table *table; 632 - struct se_mem *se_mem; 633 - struct scatterlist *sg_s; 634 - u32 j = 0, set_offset = 1; 635 - u32 get_next_table = 0, offset_length, table_sg_end; 636 - 637 - table = rd_get_sg_table(dev, req->rd_page); 638 - if (!(table)) 639 - return -EINVAL; 640 - 641 - table_sg_end = (table->page_end_offset - req->rd_page); 642 - sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; 643 - #ifdef DEBUG_RAMDISK_DR 644 - printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n", 645 - (task->task_data_direction == DMA_TO_DEVICE) ? 
646 - "Write" : "Read", 647 - task->task_lba, req->rd_size, req->rd_page, req->rd_offset); 648 - #endif 649 - while (req->rd_size) { 650 - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); 651 - if (!(se_mem)) { 652 - printk(KERN_ERR "Unable to allocate struct se_mem\n"); 653 - return -ENOMEM; 654 - } 655 - INIT_LIST_HEAD(&se_mem->se_list); 656 - 657 - if (set_offset) { 658 - offset_length = sg_s[j].length - req->rd_offset; 659 - if (offset_length > req->rd_size) 660 - offset_length = req->rd_size; 661 - 662 - se_mem->se_page = sg_page(&sg_s[j++]); 663 - se_mem->se_off = req->rd_offset; 664 - se_mem->se_len = offset_length; 665 - 666 - set_offset = 0; 667 - get_next_table = (j > table_sg_end); 668 - goto check_eot; 669 - } 670 - 671 - offset_length = (req->rd_size < req->rd_offset) ? 672 - req->rd_size : req->rd_offset; 673 - 674 - se_mem->se_page = sg_page(&sg_s[j]); 675 - se_mem->se_len = offset_length; 676 - 677 - set_offset = 1; 678 - 679 - check_eot: 680 - #ifdef DEBUG_RAMDISK_DR 681 - printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u" 682 - " se_mem: %p, se_page: %p se_off: %u se_len: %u\n", 683 - req->rd_page, req->rd_size, offset_length, j, se_mem, 684 - se_mem->se_page, se_mem->se_off, se_mem->se_len); 685 - #endif 686 - list_add_tail(&se_mem->se_list, se_mem_list); 687 - (*se_mem_cnt)++; 688 - 689 - req->rd_size -= offset_length; 690 - if (!(req->rd_size)) 691 - goto out; 692 - 693 - if (!set_offset && !get_next_table) 694 - continue; 695 - 696 - if (++req->rd_page <= table->page_end_offset) { 697 - #ifdef DEBUG_RAMDISK_DR 698 - printk(KERN_INFO "page: %u in same page table\n", 699 - req->rd_page); 700 - #endif 701 - continue; 702 - } 703 - #ifdef DEBUG_RAMDISK_DR 704 - printk(KERN_INFO "getting new page table for page: %u\n", 705 - req->rd_page); 706 - #endif 707 - table = rd_get_sg_table(dev, req->rd_page); 708 - if (!(table)) 709 - return -EINVAL; 710 - 711 - sg_s = &table->sg_table[j = 0]; 712 - } 713 - 714 - out: 715 - 
task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; 716 - #ifdef DEBUG_RAMDISK_DR 717 - printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", 718 - *se_mem_cnt); 719 - #endif 720 - return 0; 721 - } 722 - 723 - /* rd_DIRECT_without_offset(): 724 - * 725 - * 726 - */ 727 - static int rd_DIRECT_without_offset( 728 - struct se_task *task, 729 - struct list_head *se_mem_list, 730 - u32 *se_mem_cnt, 731 - u32 *task_offset) 732 - { 733 - struct rd_request *req = RD_REQ(task); 734 - struct rd_dev *dev = req->rd_dev; 735 - struct rd_dev_sg_table *table; 736 - struct se_mem *se_mem; 737 - struct scatterlist *sg_s; 738 - u32 length, j = 0; 739 - 740 - table = rd_get_sg_table(dev, req->rd_page); 741 - if (!(table)) 742 - return -EINVAL; 743 - 744 - sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; 745 - #ifdef DEBUG_RAMDISK_DR 746 - printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n", 747 - (task->task_data_direction == DMA_TO_DEVICE) ? 748 - "Write" : "Read", 749 - task->task_lba, req->rd_size, req->rd_page); 750 - #endif 751 - while (req->rd_size) { 752 - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); 753 - if (!(se_mem)) { 754 - printk(KERN_ERR "Unable to allocate struct se_mem\n"); 755 - return -ENOMEM; 756 - } 757 - INIT_LIST_HEAD(&se_mem->se_list); 758 - 759 - length = (req->rd_size < sg_s[j].length) ? 
760 - req->rd_size : sg_s[j].length; 761 - 762 - se_mem->se_page = sg_page(&sg_s[j++]); 763 - se_mem->se_len = length; 764 - 765 - #ifdef DEBUG_RAMDISK_DR 766 - printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p," 767 - " se_page: %p se_off: %u se_len: %u\n", req->rd_page, 768 - req->rd_size, j, se_mem, se_mem->se_page, 769 - se_mem->se_off, se_mem->se_len); 770 - #endif 771 - list_add_tail(&se_mem->se_list, se_mem_list); 772 - (*se_mem_cnt)++; 773 - 774 - req->rd_size -= length; 775 - if (!(req->rd_size)) 776 - goto out; 777 - 778 - if (++req->rd_page <= table->page_end_offset) { 779 - #ifdef DEBUG_RAMDISK_DR 780 - printk("page: %u in same page table\n", 781 - req->rd_page); 782 - #endif 783 - continue; 784 - } 785 - #ifdef DEBUG_RAMDISK_DR 786 - printk(KERN_INFO "getting new page table for page: %u\n", 787 - req->rd_page); 788 - #endif 789 - table = rd_get_sg_table(dev, req->rd_page); 790 - if (!(table)) 791 - return -EINVAL; 792 - 793 - sg_s = &table->sg_table[j = 0]; 794 - } 795 - 796 - out: 797 - task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; 798 - #ifdef DEBUG_RAMDISK_DR 799 - printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", 800 - *se_mem_cnt); 801 - #endif 802 - return 0; 803 - } 804 - 805 - /* rd_DIRECT_do_se_mem_map(): 806 - * 807 - * 808 - */ 809 - static int rd_DIRECT_do_se_mem_map( 810 - struct se_task *task, 811 - struct list_head *se_mem_list, 812 - void *in_mem, 813 - struct se_mem *in_se_mem, 814 - struct se_mem **out_se_mem, 815 - u32 *se_mem_cnt, 816 - u32 *task_offset_in) 817 - { 818 - struct se_cmd *cmd = task->task_se_cmd; 819 - struct rd_request *req = RD_REQ(task); 820 - u32 task_offset = *task_offset_in; 821 - unsigned long long lba; 822 - int ret; 823 - int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size; 824 - 825 - lba = task->task_lba; 826 - req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE); 827 - req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size; 828 - 
req->rd_size = task->task_size; 829 - 830 - if (req->rd_offset) 831 - ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt, 832 - task_offset_in); 833 - else 834 - ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt, 835 - task_offset_in); 836 - 837 - if (ret < 0) 838 - return ret; 839 - 840 - if (cmd->se_tfo->task_sg_chaining == 0) 841 - return 0; 842 - /* 843 - * Currently prevent writers from multiple HW fabrics doing 844 - * pci_map_sg() to RD_DR's internal scatterlist memory. 845 - */ 846 - if (cmd->data_direction == DMA_TO_DEVICE) { 847 - printk(KERN_ERR "DMA_TO_DEVICE not supported for" 848 - " RAMDISK_DR with task_sg_chaining=1\n"); 849 - return -ENOSYS; 850 - } 851 - /* 852 - * Special case for if task_sg_chaining is enabled, then 853 - * we setup struct se_task->task_sg[], as it will be used by 854 - * transport_do_task_sg_chain() for creating chainged SGLs 855 - * across multiple struct se_task->task_sg[]. 856 - */ 857 - ret = transport_init_task_sg(task, 858 - list_first_entry(&cmd->t_mem_list, 859 - struct se_mem, se_list), 860 - task_offset); 861 - if (ret <= 0) 862 - return ret; 863 - 864 - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, 865 - list_first_entry(&cmd->t_mem_list, 866 - struct se_mem, se_list), 867 - out_se_mem, se_mem_cnt, task_offset_in); 868 - } 869 - 870 - /* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template) 871 - * 872 - * 873 - */ 874 - static int rd_DIRECT_do_task(struct se_task *task) 875 - { 876 - /* 877 - * At this point the locally allocated RD tables have been mapped 878 - * to struct se_mem elements in rd_DIRECT_do_se_mem_map(). 
879 - */ 880 638 task->task_scsi_status = GOOD; 881 639 transport_complete_task(task, 1); 882 640 ··· 660 952 case Opt_rd_pages: 661 953 match_int(args, &arg); 662 954 rd_dev->rd_page_count = arg; 663 - printk(KERN_INFO "RAMDISK: Referencing Page" 955 + pr_debug("RAMDISK: Referencing Page" 664 956 " Count: %u\n", rd_dev->rd_page_count); 665 957 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; 666 958 break; ··· 678 970 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 679 971 680 972 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { 681 - printk(KERN_INFO "Missing rd_pages= parameter\n"); 973 + pr_debug("Missing rd_pages= parameter\n"); 682 974 return -EINVAL; 683 975 } 684 976 ··· 730 1022 return blocks_long; 731 1023 } 732 1024 733 - static struct se_subsystem_api rd_dr_template = { 734 - .name = "rd_dr", 735 - .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, 736 - .attach_hba = rd_attach_hba, 737 - .detach_hba = rd_detach_hba, 738 - .allocate_virtdevice = rd_DIRECT_allocate_virtdevice, 739 - .create_virtdevice = rd_DIRECT_create_virtdevice, 740 - .free_device = rd_free_device, 741 - .alloc_task = rd_alloc_task, 742 - .do_task = rd_DIRECT_do_task, 743 - .free_task = rd_free_task, 744 - .check_configfs_dev_params = rd_check_configfs_dev_params, 745 - .set_configfs_dev_params = rd_set_configfs_dev_params, 746 - .show_configfs_dev_params = rd_show_configfs_dev_params, 747 - .get_cdb = rd_get_cdb, 748 - .get_device_rev = rd_get_device_rev, 749 - .get_device_type = rd_get_device_type, 750 - .get_blocks = rd_get_blocks, 751 - .do_se_mem_map = rd_DIRECT_do_se_mem_map, 752 - }; 753 - 754 1025 static struct se_subsystem_api rd_mcp_template = { 755 1026 .name = "rd_mcp", 756 1027 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, ··· 754 1067 { 755 1068 int ret; 756 1069 757 - ret = transport_subsystem_register(&rd_dr_template); 758 - if (ret < 0) 759 - return ret; 760 - 761 1070 ret = transport_subsystem_register(&rd_mcp_template); 762 1071 if (ret < 0) { 763 - 
transport_subsystem_release(&rd_dr_template); 764 1072 return ret; 765 1073 } 766 1074 ··· 764 1082 765 1083 void rd_module_exit(void) 766 1084 { 767 - transport_subsystem_release(&rd_dr_template); 768 1085 transport_subsystem_release(&rd_mcp_template); 769 1086 }
-2
drivers/target/target_core_rd.h
··· 32 32 u32 rd_page_count; 33 33 /* Scatterlist count */ 34 34 u32 rd_size; 35 - /* Ramdisk device */ 36 - struct rd_dev *rd_dev; 37 35 } ____cacheline_aligned; 38 36 39 37 struct rd_dev_sg_table {
+26 -33
drivers/target/target_core_tmr.c
··· 41 41 #include "target_core_alua.h" 42 42 #include "target_core_pr.h" 43 43 44 - #define DEBUG_LUN_RESET 45 - #ifdef DEBUG_LUN_RESET 46 - #define DEBUG_LR(x...) printk(KERN_INFO x) 47 - #else 48 - #define DEBUG_LR(x...) 49 - #endif 50 - 51 44 struct se_tmr_req *core_tmr_alloc_req( 52 45 struct se_cmd *se_cmd, 53 46 void *fabric_tmr_ptr, ··· 50 57 51 58 tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? 52 59 GFP_ATOMIC : GFP_KERNEL); 53 - if (!(tmr)) { 54 - printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); 60 + if (!tmr) { 61 + pr_err("Unable to allocate struct se_tmr_req\n"); 55 62 return ERR_PTR(-ENOMEM); 56 63 } 57 64 tmr->task_cmd = se_cmd; ··· 86 93 int tas, 87 94 int fe_count) 88 95 { 89 - if (!(fe_count)) { 96 + if (!fe_count) { 90 97 transport_cmd_finish_abort(cmd, 1); 91 98 return; 92 99 } 93 100 /* 94 101 * TASK ABORTED status (TAS) bit support 95 102 */ 96 - if (((tmr_nacl != NULL) && 103 + if ((tmr_nacl && 97 104 (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) 98 105 transport_send_task_abort(cmd); 99 106 ··· 134 141 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; 135 142 tmr_tpg = tmr->task_cmd->se_sess->se_tpg; 136 143 if (tmr_nacl && tmr_tpg) { 137 - DEBUG_LR("LUN_RESET: TMR caller fabric: %s" 144 + pr_debug("LUN_RESET: TMR caller fabric: %s" 138 145 " initiator port %s\n", 139 146 tmr_tpg->se_tpg_tfo->get_fabric_name(), 140 147 tmr_nacl->initiatorname); 141 148 } 142 149 } 143 - DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", 150 + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", 144 151 (preempt_and_abort_list) ? 
"Preempt" : "TMR", 145 152 dev->transport->name, tas); 146 153 /* ··· 156 163 continue; 157 164 158 165 cmd = tmr_p->task_cmd; 159 - if (!(cmd)) { 160 - printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n"); 166 + if (!cmd) { 167 + pr_err("Unable to locate struct se_cmd for TMR\n"); 161 168 continue; 162 169 } 163 170 /* ··· 165 172 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action 166 173 * skip non regisration key matching TMRs. 167 174 */ 168 - if ((preempt_and_abort_list != NULL) && 175 + if (preempt_and_abort_list && 169 176 (core_scsi3_check_cdb_abort_and_preempt( 170 177 preempt_and_abort_list, cmd) != 0)) 171 178 continue; 172 179 spin_unlock_irq(&dev->se_tmr_lock); 173 180 174 181 spin_lock_irqsave(&cmd->t_state_lock, flags); 175 - if (!(atomic_read(&cmd->t_transport_active))) { 182 + if (!atomic_read(&cmd->t_transport_active)) { 176 183 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 177 184 spin_lock_irq(&dev->se_tmr_lock); 178 185 continue; ··· 182 189 spin_lock_irq(&dev->se_tmr_lock); 183 190 continue; 184 191 } 185 - DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x," 192 + pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," 186 193 " Response: 0x%02x, t_state: %d\n", 187 194 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 188 195 tmr_p->function, tmr_p->response, cmd->t_state); ··· 217 224 list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, 218 225 t_state_list) { 219 226 if (!task->task_se_cmd) { 220 - printk(KERN_ERR "task->task_se_cmd is NULL!\n"); 227 + pr_err("task->task_se_cmd is NULL!\n"); 221 228 continue; 222 229 } 223 230 cmd = task->task_se_cmd; ··· 226 233 * For PREEMPT_AND_ABORT usage, only process commands 227 234 * with a matching reservation key. 
228 235 */ 229 - if ((preempt_and_abort_list != NULL) && 236 + if (preempt_and_abort_list && 230 237 (core_scsi3_check_cdb_abort_and_preempt( 231 238 preempt_and_abort_list, cmd) != 0)) 232 239 continue; ··· 241 248 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 242 249 243 250 spin_lock_irqsave(&cmd->t_state_lock, flags); 244 - DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" 251 + pr_debug("LUN_RESET: %s cmd: %p task: %p" 245 252 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" 246 253 "def_t_state: %d/%d cdb: 0x%02x\n", 247 254 (preempt_and_abort_list) ? "Preempt" : "", cmd, task, 248 255 cmd->se_tfo->get_task_tag(cmd), 0, 249 256 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 250 257 cmd->deferred_t_state, cmd->t_task_cdb[0]); 251 - DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" 258 + pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" 252 259 " t_task_cdbs: %d t_task_cdbs_left: %d" 253 260 " t_task_cdbs_sent: %d -- t_transport_active: %d" 254 261 " t_transport_stop: %d t_transport_sent: %d\n", ··· 265 272 spin_unlock_irqrestore( 266 273 &cmd->t_state_lock, flags); 267 274 268 - DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" 275 + pr_debug("LUN_RESET: Waiting for task: %p to shutdown" 269 276 " for dev: %p\n", task, dev); 270 277 wait_for_completion(&task->task_stop_comp); 271 - DEBUG_LR("LUN_RESET Completed task: %p shutdown for" 278 + pr_debug("LUN_RESET Completed task: %p shutdown for" 272 279 " dev: %p\n", task, dev); 273 280 spin_lock_irqsave(&cmd->t_state_lock, flags); 274 281 atomic_dec(&cmd->t_task_cdbs_left); ··· 281 288 } 282 289 __transport_stop_task_timer(task, &flags); 283 290 284 - if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { 291 + if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { 285 292 spin_unlock_irqrestore( 286 293 &cmd->t_state_lock, flags); 287 - DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" 294 + pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" 288 295 " t_task_cdbs_ex_left: %d\n", 
task, dev, 289 296 atomic_read(&cmd->t_task_cdbs_ex_left)); 290 297 ··· 294 301 fe_count = atomic_read(&cmd->t_fe_count); 295 302 296 303 if (atomic_read(&cmd->t_transport_active)) { 297 - DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" 304 + pr_debug("LUN_RESET: got t_transport_active = 1 for" 298 305 " task: %p, t_fe_count: %d dev: %p\n", task, 299 306 fe_count, dev); 300 307 atomic_set(&cmd->t_transport_aborted, 1); ··· 305 312 spin_lock_irqsave(&dev->execute_task_lock, flags); 306 313 continue; 307 314 } 308 - DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," 315 + pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," 309 316 " t_fe_count: %d dev: %p\n", task, fe_count, dev); 310 317 atomic_set(&cmd->t_transport_aborted, 1); 311 318 spin_unlock_irqrestore(&cmd->t_state_lock, flags); ··· 328 335 * For PREEMPT_AND_ABORT usage, only process commands 329 336 * with a matching reservation key. 330 337 */ 331 - if ((preempt_and_abort_list != NULL) && 338 + if (preempt_and_abort_list && 332 339 (core_scsi3_check_cdb_abort_and_preempt( 333 340 preempt_and_abort_list, cmd) != 0)) 334 341 continue; ··· 343 350 list_del(&cmd->se_queue_node); 344 351 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 345 352 346 - DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" 353 + pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" 347 354 " %d t_fe_count: %d\n", (preempt_and_abort_list) ? 
348 355 "Preempt" : "", cmd, cmd->t_state, 349 356 atomic_read(&cmd->t_fe_count)); ··· 361 368 * Clear any legacy SPC-2 reservation when called during 362 369 * LOGICAL UNIT RESET 363 370 */ 364 - if (!(preempt_and_abort_list) && 371 + if (!preempt_and_abort_list && 365 372 (dev->dev_flags & DF_SPC2_RESERVATIONS)) { 366 373 spin_lock(&dev->dev_reservation_lock); 367 374 dev->dev_reserved_node_acl = NULL; 368 375 dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 369 376 spin_unlock(&dev->dev_reservation_lock); 370 - printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); 377 + pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); 371 378 } 372 379 373 380 spin_lock_irq(&dev->stats_lock); 374 381 dev->num_resets++; 375 382 spin_unlock_irq(&dev->stats_lock); 376 383 377 - DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", 384 + pr_debug("LUN_RESET: %s for [%s] Complete\n", 378 385 (preempt_and_abort_list) ? "Preempt" : "TMR", 379 386 dev->transport->name); 380 387 return 0;
+37 -38
drivers/target/target_core_tpg.c
··· 72 72 continue; 73 73 74 74 if (!deve->se_lun) { 75 - printk(KERN_ERR "%s device entries device pointer is" 75 + pr_err("%s device entries device pointer is" 76 76 " NULL, but Initiator has access.\n", 77 77 tpg->se_tpg_tfo->get_fabric_name()); 78 78 continue; ··· 86 86 spin_lock(&lun->lun_acl_lock); 87 87 list_for_each_entry_safe(acl, acl_tmp, 88 88 &lun->lun_acl_list, lacl_list) { 89 - if (!(strcmp(acl->initiatorname, 90 - nacl->initiatorname)) && 91 - (acl->mapped_lun == deve->mapped_lun)) 89 + if (!strcmp(acl->initiatorname, nacl->initiatorname) && 90 + (acl->mapped_lun == deve->mapped_lun)) 92 91 break; 93 92 } 94 93 95 94 if (!acl) { 96 - printk(KERN_ERR "Unable to locate struct se_lun_acl for %s," 95 + pr_err("Unable to locate struct se_lun_acl for %s," 97 96 " mapped_lun: %u\n", nacl->initiatorname, 98 97 deve->mapped_lun); 99 98 spin_unlock(&lun->lun_acl_lock); ··· 120 121 struct se_node_acl *acl; 121 122 122 123 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 123 - if (!(strcmp(acl->initiatorname, initiatorname))) 124 + if (!strcmp(acl->initiatorname, initiatorname)) 124 125 return acl; 125 126 } 126 127 ··· 139 140 140 141 spin_lock_bh(&tpg->acl_node_lock); 141 142 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 142 - if (!(strcmp(acl->initiatorname, initiatorname)) && 143 - (!(acl->dynamic_node_acl))) { 143 + if (!strcmp(acl->initiatorname, initiatorname) && 144 + !acl->dynamic_node_acl) { 144 145 spin_unlock_bh(&tpg->acl_node_lock); 145 146 return acl; 146 147 } ··· 176 177 * By default in LIO-Target $FABRIC_MOD, 177 178 * demo_mode_write_protect is ON, or READ_ONLY; 178 179 */ 179 - if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) { 180 + if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { 180 181 if (dev->dev_flags & DF_READ_ONLY) 181 182 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 182 183 else ··· 192 193 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 193 194 } 194 195 195 - printk(KERN_INFO 
"TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" 196 + pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" 196 197 " access for LUN in Demo Mode\n", 197 198 tpg->se_tpg_tfo->get_fabric_name(), 198 199 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, ··· 215 216 struct se_node_acl *acl) 216 217 { 217 218 if (!acl->queue_depth) { 218 - printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," 219 + pr_err("Queue depth for %s Initiator Node: %s is 0," 219 220 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 220 221 acl->initiatorname); 221 222 acl->queue_depth = 1; ··· 235 236 236 237 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * 237 238 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); 238 - if (!(nacl->device_list)) { 239 - printk(KERN_ERR "Unable to allocate memory for" 239 + if (!nacl->device_list) { 240 + pr_err("Unable to allocate memory for" 240 241 " struct se_node_acl->device_list\n"); 241 242 return -ENOMEM; 242 243 } ··· 264 265 struct se_node_acl *acl; 265 266 266 267 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 267 - if ((acl)) 268 + if (acl) 268 269 return acl; 269 270 270 - if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))) 271 + if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) 271 272 return NULL; 272 273 273 274 acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); 274 - if (!(acl)) 275 + if (!acl) 275 276 return NULL; 276 277 277 278 INIT_LIST_HEAD(&acl->acl_list); ··· 306 307 tpg->num_node_acls++; 307 308 spin_unlock_bh(&tpg->acl_node_lock); 308 309 309 - printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 310 + pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 310 311 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 311 312 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 312 313 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); ··· 356 357 357 358 spin_lock_bh(&tpg->acl_node_lock); 358 359 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 359 - if ((acl)) { 
360 + if (acl) { 360 361 if (acl->dynamic_node_acl) { 361 362 acl->dynamic_node_acl = 0; 362 - printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" 363 + pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 363 364 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 364 365 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 365 366 spin_unlock_bh(&tpg->acl_node_lock); ··· 374 375 goto done; 375 376 } 376 377 377 - printk(KERN_ERR "ACL entry for %s Initiator" 378 + pr_err("ACL entry for %s Initiator" 378 379 " Node %s already exists for TPG %u, ignoring" 379 380 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 380 381 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); ··· 383 384 } 384 385 spin_unlock_bh(&tpg->acl_node_lock); 385 386 386 - if (!(se_nacl)) { 387 - printk("struct se_node_acl pointer is NULL\n"); 387 + if (!se_nacl) { 388 + pr_err("struct se_node_acl pointer is NULL\n"); 388 389 return ERR_PTR(-EINVAL); 389 390 } 390 391 /* ··· 424 425 spin_unlock_bh(&tpg->acl_node_lock); 425 426 426 427 done: 427 - printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 428 + pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 428 429 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 429 430 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 430 431 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); ··· 462 463 /* 463 464 * Determine if the session needs to be closed by our context. 
464 465 */ 465 - if (!(tpg->se_tpg_tfo->shutdown_session(sess))) 466 + if (!tpg->se_tpg_tfo->shutdown_session(sess)) 466 467 continue; 467 468 468 469 spin_unlock_bh(&tpg->session_lock); ··· 480 481 core_clear_initiator_node_from_tpg(acl, tpg); 481 482 core_free_device_list_for_node(acl, tpg); 482 483 483 - printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 484 + pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 484 485 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 485 486 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 486 487 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); ··· 505 506 506 507 spin_lock_bh(&tpg->acl_node_lock); 507 508 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 508 - if (!(acl)) { 509 - printk(KERN_ERR "Access Control List entry for %s Initiator" 509 + if (!acl) { 510 + pr_err("Access Control List entry for %s Initiator" 510 511 " Node %s does not exists for TPG %hu, ignoring" 511 512 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 512 513 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); ··· 525 526 continue; 526 527 527 528 if (!force) { 528 - printk(KERN_ERR "Unable to change queue depth for %s" 529 + pr_err("Unable to change queue depth for %s" 529 530 " Initiator Node: %s while session is" 530 531 " operational. To forcefully change the queue" 531 532 " depth and force session reinstatement" ··· 542 543 /* 543 544 * Determine if the session needs to be closed by our context. 
544 545 */ 545 - if (!(tpg->se_tpg_tfo->shutdown_session(sess))) 546 + if (!tpg->se_tpg_tfo->shutdown_session(sess)) 546 547 continue; 547 548 548 549 init_sess = sess; ··· 585 586 if (init_sess) 586 587 tpg->se_tpg_tfo->close_session(init_sess); 587 588 588 - printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" 589 + pr_debug("Successfuly changed queue depth to: %d for Initiator" 589 590 " Node: %s on %s Target Portal Group: %u\n", queue_depth, 590 591 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 591 592 tpg->se_tpg_tfo->tpg_get_tag(tpg)); ··· 643 644 644 645 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * 645 646 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); 646 - if (!(se_tpg->tpg_lun_list)) { 647 - printk(KERN_ERR "Unable to allocate struct se_portal_group->" 647 + if (!se_tpg->tpg_lun_list) { 648 + pr_err("Unable to allocate struct se_portal_group->" 648 649 "tpg_lun_list\n"); 649 650 return -ENOMEM; 650 651 } ··· 685 686 list_add_tail(&se_tpg->se_tpg_node, &tpg_list); 686 687 spin_unlock_bh(&tpg_lock); 687 688 688 - printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" 689 + pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for" 689 690 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), 690 691 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 691 692 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? ··· 699 700 { 700 701 struct se_node_acl *nacl, *nacl_tmp; 701 702 702 - printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" 703 + pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group" 703 704 " for endpoint: %s Portal Tag %u\n", 704 705 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 
705 706 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), ··· 748 749 struct se_lun *lun; 749 750 750 751 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 751 - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 752 + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 752 753 "-1: %u for Target Portal Group: %u\n", 753 754 tpg->se_tpg_tfo->get_fabric_name(), 754 755 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, ··· 759 760 spin_lock(&tpg->tpg_lun_lock); 760 761 lun = &tpg->tpg_lun_list[unpacked_lun]; 761 762 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { 762 - printk(KERN_ERR "TPG Logical Unit Number: %u is already active" 763 + pr_err("TPG Logical Unit Number: %u is already active" 763 764 " on %s Target Portal Group: %u, ignoring request.\n", 764 765 unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), 765 766 tpg->se_tpg_tfo->tpg_get_tag(tpg)); ··· 807 808 struct se_lun *lun; 808 809 809 810 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 810 - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 811 + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 811 812 "-1: %u for Target Portal Group: %u\n", 812 813 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 813 814 TRANSPORT_MAX_LUNS_PER_TPG-1, ··· 818 819 spin_lock(&tpg->tpg_lun_lock); 819 820 lun = &tpg->tpg_lun_list[unpacked_lun]; 820 821 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 821 - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 822 + pr_err("%s Logical Unit Number: %u is not active on" 822 823 " Target Portal Group: %u, ignoring request.\n", 823 824 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 824 825 tpg->se_tpg_tfo->tpg_get_tag(tpg));
+240 -450
drivers/target/target_core_transport.c
··· 58 58 #include "target_core_scdb.h" 59 59 #include "target_core_ua.h" 60 60 61 - /* #define DEBUG_CDB_HANDLER */ 62 - #ifdef DEBUG_CDB_HANDLER 63 - #define DEBUG_CDB_H(x...) printk(KERN_INFO x) 64 - #else 65 - #define DEBUG_CDB_H(x...) 66 - #endif 67 - 68 - /* #define DEBUG_CMD_MAP */ 69 - #ifdef DEBUG_CMD_MAP 70 - #define DEBUG_CMD_M(x...) printk(KERN_INFO x) 71 - #else 72 - #define DEBUG_CMD_M(x...) 73 - #endif 74 - 75 - /* #define DEBUG_MEM_ALLOC */ 76 - #ifdef DEBUG_MEM_ALLOC 77 - #define DEBUG_MEM(x...) printk(KERN_INFO x) 78 - #else 79 - #define DEBUG_MEM(x...) 80 - #endif 81 - 82 - /* #define DEBUG_MEM2_ALLOC */ 83 - #ifdef DEBUG_MEM2_ALLOC 84 - #define DEBUG_MEM2(x...) printk(KERN_INFO x) 85 - #else 86 - #define DEBUG_MEM2(x...) 87 - #endif 88 - 89 - /* #define DEBUG_SG_CALC */ 90 - #ifdef DEBUG_SG_CALC 91 - #define DEBUG_SC(x...) printk(KERN_INFO x) 92 - #else 93 - #define DEBUG_SC(x...) 94 - #endif 95 - 96 - /* #define DEBUG_SE_OBJ */ 97 - #ifdef DEBUG_SE_OBJ 98 - #define DEBUG_SO(x...) printk(KERN_INFO x) 99 - #else 100 - #define DEBUG_SO(x...) 101 - #endif 102 - 103 - /* #define DEBUG_CMD_VOL */ 104 - #ifdef DEBUG_CMD_VOL 105 - #define DEBUG_VOL(x...) printk(KERN_INFO x) 106 - #else 107 - #define DEBUG_VOL(x...) 108 - #endif 109 - 110 - /* #define DEBUG_CMD_STOP */ 111 - #ifdef DEBUG_CMD_STOP 112 - #define DEBUG_CS(x...) printk(KERN_INFO x) 113 - #else 114 - #define DEBUG_CS(x...) 115 - #endif 116 - 117 - /* #define DEBUG_PASSTHROUGH */ 118 - #ifdef DEBUG_PASSTHROUGH 119 - #define DEBUG_PT(x...) printk(KERN_INFO x) 120 - #else 121 - #define DEBUG_PT(x...) 122 - #endif 123 - 124 - /* #define DEBUG_TASK_STOP */ 125 - #ifdef DEBUG_TASK_STOP 126 - #define DEBUG_TS(x...) printk(KERN_INFO x) 127 - #else 128 - #define DEBUG_TS(x...) 129 - #endif 130 - 131 - /* #define DEBUG_TRANSPORT_STOP */ 132 - #ifdef DEBUG_TRANSPORT_STOP 133 - #define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) 134 - #else 135 - #define DEBUG_TRANSPORT_S(x...) 
136 - #endif 137 - 138 - /* #define DEBUG_TASK_FAILURE */ 139 - #ifdef DEBUG_TASK_FAILURE 140 - #define DEBUG_TF(x...) printk(KERN_INFO x) 141 - #else 142 - #define DEBUG_TF(x...) 143 - #endif 144 - 145 - /* #define DEBUG_DEV_OFFLINE */ 146 - #ifdef DEBUG_DEV_OFFLINE 147 - #define DEBUG_DO(x...) printk(KERN_INFO x) 148 - #else 149 - #define DEBUG_DO(x...) 150 - #endif 151 - 152 - /* #define DEBUG_TASK_STATE */ 153 - #ifdef DEBUG_TASK_STATE 154 - #define DEBUG_TSTATE(x...) printk(KERN_INFO x) 155 - #else 156 - #define DEBUG_TSTATE(x...) 157 - #endif 158 - 159 - /* #define DEBUG_STATUS_THR */ 160 - #ifdef DEBUG_STATUS_THR 161 - #define DEBUG_ST(x...) printk(KERN_INFO x) 162 - #else 163 - #define DEBUG_ST(x...) 164 - #endif 165 - 166 - /* #define DEBUG_TASK_TIMEOUT */ 167 - #ifdef DEBUG_TASK_TIMEOUT 168 - #define DEBUG_TT(x...) printk(KERN_INFO x) 169 - #else 170 - #define DEBUG_TT(x...) 171 - #endif 172 - 173 - /* #define DEBUG_GENERIC_REQUEST_FAILURE */ 174 - #ifdef DEBUG_GENERIC_REQUEST_FAILURE 175 - #define DEBUG_GRF(x...) printk(KERN_INFO x) 176 - #else 177 - #define DEBUG_GRF(x...) 178 - #endif 179 - 180 - /* #define DEBUG_SAM_TASK_ATTRS */ 181 - #ifdef DEBUG_SAM_TASK_ATTRS 182 - #define DEBUG_STA(x...) printk(KERN_INFO x) 183 - #else 184 - #define DEBUG_STA(x...) 
185 - #endif 186 - 187 61 static int sub_api_initialized; 188 62 189 63 static struct kmem_cache *se_cmd_cache; ··· 99 225 { 100 226 se_cmd_cache = kmem_cache_create("se_cmd_cache", 101 227 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); 102 - if (!(se_cmd_cache)) { 103 - printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); 228 + if (!se_cmd_cache) { 229 + pr_err("kmem_cache_create for struct se_cmd failed\n"); 104 230 goto out; 105 231 } 106 232 se_tmr_req_cache = kmem_cache_create("se_tmr_cache", 107 233 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 108 234 0, NULL); 109 - if (!(se_tmr_req_cache)) { 110 - printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" 235 + if (!se_tmr_req_cache) { 236 + pr_err("kmem_cache_create() for struct se_tmr_req" 111 237 " failed\n"); 112 238 goto out; 113 239 } 114 240 se_sess_cache = kmem_cache_create("se_sess_cache", 115 241 sizeof(struct se_session), __alignof__(struct se_session), 116 242 0, NULL); 117 - if (!(se_sess_cache)) { 118 - printk(KERN_ERR "kmem_cache_create() for struct se_session" 243 + if (!se_sess_cache) { 244 + pr_err("kmem_cache_create() for struct se_session" 119 245 " failed\n"); 120 246 goto out; 121 247 } 122 248 se_ua_cache = kmem_cache_create("se_ua_cache", 123 249 sizeof(struct se_ua), __alignof__(struct se_ua), 124 250 0, NULL); 125 - if (!(se_ua_cache)) { 126 - printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); 251 + if (!se_ua_cache) { 252 + pr_err("kmem_cache_create() for struct se_ua failed\n"); 127 253 goto out; 128 254 } 129 255 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 130 256 sizeof(struct t10_pr_registration), 131 257 __alignof__(struct t10_pr_registration), 0, NULL); 132 - if (!(t10_pr_reg_cache)) { 133 - printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" 258 + if (!t10_pr_reg_cache) { 259 + pr_err("kmem_cache_create() for struct t10_pr_registration" 134 260 " failed\n"); 135 261 goto out; 136 262 } 137 
263 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 138 264 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 139 265 0, NULL); 140 - if (!(t10_alua_lu_gp_cache)) { 141 - printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" 266 + if (!t10_alua_lu_gp_cache) { 267 + pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 142 268 " failed\n"); 143 269 goto out; 144 270 } 145 271 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 146 272 sizeof(struct t10_alua_lu_gp_member), 147 273 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 148 - if (!(t10_alua_lu_gp_mem_cache)) { 149 - printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" 274 + if (!t10_alua_lu_gp_mem_cache) { 275 + pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 150 276 "cache failed\n"); 151 277 goto out; 152 278 } 153 279 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 154 280 sizeof(struct t10_alua_tg_pt_gp), 155 281 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 156 - if (!(t10_alua_tg_pt_gp_cache)) { 157 - printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" 282 + if (!t10_alua_tg_pt_gp_cache) { 283 + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 158 284 "cache failed\n"); 159 285 goto out; 160 286 } ··· 163 289 sizeof(struct t10_alua_tg_pt_gp_member), 164 290 __alignof__(struct t10_alua_tg_pt_gp_member), 165 291 0, NULL); 166 - if (!(t10_alua_tg_pt_gp_mem_cache)) { 167 - printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" 292 + if (!t10_alua_tg_pt_gp_mem_cache) { 293 + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 168 294 "mem_t failed\n"); 169 295 goto out; 170 296 } ··· 240 366 241 367 ret = request_module("target_core_iblock"); 242 368 if (ret != 0) 243 - printk(KERN_ERR "Unable to load target_core_iblock\n"); 369 + pr_err("Unable to load target_core_iblock\n"); 244 370 245 371 ret = request_module("target_core_file"); 246 372 if (ret != 0) 247 - printk(KERN_ERR 
"Unable to load target_core_file\n"); 373 + pr_err("Unable to load target_core_file\n"); 248 374 249 375 ret = request_module("target_core_pscsi"); 250 376 if (ret != 0) 251 - printk(KERN_ERR "Unable to load target_core_pscsi\n"); 377 + pr_err("Unable to load target_core_pscsi\n"); 252 378 253 379 ret = request_module("target_core_stgt"); 254 380 if (ret != 0) 255 - printk(KERN_ERR "Unable to load target_core_stgt\n"); 381 + pr_err("Unable to load target_core_stgt\n"); 256 382 257 383 return 0; 258 384 } ··· 279 405 struct se_session *se_sess; 280 406 281 407 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 282 - if (!(se_sess)) { 283 - printk(KERN_ERR "Unable to allocate struct se_session from" 408 + if (!se_sess) { 409 + pr_err("Unable to allocate struct se_session from" 284 410 " se_sess_cache\n"); 285 411 return ERR_PTR(-ENOMEM); 286 412 } ··· 334 460 } 335 461 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 336 462 337 - printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 463 + pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 338 464 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 339 465 } 340 466 EXPORT_SYMBOL(__transport_register_session); ··· 359 485 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 360 486 */ 361 487 se_nacl = se_sess->se_node_acl; 362 - if ((se_nacl)) { 488 + if (se_nacl) { 363 489 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 364 490 list_del(&se_sess->sess_acl_list); 365 491 /* ··· 390 516 struct se_portal_group *se_tpg = se_sess->se_tpg; 391 517 struct se_node_acl *se_nacl; 392 518 393 - if (!(se_tpg)) { 519 + if (!se_tpg) { 394 520 transport_free_session(se_sess); 395 521 return; 396 522 } ··· 406 532 * struct se_node_acl if it had been previously dynamically generated. 
407 533 */ 408 534 se_nacl = se_sess->se_node_acl; 409 - if ((se_nacl)) { 535 + if (se_nacl) { 410 536 spin_lock_bh(&se_tpg->acl_node_lock); 411 537 if (se_nacl->dynamic_node_acl) { 412 - if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 413 - se_tpg))) { 538 + if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 539 + se_tpg)) { 414 540 list_del(&se_nacl->acl_list); 415 541 se_tpg->num_node_acls--; 416 542 spin_unlock_bh(&se_tpg->acl_node_lock); ··· 427 553 428 554 transport_free_session(se_sess); 429 555 430 - printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", 556 + pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 431 557 se_tpg->se_tpg_tfo->get_fabric_name()); 432 558 } 433 559 EXPORT_SYMBOL(transport_deregister_session); ··· 443 569 444 570 list_for_each_entry(task, &cmd->t_task_list, t_list) { 445 571 dev = task->se_dev; 446 - if (!(dev)) 572 + if (!dev) 447 573 continue; 448 574 449 575 if (atomic_read(&task->task_active)) 450 576 continue; 451 577 452 - if (!(atomic_read(&task->task_state_active))) 578 + if (!atomic_read(&task->task_state_active)) 453 579 continue; 454 580 455 581 spin_lock_irqsave(&dev->execute_task_lock, flags); 456 582 list_del(&task->t_state_list); 457 - DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", 458 - cmd->se_tfo->tfo_get_task_tag(cmd), dev, task); 583 + pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", 584 + cmd->se_tfo->get_task_tag(cmd), dev, task); 459 585 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 460 586 461 587 atomic_set(&task->task_state_active, 0); ··· 484 610 * command for LUN shutdown purposes. 485 611 */ 486 612 if (atomic_read(&cmd->transport_lun_stop)) { 487 - DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)" 613 + pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" 488 614 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, 489 615 cmd->se_tfo->get_task_tag(cmd)); 490 616 ··· 503 629 * this command for frontend exceptions. 
504 630 */ 505 631 if (atomic_read(&cmd->t_transport_stop)) { 506 - DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) ==" 632 + pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" 507 633 " TRUE for ITT: 0x%08x\n", __func__, __LINE__, 508 634 cmd->se_tfo->get_task_tag(cmd)); 509 635 ··· 569 695 return; 570 696 571 697 spin_lock_irqsave(&cmd->t_state_lock, flags); 572 - if (!(atomic_read(&cmd->transport_dev_active))) { 698 + if (!atomic_read(&cmd->transport_dev_active)) { 573 699 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 574 700 goto check_lun; 575 701 } ··· 584 710 list_del(&cmd->se_lun_node); 585 711 atomic_set(&cmd->transport_lun_active, 0); 586 712 #if 0 587 - printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" 713 + pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" 588 714 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); 589 715 #endif 590 716 } ··· 671 797 unsigned long flags; 672 798 673 799 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 674 - if (!(atomic_read(&cmd->t_transport_queue_active))) { 800 + if (!atomic_read(&cmd->t_transport_queue_active)) { 675 801 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 676 802 return; 677 803 } ··· 686 812 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 687 813 688 814 if (atomic_read(&cmd->t_transport_queue_active)) { 689 - printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", 815 + pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", 690 816 cmd->se_tfo->get_task_tag(cmd), 691 817 atomic_read(&cmd->t_transport_queue_active)); 692 818 } ··· 727 853 int t_state; 728 854 unsigned long flags; 729 855 #if 0 730 - printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, 856 + pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, 731 857 cmd->t_task_cdb[0], dev); 732 858 #endif 733 859 if (dev) ··· 773 899 * the processing thread. 
774 900 */ 775 901 if (atomic_read(&task->task_timeout)) { 776 - if (!(atomic_dec_and_test( 777 - &cmd->t_task_cdbs_timeout_left))) { 902 + if (!atomic_dec_and_test( 903 + &cmd->t_task_cdbs_timeout_left)) { 778 904 spin_unlock_irqrestore(&cmd->t_state_lock, 779 905 flags); 780 906 return; ··· 792 918 * struct se_task from struct se_cmd will complete itself into the 793 919 * device queue depending upon int success. 794 920 */ 795 - if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { 921 + if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { 796 922 if (!success) 797 923 cmd->t_tasks_failed = 1; 798 924 ··· 850 976 &task_prev->t_execute_list : 851 977 &dev->execute_task_list); 852 978 853 - DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" 979 + pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" 854 980 " in execution queue\n", 855 - T_TASK(task->task_se_cmd)->t_task_cdb[0]); 981 + task->task_se_cmd->t_task_cdb[0]); 856 982 return 1; 857 983 } 858 984 /* ··· 894 1020 895 1021 atomic_set(&task->task_state_active, 1); 896 1022 897 - DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", 1023 + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 898 1024 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), 899 1025 task, dev); 900 1026 } ··· 916 1042 list_add_tail(&task->t_state_list, &dev->state_task_list); 917 1043 atomic_set(&task->task_state_active, 1); 918 1044 919 - DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", 920 - task->se_cmd->se_tfo->get_task_tag( 1045 + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 1046 + task->task_se_cmd->se_tfo->get_task_tag( 921 1047 task->task_se_cmd), task, dev); 922 1048 923 1049 spin_unlock(&dev->execute_task_lock); ··· 986 1112 smp_mb__after_atomic_dec(); 987 1113 spin_unlock_irq(&dev->qf_cmd_lock); 988 1114 989 - printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue" 1115 + pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 990 1116 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 
991 1117 (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : 992 1118 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" ··· 1071 1197 spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, 1072 1198 flags); 1073 1199 1074 - printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," 1200 + pr_err("Releasing ITT: 0x%08x, i_state: %u," 1075 1201 " t_state: %u directly\n", 1076 1202 cmd->se_tfo->get_task_tag(cmd), 1077 1203 cmd->se_tfo->get_cmd_state(cmd), t_state); ··· 1138 1264 if (p_buf) 1139 1265 strncpy(p_buf, buf, p_buf_len); 1140 1266 else 1141 - printk(KERN_INFO "%s", buf); 1267 + pr_debug("%s", buf); 1142 1268 } 1143 1269 1144 1270 void ··· 1188 1314 if (p_buf) 1189 1315 strncpy(p_buf, buf, p_buf_len); 1190 1316 else 1191 - printk("%s", buf); 1317 + pr_debug("%s", buf); 1192 1318 1193 1319 return ret; 1194 1320 } ··· 1248 1374 return -EINVAL; 1249 1375 strncpy(p_buf, buf, p_buf_len); 1250 1376 } else { 1251 - printk("%s", buf); 1377 + pr_debug("%s", buf); 1252 1378 } 1253 1379 1254 1380 return ret; ··· 1299 1425 if (p_buf) 1300 1426 strncpy(p_buf, buf, p_buf_len); 1301 1427 else 1302 - printk("%s", buf); 1428 + pr_debug("%s", buf); 1303 1429 1304 1430 return ret; 1305 1431 } ··· 1356 1482 } 1357 1483 1358 1484 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; 1359 - DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" 1485 + pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" 1360 1486 " device\n", dev->transport->name, 1361 1487 dev->transport->get_device_rev(dev)); 1362 1488 } ··· 1368 1494 /* 1369 1495 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1370 1496 */ 1371 - printk(" Vendor: "); 1497 + pr_debug(" Vendor: "); 1372 1498 for (i = 0; i < 8; i++) 1373 1499 if (wwn->vendor[i] >= 0x20) 1374 - printk("%c", wwn->vendor[i]); 1500 + pr_debug("%c", wwn->vendor[i]); 1375 1501 else 1376 - printk(" "); 1502 + pr_debug(" "); 1377 1503 1378 - printk(" Model: "); 1504 + pr_debug(" Model: "); 1379 1505 for (i = 
0; i < 16; i++) 1380 1506 if (wwn->model[i] >= 0x20) 1381 - printk("%c", wwn->model[i]); 1507 + pr_debug("%c", wwn->model[i]); 1382 1508 else 1383 - printk(" "); 1509 + pr_debug(" "); 1384 1510 1385 - printk(" Revision: "); 1511 + pr_debug(" Revision: "); 1386 1512 for (i = 0; i < 4; i++) 1387 1513 if (wwn->revision[i] >= 0x20) 1388 - printk("%c", wwn->revision[i]); 1514 + pr_debug("%c", wwn->revision[i]); 1389 1515 else 1390 - printk(" "); 1516 + pr_debug(" "); 1391 1517 1392 - printk("\n"); 1518 + pr_debug("\n"); 1393 1519 1394 1520 device_type = dev->transport->get_device_type(dev); 1395 - printk(" Type: %s ", scsi_device_type(device_type)); 1396 - printk(" ANSI SCSI revision: %02x\n", 1521 + pr_debug(" Type: %s ", scsi_device_type(device_type)); 1522 + pr_debug(" ANSI SCSI revision: %02x\n", 1397 1523 dev->transport->get_device_rev(dev)); 1398 1524 } 1399 1525 ··· 1411 1537 struct se_device *dev; 1412 1538 1413 1539 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); 1414 - if (!(dev)) { 1415 - printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); 1540 + if (!dev) { 1541 + pr_err("Unable to allocate memory for se_dev_t\n"); 1416 1542 return NULL; 1417 1543 } 1418 1544 ··· 1482 1608 dev->process_thread = kthread_run(transport_processing_thread, dev, 1483 1609 "LIO_%s", dev->transport->name); 1484 1610 if (IS_ERR(dev->process_thread)) { 1485 - printk(KERN_ERR "Unable to create kthread: LIO_%s\n", 1611 + pr_err("Unable to create kthread: LIO_%s\n", 1486 1612 dev->transport->name); 1487 1613 goto out; 1488 1614 } ··· 1500 1626 */ 1501 1627 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1502 1628 if (!inquiry_prod || !inquiry_rev) { 1503 - printk(KERN_ERR "All non TCM/pSCSI plugins require" 1629 + pr_err("All non TCM/pSCSI plugins require" 1504 1630 " INQUIRY consts\n"); 1505 1631 goto out; 1506 1632 } ··· 1562 1688 struct se_task *task; 1563 1689 struct se_device *dev = cmd->se_dev; 1564 1690 1565 - task = 
dev->transport->alloc_task(cmd); 1691 + task = dev->transport->alloc_task(cmd->t_task_cdb); 1566 1692 if (!task) { 1567 - printk(KERN_ERR "Unable to allocate struct se_task\n"); 1693 + pr_err("Unable to allocate struct se_task\n"); 1568 1694 return NULL; 1569 1695 } 1570 1696 ··· 1625 1751 return 0; 1626 1752 1627 1753 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1628 - DEBUG_STA("SAM Task Attribute ACA" 1754 + pr_debug("SAM Task Attribute ACA" 1629 1755 " emulation is not supported\n"); 1630 1756 return -EINVAL; 1631 1757 } ··· 1635 1761 */ 1636 1762 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); 1637 1763 smp_mb__after_atomic_inc(); 1638 - DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1764 + pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1639 1765 cmd->se_ordered_id, cmd->sam_task_attr, 1640 - TRANSPORT(cmd->se_dev)->name); 1766 + cmd->se_dev->transport->name); 1641 1767 return 0; 1642 1768 } 1643 1769 ··· 1678 1804 * for VARIABLE_LENGTH_CMD 1679 1805 */ 1680 1806 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1681 - printk(KERN_ERR "Received SCSI CDB with command_size: %d that" 1807 + pr_err("Received SCSI CDB with command_size: %d that" 1682 1808 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1683 1809 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1684 1810 return -EINVAL; ··· 1691 1817 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1692 1818 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1693 1819 GFP_KERNEL); 1694 - if (!(cmd->t_task_cdb)) { 1695 - printk(KERN_ERR "Unable to allocate cmd->t_task_cdb" 1820 + if (!cmd->t_task_cdb) { 1821 + pr_err("Unable to allocate cmd->t_task_cdb" 1696 1822 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1697 1823 scsi_command_size(cdb), 1698 1824 (unsigned long)sizeof(cmd->__t_task_cdb)); ··· 1738 1864 { 1739 1865 if (!cmd->se_lun) { 1740 1866 dump_stack(); 1741 - printk(KERN_ERR "cmd->se_lun is NULL\n"); 1867 + pr_err("cmd->se_lun is 
NULL\n"); 1742 1868 return -EINVAL; 1743 1869 } 1744 1870 ··· 1756 1882 { 1757 1883 if (!cmd->se_lun) { 1758 1884 dump_stack(); 1759 - printk(KERN_ERR "cmd->se_lun is NULL\n"); 1885 + pr_err("cmd->se_lun is NULL\n"); 1760 1886 return -EINVAL; 1761 1887 } 1762 1888 if (in_interrupt()) { 1763 1889 dump_stack(); 1764 - printk(KERN_ERR "transport_generic_handle_cdb cannot be called" 1890 + pr_err("transport_generic_handle_cdb cannot be called" 1765 1891 " from interrupt context\n"); 1766 1892 return -EINVAL; 1767 1893 } ··· 1780 1906 { 1781 1907 if (!cmd->se_lun) { 1782 1908 dump_stack(); 1783 - printk(KERN_ERR "cmd->se_lun is NULL\n"); 1909 + pr_err("cmd->se_lun is NULL\n"); 1784 1910 return -EINVAL; 1785 1911 } 1786 1912 ··· 1849 1975 unsigned long flags; 1850 1976 int ret = 0; 1851 1977 1852 - DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", 1978 + pr_debug("ITT[0x%08x] - Stopping tasks\n", 1853 1979 cmd->se_tfo->get_task_tag(cmd)); 1854 1980 1855 1981 /* ··· 1858 1984 spin_lock_irqsave(&cmd->t_state_lock, flags); 1859 1985 list_for_each_entry_safe(task, task_tmp, 1860 1986 &cmd->t_task_list, t_list) { 1861 - DEBUG_TS("task_no[%d] - Processing task %p\n", 1987 + pr_debug("task_no[%d] - Processing task %p\n", 1862 1988 task->task_no, task); 1863 1989 /* 1864 1990 * If the struct se_task has not been sent and is not active, ··· 1871 1997 transport_remove_task_from_execute_queue(task, 1872 1998 task->se_dev); 1873 1999 1874 - DEBUG_TS("task_no[%d] - Removed from execute queue\n", 2000 + pr_debug("task_no[%d] - Removed from execute queue\n", 1875 2001 task->task_no); 1876 2002 spin_lock_irqsave(&cmd->t_state_lock, flags); 1877 2003 continue; ··· 1886 2012 spin_unlock_irqrestore(&cmd->t_state_lock, 1887 2013 flags); 1888 2014 1889 - DEBUG_TS("task_no[%d] - Waiting to complete\n", 2015 + pr_debug("task_no[%d] - Waiting to complete\n", 1890 2016 task->task_no); 1891 2017 wait_for_completion(&task->task_stop_comp); 1892 - DEBUG_TS("task_no[%d] - Stopped successfully\n", 2018 + 
pr_debug("task_no[%d] - Stopped successfully\n", 1893 2019 task->task_no); 1894 2020 1895 2021 spin_lock_irqsave(&cmd->t_state_lock, flags); ··· 1898 2024 atomic_set(&task->task_active, 0); 1899 2025 atomic_set(&task->task_stop, 0); 1900 2026 } else { 1901 - DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); 2027 + pr_debug("task_no[%d] - Did nothing\n", task->task_no); 1902 2028 ret++; 1903 2029 } 1904 2030 ··· 1920 2046 { 1921 2047 int ret = 0; 1922 2048 1923 - DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 2049 + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1924 2050 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1925 2051 cmd->t_task_cdb[0]); 1926 - DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" 2052 + pr_debug("-----[ i_state: %d t_state/def_t_state:" 1927 2053 " %d/%d transport_error_status: %d\n", 1928 2054 cmd->se_tfo->get_cmd_state(cmd), 1929 2055 cmd->t_state, cmd->deferred_t_state, 1930 2056 cmd->transport_error_status); 1931 - DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" 2057 + pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" 1932 2058 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 1933 2059 " t_transport_active: %d t_transport_stop: %d" 1934 - " t_transport_sent: %d\n", cmd->t_task_cdbs, 2060 + " t_transport_sent: %d\n", cmd->t_task_list_num, 1935 2061 atomic_read(&cmd->t_task_cdbs_left), 1936 2062 atomic_read(&cmd->t_task_cdbs_sent), 1937 2063 atomic_read(&cmd->t_task_cdbs_ex_left), ··· 2020 2146 */ 2021 2147 break; 2022 2148 default: 2023 - printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", 2149 + pr_err("Unknown transport error for CDB 0x%02x: %d\n", 2024 2150 cmd->t_task_cdb[0], 2025 2151 cmd->transport_error_status); 2026 2152 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; ··· 2038 2164 2039 2165 check_stop: 2040 2166 transport_lun_remove_cmd(cmd); 2041 - if (!(transport_cmd_check_stop_to_fabric(cmd))) 2167 + if 
(!transport_cmd_check_stop_to_fabric(cmd)) 2042 2168 ; 2043 2169 return; 2044 2170 ··· 2052 2178 unsigned long flags; 2053 2179 2054 2180 spin_lock_irqsave(&cmd->t_state_lock, flags); 2055 - if (!(atomic_read(&cmd->t_transport_timeout))) { 2181 + if (!atomic_read(&cmd->t_transport_timeout)) { 2056 2182 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2057 2183 return; 2058 2184 } ··· 2136 2262 struct se_cmd *cmd = task->task_se_cmd; 2137 2263 unsigned long flags; 2138 2264 2139 - DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); 2265 + pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); 2140 2266 2141 2267 spin_lock_irqsave(&cmd->t_state_lock, flags); 2142 2268 if (task->task_flags & TF_STOP) { ··· 2148 2274 /* 2149 2275 * Determine if transport_complete_task() has already been called. 2150 2276 */ 2151 - if (!(atomic_read(&task->task_active))) { 2152 - DEBUG_TT("transport task: %p cmd: %p timeout task_active" 2277 + if (!atomic_read(&task->task_active)) { 2278 + pr_debug("transport task: %p cmd: %p timeout task_active" 2153 2279 " == 0\n", task, cmd); 2154 2280 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2155 2281 return; ··· 2164 2290 task->task_scsi_status = 1; 2165 2291 2166 2292 if (atomic_read(&task->task_stop)) { 2167 - DEBUG_TT("transport task: %p cmd: %p timeout task_stop" 2293 + pr_debug("transport task: %p cmd: %p timeout task_stop" 2168 2294 " == 1\n", task, cmd); 2169 2295 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2170 2296 complete(&task->task_stop_comp); 2171 2297 return; 2172 2298 } 2173 2299 2174 - if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { 2175 - DEBUG_TT("transport task: %p cmd: %p timeout non zero" 2300 + if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { 2301 + pr_debug("transport task: %p cmd: %p timeout non zero" 2176 2302 " t_task_cdbs_left\n", task, cmd); 2177 2303 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2178 2304 return; 2179 2305 } 2180 - 
DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", 2306 + pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", 2181 2307 task, cmd); 2182 2308 2183 2309 cmd->t_state = TRANSPORT_COMPLETE_FAILURE; ··· 2200 2326 * If the task_timeout is disabled, exit now. 2201 2327 */ 2202 2328 timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; 2203 - if (!(timeout)) 2329 + if (!timeout) 2204 2330 return; 2205 2331 2206 2332 init_timer(&task->task_timer); ··· 2211 2337 task->task_flags |= TF_RUNNING; 2212 2338 add_timer(&task->task_timer); 2213 2339 #if 0 2214 - printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" 2340 + pr_debug("Starting task timer for cmd: %p task: %p seconds:" 2215 2341 " %d\n", task->task_se_cmd, task, timeout); 2216 2342 #endif 2217 2343 } ··· 2223 2349 { 2224 2350 struct se_cmd *cmd = task->task_se_cmd; 2225 2351 2226 - if (!(task->task_flags & TF_RUNNING)) 2352 + if (!task->task_flags & TF_RUNNING) 2227 2353 return; 2228 2354 2229 2355 task->task_flags |= TF_STOP; ··· 2278 2404 if (cmd->sam_task_attr == MSG_HEAD_TAG) { 2279 2405 atomic_inc(&cmd->se_dev->dev_hoq_count); 2280 2406 smp_mb__after_atomic_inc(); 2281 - DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" 2407 + pr_debug("Added HEAD_OF_QUEUE for CDB:" 2282 2408 " 0x%02x, se_ordered_id: %u\n", 2283 - cmd->_task_cdb[0], 2409 + cmd->t_task_cdb[0], 2284 2410 cmd->se_ordered_id); 2285 2411 return 1; 2286 2412 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { ··· 2292 2418 atomic_inc(&cmd->se_dev->dev_ordered_sync); 2293 2419 smp_mb__after_atomic_inc(); 2294 2420 2295 - DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" 2421 + pr_debug("Added ORDERED for CDB: 0x%02x to ordered" 2296 2422 " list, se_ordered_id: %u\n", 2297 2423 cmd->t_task_cdb[0], 2298 2424 cmd->se_ordered_id); ··· 2301 2427 * no other older commands exist that need to be 2302 2428 * completed first. 
2303 2429 */ 2304 - if (!(atomic_read(&cmd->se_dev->simple_cmds))) 2430 + if (!atomic_read(&cmd->se_dev->simple_cmds)) 2305 2431 return 1; 2306 2432 } else { 2307 2433 /* ··· 2326 2452 &cmd->se_dev->delayed_cmd_list); 2327 2453 spin_unlock(&cmd->se_dev->delayed_cmd_lock); 2328 2454 2329 - DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" 2455 + pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 2330 2456 " delayed CMD list, se_ordered_id: %u\n", 2331 2457 cmd->t_task_cdb[0], cmd->sam_task_attr, 2332 2458 cmd->se_ordered_id); ··· 2360 2486 * Call transport_cmd_check_stop() to see if a fabric exception 2361 2487 * has occurred that prevents execution. 2362 2488 */ 2363 - if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { 2489 + if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { 2364 2490 /* 2365 2491 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE 2366 2492 * attribute for the tasks of the received struct se_cmd CDB ··· 2651 2777 return sectors; 2652 2778 } 2653 2779 #if 0 2654 - printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" 2780 + pr_debug("Returning block_size: %u, sectors: %u == %u for" 2655 2781 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, 2656 2782 dev->se_sub_dev->se_dev_attrib.block_size * sectors, 2657 2783 dev->transport->name); ··· 2706 2832 * 5) transfer the resulting XOR data to the data-in buffer. 
2707 2833 */ 2708 2834 buf = kmalloc(cmd->data_length, GFP_KERNEL); 2709 - if (!(buf)) { 2710 - printk(KERN_ERR "Unable to allocate xor_callback buf\n"); 2835 + if (!buf) { 2836 + pr_err("Unable to allocate xor_callback buf\n"); 2711 2837 return; 2712 2838 } 2713 2839 /* ··· 2767 2893 continue; 2768 2894 2769 2895 dev = task->se_dev; 2770 - if (!(dev)) 2896 + if (!dev) 2771 2897 continue; 2772 2898 2773 2899 if (!dev->transport->get_sense_buffer) { 2774 - printk(KERN_ERR "dev->transport->get_sense_buffer" 2900 + pr_err("dev->transport->get_sense_buffer" 2775 2901 " is NULL\n"); 2776 2902 continue; 2777 2903 } 2778 2904 2779 2905 sense_buffer = dev->transport->get_sense_buffer(task); 2780 - if (!(sense_buffer)) { 2781 - printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" 2906 + if (!sense_buffer) { 2907 + pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" 2782 2908 " sense buffer for task with sense\n", 2783 2909 cmd->se_tfo->get_task_tag(cmd), task->task_no); 2784 2910 continue; ··· 2795 2921 cmd->scsi_sense_length = 2796 2922 (TRANSPORT_SENSE_BUFFER + offset); 2797 2923 2798 - printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" 2924 + pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" 2799 2925 " and sense\n", 2800 2926 dev->se_hba->hba_id, dev->transport->name, 2801 2927 cmd->scsi_status); ··· 2843 2969 2844 2970 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); 2845 2971 2846 - if ((cmd->t_task_lba + sectors) > 2847 - transport_dev_end_lba(dev)) { 2848 - printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" 2972 + if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { 2973 + pr_err("LBA: %llu Sectors: %u exceeds" 2849 2974 " transport_dev_end_lba(): %llu\n", 2850 2975 cmd->t_task_lba, sectors, 2851 2976 transport_dev_end_lba(dev)); 2852 - printk(KERN_ERR " We should return CHECK_CONDITION" 2977 + pr_err(" We should return CHECK_CONDITION" 2853 2978 " but we don't yet\n"); 2854 2979 return 0; 2855 2980 } ··· 2899 3026 */ 
2900 3027 if (ret > 0) { 2901 3028 #if 0 2902 - printk(KERN_INFO "[%s]: ALUA TG Port not available," 3029 + pr_debug("[%s]: ALUA TG Port not available," 2903 3030 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", 2904 3031 cmd->se_tfo->get_fabric_name(), alua_ascq); 2905 3032 #endif ··· 3065 3192 if (sector_ret) 3066 3193 goto out_unsupported_cdb; 3067 3194 3068 - if (sectors != 0) 3195 + if (sectors) 3069 3196 size = transport_get_size(sectors, cdb, cmd); 3070 - else 3071 - size = dev->se_sub_dev->se_dev_attrib.block_size; 3197 + else { 3198 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 3199 + " supported\n"); 3200 + goto out_invalid_cdb_field; 3201 + } 3072 3202 3073 3203 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 3074 3204 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; ··· 3083 3207 break; 3084 3208 3085 3209 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { 3086 - printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" 3210 + pr_err("WRITE_SAME PBDATA and LBDATA" 3087 3211 " bits not supported for Block Discard" 3088 3212 " Emulation\n"); 3089 3213 goto out_invalid_cdb_field; ··· 3093 3217 * tpws with the UNMAP=1 bit set. 
3094 3218 */ 3095 3219 if (!(cdb[10] & 0x08)) { 3096 - printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" 3220 + pr_err("WRITE_SAME w/o UNMAP bit not" 3097 3221 " supported for Block Discard Emulation\n"); 3098 3222 goto out_invalid_cdb_field; 3099 3223 } 3100 3224 break; 3101 3225 default: 3102 - printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" 3226 + pr_err("VARIABLE_LENGTH_CMD service action" 3103 3227 " 0x%04x not supported\n", service_action); 3104 3228 goto out_unsupported_cdb; 3105 3229 } ··· 3345 3469 if (sector_ret) 3346 3470 goto out_unsupported_cdb; 3347 3471 3348 - if (sectors != 0) 3472 + if (sectors) 3349 3473 size = transport_get_size(sectors, cdb, cmd); 3350 - else 3351 - size = dev->se_sub_dev->se_dev_attrib.block_size; 3474 + else { 3475 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 3476 + goto out_invalid_cdb_field; 3477 + } 3352 3478 3353 3479 cmd->t_task_lba = get_unaligned_be16(&cdb[2]); 3354 3480 passthrough = (dev->transport->transport_type == ··· 3362 3484 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and 3363 3485 * TCM/FILEIO subsystem plugin backstores. 3364 3486 */ 3365 - if (!(passthrough)) { 3487 + if (!passthrough) { 3366 3488 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { 3367 - printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" 3489 + pr_err("WRITE_SAME PBDATA and LBDATA" 3368 3490 " bits not supported for Block Discard" 3369 3491 " Emulation\n"); 3370 3492 goto out_invalid_cdb_field; ··· 3374 3496 * tpws with the UNMAP=1 bit set. 
3375 3497 */ 3376 3498 if (!(cdb[1] & 0x08)) { 3377 - printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " 3499 + pr_err("WRITE_SAME w/o UNMAP bit not " 3378 3500 " supported for Block Discard Emulation\n"); 3379 3501 goto out_invalid_cdb_field; 3380 3502 } ··· 3410 3532 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3411 3533 break; 3412 3534 default: 3413 - printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" 3535 + pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" 3414 3536 " 0x%02x, sending CHECK_CONDITION.\n", 3415 3537 cmd->se_tfo->get_fabric_name(), cdb[0]); 3416 3538 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; ··· 3418 3540 } 3419 3541 3420 3542 if (size != cmd->data_length) { 3421 - printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" 3543 + pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 3422 3544 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 3423 3545 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 3424 3546 cmd->data_length, size, cdb[0]); ··· 3426 3548 cmd->cmd_spdtl = size; 3427 3549 3428 3550 if (cmd->data_direction == DMA_TO_DEVICE) { 3429 - printk(KERN_ERR "Rejecting underflow/overflow" 3551 + pr_err("Rejecting underflow/overflow" 3430 3552 " WRITE data\n"); 3431 3553 goto out_invalid_cdb_field; 3432 3554 } ··· 3434 3556 * Reject READ_* or WRITE_* with overflow/underflow for 3435 3557 * type SCF_SCSI_DATA_SG_IO_CDB. 
3436 3558 */ 3437 - if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { 3438 - printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" 3559 + if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { 3560 + pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 3439 3561 " CDB on non 512-byte sector setup subsystem" 3440 3562 " plugin: %s\n", dev->transport->name); 3441 3563 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ ··· 3485 3607 atomic_dec(&dev->simple_cmds); 3486 3608 smp_mb__after_atomic_dec(); 3487 3609 dev->dev_cur_ordered_id++; 3488 - DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" 3610 + pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 3489 3611 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3490 3612 cmd->se_ordered_id); 3491 3613 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 3492 3614 atomic_dec(&dev->dev_hoq_count); 3493 3615 smp_mb__after_atomic_dec(); 3494 3616 dev->dev_cur_ordered_id++; 3495 - DEBUG_STA("Incremented dev_cur_ordered_id: %u for" 3617 + pr_debug("Incremented dev_cur_ordered_id: %u for" 3496 3618 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3497 3619 cmd->se_ordered_id); 3498 3620 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { ··· 3503 3625 spin_unlock(&dev->ordered_cmd_lock); 3504 3626 3505 3627 dev->dev_cur_ordered_id++; 3506 - DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" 3628 + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 3507 3629 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 3508 3630 } 3509 3631 /* ··· 3518 3640 list_del(&cmd_p->se_delayed_node); 3519 3641 spin_unlock(&dev->delayed_cmd_lock); 3520 3642 3521 - DEBUG_STA("Calling add_tasks() for" 3643 + pr_debug("Calling add_tasks() for" 3522 3644 " cmd_p: 0x%02x Task Attr: 0x%02x" 3523 3645 " Dormant -> Active, se_ordered_id: %u\n", 3524 - T_TASK(cmd_p)->t_task_cdb[0], 3646 + cmd_p->t_task_cdb[0], 3525 3647 cmd_p->sam_task_attr, cmd_p->se_ordered_id); 3526 3648 3527 3649 
transport_add_tasks_from_cmd(cmd_p); ··· 3690 3812 return; 3691 3813 3692 3814 queue_full: 3693 - printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p," 3815 + pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 3694 3816 " data_direction: %d\n", cmd, cmd->data_direction); 3695 3817 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); 3696 3818 } ··· 3715 3837 if (task->se_dev) 3716 3838 task->se_dev->transport->free_task(task); 3717 3839 else 3718 - printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", 3840 + pr_err("task[%u] - task->se_dev is NULL\n", 3719 3841 task->task_no); 3720 3842 spin_lock_irqsave(&cmd->t_state_lock, flags); 3721 3843 } 3722 3844 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3723 3845 } 3724 3846 3725 - static inline void transport_free_pages(struct se_cmd *cmd) 3847 + static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 3726 3848 { 3727 3849 struct scatterlist *sg; 3728 - int free_page = 1; 3729 3850 int count; 3730 3851 3852 + for_each_sg(sgl, sg, nents, count) 3853 + __free_page(sg_page(sg)); 3854 + 3855 + kfree(sgl); 3856 + } 3857 + 3858 + static inline void transport_free_pages(struct se_cmd *cmd) 3859 + { 3731 3860 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 3732 - free_page = 0; 3733 - if (cmd->se_dev->transport->do_se_mem_map) 3734 - free_page = 0; 3861 + return; 3735 3862 3736 - for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) { 3737 - /* 3738 - * Only called if 3739 - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, 3740 - */ 3741 - if (free_page) 3742 - __free_page(sg_page(sg)); 3743 - 3744 - } 3745 - if (free_page) 3746 - kfree(cmd->t_data_sg); 3863 + transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 3747 3864 cmd->t_data_sg = NULL; 3748 3865 cmd->t_data_nents = 0; 3749 3866 3750 - for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 3751 - /* 3752 - * Only called if 3753 - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, 
3754 - */ 3755 - if (free_page) 3756 - __free_page(sg_page(sg)); 3757 - 3758 - } 3759 - if (free_page) 3760 - kfree(cmd->t_bidi_data_sg); 3867 + transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 3761 3868 cmd->t_bidi_data_sg = NULL; 3762 3869 cmd->t_bidi_data_nents = 0; 3763 3870 } ··· 3758 3895 3759 3896 spin_lock_irqsave(&cmd->t_state_lock, flags); 3760 3897 if (atomic_read(&cmd->t_fe_count)) { 3761 - if (!(atomic_dec_and_test(&cmd->t_fe_count))) { 3898 + if (!atomic_dec_and_test(&cmd->t_fe_count)) { 3762 3899 spin_unlock_irqrestore(&cmd->t_state_lock, 3763 3900 flags); 3764 3901 return 1; ··· 3766 3903 } 3767 3904 3768 3905 if (atomic_read(&cmd->t_se_count)) { 3769 - if (!(atomic_dec_and_test(&cmd->t_se_count))) { 3906 + if (!atomic_dec_and_test(&cmd->t_se_count)) { 3770 3907 spin_unlock_irqrestore(&cmd->t_state_lock, 3771 3908 flags); 3772 3909 return 1; ··· 3785 3922 return; 3786 3923 3787 3924 spin_lock_irqsave(&cmd->t_state_lock, flags); 3788 - if (!(atomic_read(&cmd->transport_dev_active))) { 3925 + if (!atomic_read(&cmd->transport_dev_active)) { 3789 3926 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3790 3927 goto free_pages; 3791 3928 } ··· 3816 3953 } 3817 3954 3818 3955 spin_lock_irqsave(&cmd->t_state_lock, flags); 3819 - if (!(atomic_read(&cmd->transport_dev_active))) { 3956 + if (!atomic_read(&cmd->transport_dev_active)) { 3820 3957 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3821 3958 goto free_pages; 3822 3959 } ··· 3890 4027 DMA_FROM_DEVICE, 3891 4028 cmd->t_bidi_data_sg, 3892 4029 cmd->t_bidi_data_nents); 3893 - if (!rc) { 4030 + if (rc <= 0) { 3894 4031 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3895 4032 cmd->scsi_sense_reason = 3896 4033 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ··· 3909 4046 cmd->data_direction, 3910 4047 cmd->t_data_sg, 3911 4048 cmd->t_data_nents); 3912 - if (!task_cdbs) { 4049 + if (task_cdbs <= 0) { 3913 4050 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3914 4051 cmd->scsi_sense_reason = 3915 
4052 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ··· 3956 4093 unsigned int nents; 3957 4094 struct page *page; 3958 4095 int i = 0; 3959 - 3960 - /* 3961 - * If the device uses memory mapping this is enough. 3962 - */ 3963 - if (cmd->se_dev->transport->do_se_mem_map) 3964 - return 0; 3965 4096 3966 4097 nents = DIV_ROUND_UP(length, PAGE_SIZE); 3967 4098 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); ··· 4033 4176 4034 4177 if (!sg_first) { 4035 4178 sg_first = task->task_sg; 4036 - chained_nents = task->task_sg_num; 4179 + chained_nents = task->task_sg_nents; 4037 4180 } else { 4038 4181 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4039 - chained_nents += task->task_sg_num; 4182 + chained_nents += task->task_sg_nents; 4040 4183 } 4041 4184 4042 4185 sg_prev = task->task_sg; 4043 - sg_prev_nents = task->task_sg_num; 4186 + sg_prev_nents = task->task_sg_nents; 4044 4187 } 4045 4188 /* 4046 4189 * Setup the starting pointer and total t_tasks_sg_linked_no including ··· 4049 4192 cmd->t_tasks_sg_chained = sg_first; 4050 4193 cmd->t_tasks_sg_chained_no = chained_nents; 4051 4194 4052 - DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" 4195 + pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" 4053 4196 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, 4054 4197 cmd->t_tasks_sg_chained_no); 4055 4198 4056 4199 for_each_sg(cmd->t_tasks_sg_chained, sg, 4057 4200 cmd->t_tasks_sg_chained_no, i) { 4058 4201 4059 - DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n", 4202 + pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", 4060 4203 i, sg, sg_page(sg), sg->length, sg->offset); 4061 4204 if (sg_is_chain(sg)) 4062 - DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); 4205 + pr_debug("SG: %p sg_is_chain=1\n", sg); 4063 4206 if (sg_is_last(sg)) 4064 - DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); 4207 + pr_debug("SG: %p sg_is_last=1\n", sg); 4065 4208 } 4066 4209 } 4067 4210 EXPORT_SYMBOL(transport_do_task_sg_chain); ··· 4123 
4266 * It's so much easier and only a waste when task_count > 1. 4124 4267 * That is extremely rare. 4125 4268 */ 4126 - task->task_sg_num = sgl_nents; 4269 + task->task_sg_nents = sgl_nents; 4127 4270 if (cmd->se_tfo->task_sg_chaining) { 4128 - task->task_sg_num++; 4271 + task->task_sg_nents++; 4129 4272 task->task_padded_sg = 1; 4130 4273 } 4131 4274 4132 4275 task->task_sg = kmalloc(sizeof(struct scatterlist) * \ 4133 - task->task_sg_num, GFP_KERNEL); 4276 + task->task_sg_nents, GFP_KERNEL); 4134 4277 if (!task->task_sg) { 4135 4278 cmd->se_dev->transport->free_task(task); 4136 4279 return -ENOMEM; 4137 4280 } 4138 4281 4139 - sg_init_table(task->task_sg, task->task_sg_num); 4282 + sg_init_table(task->task_sg, task->task_sg_nents); 4140 4283 4141 4284 task_size = task->task_size; 4142 4285 4143 4286 /* Build new sgl, only up to task_size */ 4144 - for_each_sg(task->task_sg, sg, task->task_sg_num, count) { 4287 + for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { 4145 4288 if (cmd_sg->length > task_size) 4146 4289 break; 4147 4290 ··· 4168 4311 unsigned char *cdb; 4169 4312 struct se_task *task; 4170 4313 unsigned long flags; 4314 + int ret = 0; 4171 4315 4172 4316 task = transport_generic_get_task(cmd, cmd->data_direction); 4173 4317 if (!task) ··· 4189 4331 memcpy(task->task_sg, cmd->t_data_sg, 4190 4332 sizeof(struct scatterlist) * cmd->t_data_nents); 4191 4333 task->task_size = cmd->data_length; 4192 - task->task_sg_num = cmd->t_data_nents; 4334 + task->task_sg_nents = cmd->t_data_nents; 4193 4335 4194 4336 spin_lock_irqsave(&cmd->t_state_lock, flags); 4195 4337 list_add_tail(&task->t_list, &cmd->t_task_list); ··· 4197 4339 4198 4340 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { 4199 4341 if (dev->transport->map_task_SG) 4200 - return dev->transport->map_task_SG(task); 4201 - return 0; 4342 + ret = dev->transport->map_task_SG(task); 4202 4343 } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { 4203 4344 if (dev->transport->cdb_none) 4204 
- return dev->transport->cdb_none(task); 4205 - return 0; 4345 + ret = dev->transport->cdb_none(task); 4206 4346 } else { 4347 + pr_err("target: Unknown control cmd type!\n"); 4207 4348 BUG(); 4208 - return -ENOMEM; 4209 4349 } 4350 + 4351 + /* Success! Return number of tasks allocated */ 4352 + if (ret == 0) 4353 + return 1; 4354 + return ret; 4210 4355 } 4211 4356 4212 4357 static u32 transport_allocate_tasks( ··· 4219 4358 struct scatterlist *sgl, 4220 4359 unsigned int sgl_nents) 4221 4360 { 4222 - int ret; 4223 - 4224 - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 4361 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) 4225 4362 return transport_allocate_data_tasks(cmd, lba, data_direction, 4226 4363 sgl, sgl_nents); 4227 - } else { 4228 - ret = transport_allocate_control_task(cmd); 4229 - if (ret < 0) 4230 - return ret; 4231 - else 4232 - return 1; 4233 - } 4364 + else 4365 + return transport_allocate_control_task(cmd); 4366 + 4234 4367 } 4235 4368 4236 4369 ··· 4296 4441 */ 4297 4442 void transport_generic_process_write(struct se_cmd *cmd) 4298 4443 { 4299 - #if 0 4300 - /* 4301 - * Copy SCSI Presented DTL sector(s) from received buffers allocated to 4302 - * original EDTL 4303 - */ 4304 - if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 4305 - if (!cmd->t_tasks_se_num) { 4306 - unsigned char *dst, *buf = 4307 - (unsigned char *)cmd->t_task_buf; 4308 - 4309 - dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL); 4310 - if (!(dst)) { 4311 - printk(KERN_ERR "Unable to allocate memory for" 4312 - " WRITE underflow\n"); 4313 - transport_generic_request_failure(cmd, NULL, 4314 - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); 4315 - return; 4316 - } 4317 - memcpy(dst, buf, cmd->cmd_spdtl); 4318 - 4319 - kfree(cmd->t_task_buf); 4320 - cmd->t_task_buf = dst; 4321 - } else { 4322 - struct scatterlist *sg = 4323 - (struct scatterlist *sg)cmd->t_task_buf; 4324 - struct scatterlist *orig_sg; 4325 - 4326 - orig_sg = kzalloc(sizeof(struct scatterlist) * 4327 - cmd->t_tasks_se_num, 4328 
- GFP_KERNEL))) { 4329 - if (!(orig_sg)) { 4330 - printk(KERN_ERR "Unable to allocate memory" 4331 - " for WRITE underflow\n"); 4332 - transport_generic_request_failure(cmd, NULL, 4333 - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); 4334 - return; 4335 - } 4336 - 4337 - memcpy(orig_sg, cmd->t_task_buf, 4338 - sizeof(struct scatterlist) * 4339 - cmd->t_tasks_se_num); 4340 - 4341 - cmd->data_length = cmd->cmd_spdtl; 4342 - /* 4343 - * FIXME, clear out original struct se_task and state 4344 - * information. 4345 - */ 4346 - if (transport_generic_new_cmd(cmd) < 0) { 4347 - transport_generic_request_failure(cmd, NULL, 4348 - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); 4349 - kfree(orig_sg); 4350 - return; 4351 - } 4352 - 4353 - transport_memcpy_write_sg(cmd, orig_sg); 4354 - } 4355 - } 4356 - #endif 4357 4444 transport_execute_tasks(cmd); 4358 4445 } 4359 4446 EXPORT_SYMBOL(transport_generic_process_write); ··· 4351 4554 return PYX_TRANSPORT_WRITE_PENDING; 4352 4555 4353 4556 queue_full: 4354 - printk(KERN_INFO "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 4557 + pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 4355 4558 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 4356 4559 transport_handle_queue_full(cmd, cmd->se_dev, 4357 4560 transport_write_pending_qf); ··· 4383 4586 4384 4587 if (cmd->se_lun) { 4385 4588 #if 0 4386 - printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" 4589 + pr_debug("cmd: %p ITT: 0x%08x contains" 4387 4590 " cmd->se_lun\n", cmd, 4388 4591 cmd->se_tfo->get_task_tag(cmd)); 4389 4592 #endif ··· 4424 4627 spin_lock_irqsave(&cmd->t_state_lock, flags); 4425 4628 if (atomic_read(&cmd->t_transport_stop)) { 4426 4629 atomic_set(&cmd->transport_lun_stop, 0); 4427 - DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" 4630 + pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" 4428 4631 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); 4429 4632 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4430 4633 
transport_cmd_check_stop(cmd, 1, 0); ··· 4437 4640 4438 4641 ret = transport_stop_tasks_for_cmd(cmd); 4439 4642 4440 - DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" 4441 - " %d\n", cmd, cmd->t_task_cdbs, ret); 4643 + pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" 4644 + " %d\n", cmd, cmd->t_task_list_num, ret); 4442 4645 if (!ret) { 4443 - DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 4646 + pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 4444 4647 cmd->se_tfo->get_task_tag(cmd)); 4445 4648 wait_for_completion(&cmd->transport_lun_stop_comp); 4446 - DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", 4649 + pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", 4447 4650 cmd->se_tfo->get_task_tag(cmd)); 4448 4651 } 4449 4652 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); 4450 4653 4451 4654 return 0; 4452 4655 } 4453 - 4454 - /* #define DEBUG_CLEAR_LUN */ 4455 - #ifdef DEBUG_CLEAR_LUN 4456 - #define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) 4457 - #else 4458 - #define DEBUG_CLEAR_L(x...) 4459 - #endif 4460 4656 4461 4657 static void __transport_clear_lun_from_sessions(struct se_lun *lun) 4462 4658 { ··· 4472 4682 * progress for the iscsi_cmd_t. 
4473 4683 */ 4474 4684 spin_lock(&cmd->t_state_lock); 4475 - DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport" 4685 + pr_debug("SE_LUN[%d] - Setting cmd->transport" 4476 4686 "_lun_stop for ITT: 0x%08x\n", 4477 4687 cmd->se_lun->unpacked_lun, 4478 4688 cmd->se_tfo->get_task_tag(cmd)); ··· 4481 4691 4482 4692 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 4483 4693 4484 - if (!(cmd->se_lun)) { 4485 - printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", 4694 + if (!cmd->se_lun) { 4695 + pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", 4486 4696 cmd->se_tfo->get_task_tag(cmd), 4487 4697 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 4488 4698 BUG(); ··· 4491 4701 * If the Storage engine still owns the iscsi_cmd_t, determine 4492 4702 * and/or stop its context. 4493 4703 */ 4494 - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" 4704 + pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" 4495 4705 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, 4496 4706 cmd->se_tfo->get_task_tag(cmd)); 4497 4707 ··· 4500 4710 continue; 4501 4711 } 4502 4712 4503 - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" 4713 + pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" 4504 4714 "_wait_for_tasks(): SUCCESS\n", 4505 4715 cmd->se_lun->unpacked_lun, 4506 4716 cmd->se_tfo->get_task_tag(cmd)); 4507 4717 4508 4718 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 4509 - if (!(atomic_read(&cmd->transport_dev_active))) { 4719 + if (!atomic_read(&cmd->transport_dev_active)) { 4510 4720 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4511 4721 goto check_cond; 4512 4722 } ··· 4531 4741 */ 4532 4742 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 4533 4743 if (atomic_read(&cmd->transport_lun_fe_stop)) { 4534 - DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" 4744 + pr_debug("SE_LUN[%d] - Detected FE stop for" 4535 4745 " struct se_cmd: %p ITT: 0x%08x\n", 4536 4746 lun->unpacked_lun, 4537 4747 cmd, cmd->se_tfo->get_task_tag(cmd)); ··· 4543 4753 
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4544 4754 continue; 4545 4755 } 4546 - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 4756 + pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 4547 4757 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); 4548 4758 4549 4759 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); ··· 4569 4779 kt = kthread_run(transport_clear_lun_thread, lun, 4570 4780 "tcm_cl_%u", lun->unpacked_lun); 4571 4781 if (IS_ERR(kt)) { 4572 - printk(KERN_ERR "Unable to start clear_lun thread\n"); 4782 + pr_err("Unable to start clear_lun thread\n"); 4573 4783 return PTR_ERR(kt); 4574 4784 } 4575 4785 wait_for_completion(&lun->lun_shutdown_comp); ··· 4602 4812 */ 4603 4813 if (atomic_read(&cmd->transport_lun_stop)) { 4604 4814 4605 - DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" 4815 + pr_debug("wait_for_tasks: Stopping" 4606 4816 " wait_for_completion(&cmd->t_tasktransport_lun_fe" 4607 4817 "_stop_comp); for ITT: 0x%08x\n", 4608 4818 cmd->se_tfo->get_task_tag(cmd)); ··· 4624 4834 * struct se_cmd, now owns the structure and can be released through 4625 4835 * normal means below. 
4626 4836 */ 4627 - DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" 4837 + pr_debug("wait_for_tasks: Stopped" 4628 4838 " wait_for_completion(&cmd->t_tasktransport_lun_fe_" 4629 4839 "stop_comp); for ITT: 0x%08x\n", 4630 4840 cmd->se_tfo->get_task_tag(cmd)); ··· 4637 4847 4638 4848 atomic_set(&cmd->t_transport_stop, 1); 4639 4849 4640 - DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" 4850 + pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" 4641 4851 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" 4642 4852 " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), 4643 4853 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, ··· 4653 4863 atomic_set(&cmd->t_transport_active, 0); 4654 4864 atomic_set(&cmd->t_transport_stop, 0); 4655 4865 4656 - DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" 4866 + pr_debug("wait_for_tasks: Stopped wait_for_compltion(" 4657 4867 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", 4658 4868 cmd->se_tfo->get_task_tag(cmd)); 4659 4869 remove: ··· 4861 5071 int ret = 0; 4862 5072 4863 5073 if (atomic_read(&cmd->t_transport_aborted) != 0) { 4864 - if (!(send_status) || 5074 + if (!send_status || 4865 5075 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) 4866 5076 return 1; 4867 5077 #if 0 4868 - printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" 5078 + pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" 4869 5079 " status for CDB: 0x%02x ITT: 0x%08x\n", 4870 5080 cmd->t_task_cdb[0], 4871 5081 cmd->se_tfo->get_task_tag(cmd)); ··· 4897 5107 } 4898 5108 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 4899 5109 #if 0 4900 - printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," 5110 + pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," 4901 5111 " ITT: 0x%08x\n", cmd->t_task_cdb[0], 4902 5112 cmd->se_tfo->get_task_tag(cmd)); 4903 5113 #endif ··· 4935 5145 tmr->response = TMR_FUNCTION_REJECTED; 4936 5146 break; 4937 5147 default: 4938 - printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", 5148 + 
pr_err("Uknown TMR function: 0x%02x.\n", 4939 5149 tmr->function); 4940 5150 tmr->response = TMR_FUNCTION_REJECTED; 4941 5151 break; ··· 4980 5190 spin_lock_irqsave(&dev->execute_task_lock, flags); 4981 5191 while ((task = transport_get_task_from_state_list(dev))) { 4982 5192 if (!task->task_se_cmd) { 4983 - printk(KERN_ERR "task->task_se_cmd is NULL!\n"); 5193 + pr_err("task->task_se_cmd is NULL!\n"); 4984 5194 continue; 4985 5195 } 4986 5196 cmd = task->task_se_cmd; ··· 4989 5199 4990 5200 spin_lock_irqsave(&cmd->t_state_lock, flags); 4991 5201 4992 - DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," 4993 - " i_state/def_i_state: %d/%d, t_state/def_t_state:" 5202 + pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," 5203 + " i_state: %d, t_state/def_t_state:" 4994 5204 " %d/%d cdb: 0x%02x\n", cmd, task, 4995 - cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, 4996 - cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, 5205 + cmd->se_tfo->get_task_tag(cmd), 5206 + cmd->se_tfo->get_cmd_state(cmd), 4997 5207 cmd->t_state, cmd->deferred_t_state, 4998 5208 cmd->t_task_cdb[0]); 4999 - DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" 5209 + pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" 5000 5210 " %d t_task_cdbs_sent: %d -- t_transport_active: %d" 5001 5211 " t_transport_stop: %d t_transport_sent: %d\n", 5002 5212 cmd->se_tfo->get_task_tag(cmd), 5003 - cmd->t_task_cdbs, 5213 + cmd->t_task_list_num, 5004 5214 atomic_read(&cmd->t_task_cdbs_left), 5005 5215 atomic_read(&cmd->t_task_cdbs_sent), 5006 5216 atomic_read(&cmd->t_transport_active), ··· 5012 5222 spin_unlock_irqrestore( 5013 5223 &cmd->t_state_lock, flags); 5014 5224 5015 - DEBUG_DO("Waiting for task: %p to shutdown for dev:" 5225 + pr_debug("Waiting for task: %p to shutdown for dev:" 5016 5226 " %p\n", task, dev); 5017 5227 wait_for_completion(&task->task_stop_comp); 5018 - DEBUG_DO("Completed task: %p shutdown for dev: %p\n", 5228 + pr_debug("Completed task: %p shutdown for dev: %p\n", 
5019 5229 task, dev); 5020 5230 5021 5231 spin_lock_irqsave(&cmd->t_state_lock, flags); ··· 5029 5239 } 5030 5240 __transport_stop_task_timer(task, &flags); 5031 5241 5032 - if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { 5242 + if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { 5033 5243 spin_unlock_irqrestore( 5034 5244 &cmd->t_state_lock, flags); 5035 5245 5036 - DEBUG_DO("Skipping task: %p, dev: %p for" 5246 + pr_debug("Skipping task: %p, dev: %p for" 5037 5247 " t_task_cdbs_ex_left: %d\n", task, dev, 5038 5248 atomic_read(&cmd->t_task_cdbs_ex_left)); 5039 5249 ··· 5042 5252 } 5043 5253 5044 5254 if (atomic_read(&cmd->t_transport_active)) { 5045 - DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" 5255 + pr_debug("got t_transport_active = 1 for task: %p, dev:" 5046 5256 " %p\n", task, dev); 5047 5257 5048 5258 if (atomic_read(&cmd->t_fe_count)) { ··· 5072 5282 spin_lock_irqsave(&dev->execute_task_lock, flags); 5073 5283 continue; 5074 5284 } 5075 - DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", 5285 + pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", 5076 5286 task, dev); 5077 5287 5078 5288 if (atomic_read(&cmd->t_fe_count)) { ··· 5105 5315 */ 5106 5316 while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { 5107 5317 5108 - DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", 5318 + pr_debug("From Device Queue: cmd: %p t_state: %d\n", 5109 5319 cmd, cmd->t_state); 5110 5320 5111 5321 if (atomic_read(&cmd->t_fe_count)) { ··· 5158 5368 5159 5369 switch (cmd->t_state) { 5160 5370 case TRANSPORT_NEW_CMD_MAP: 5161 - if (!(cmd->se_tfo->new_cmd_map)) { 5162 - printk(KERN_ERR "cmd->se_tfo->new_cmd_map is" 5371 + if (!cmd->se_tfo->new_cmd_map) { 5372 + pr_err("cmd->se_tfo->new_cmd_map is" 5163 5373 " NULL for TRANSPORT_NEW_CMD_MAP\n"); 5164 5374 BUG(); 5165 5375 } ··· 5210 5420 transport_generic_write_pending(cmd); 5211 5421 break; 5212 5422 default: 5213 - printk(KERN_ERR "Unknown t_state: %d 
deferred_t_state:" 5423 + pr_err("Unknown t_state: %d deferred_t_state:" 5214 5424 " %d for ITT: 0x%08x i_state: %d on SE LUN:" 5215 5425 " %u\n", cmd->t_state, cmd->deferred_t_state, 5216 5426 cmd->se_tfo->get_task_tag(cmd),
+15 -15
drivers/target/target_core_ua.c
··· 49 49 struct se_session *sess = cmd->se_sess; 50 50 struct se_node_acl *nacl; 51 51 52 - if (!(sess)) 52 + if (!sess) 53 53 return 0; 54 54 55 55 nacl = sess->se_node_acl; 56 - if (!(nacl)) 56 + if (!nacl) 57 57 return 0; 58 58 59 59 deve = &nacl->device_list[cmd->orig_fe_lun]; 60 - if (!(atomic_read(&deve->ua_count))) 60 + if (!atomic_read(&deve->ua_count)) 61 61 return 0; 62 62 /* 63 63 * From sam4r14, section 5.14 Unit attention condition: ··· 97 97 /* 98 98 * PASSTHROUGH OPS 99 99 */ 100 - if (!(nacl)) 100 + if (!nacl) 101 101 return -EINVAL; 102 102 103 103 ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); 104 - if (!(ua)) { 105 - printk(KERN_ERR "Unable to allocate struct se_ua\n"); 104 + if (!ua) { 105 + pr_err("Unable to allocate struct se_ua\n"); 106 106 return -ENOMEM; 107 107 } 108 108 INIT_LIST_HEAD(&ua->ua_dev_list); ··· 177 177 spin_unlock(&deve->ua_lock); 178 178 spin_unlock_irq(&nacl->device_list_lock); 179 179 180 - printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" 180 + pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" 181 181 " 0x%02x, ASCQ: 0x%02x\n", 182 182 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 183 183 asc, ascq); ··· 215 215 struct se_ua *ua = NULL, *ua_p; 216 216 int head = 1; 217 217 218 - if (!(sess)) 218 + if (!sess) 219 219 return; 220 220 221 221 nacl = sess->se_node_acl; 222 - if (!(nacl)) 222 + if (!nacl) 223 223 return; 224 224 225 225 spin_lock_irq(&nacl->device_list_lock); 226 226 deve = &nacl->device_list[cmd->orig_fe_lun]; 227 - if (!(atomic_read(&deve->ua_count))) { 227 + if (!atomic_read(&deve->ua_count)) { 228 228 spin_unlock_irq(&nacl->device_list_lock); 229 229 return; 230 230 } ··· 264 264 spin_unlock(&deve->ua_lock); 265 265 spin_unlock_irq(&nacl->device_list_lock); 266 266 267 - printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" 267 + pr_debug("[%s]: %s UNIT ATTENTION condition with" 268 268 " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" 269 269 " 
reported ASC: 0x%02x, ASCQ: 0x%02x\n", 270 270 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), ··· 284 284 struct se_ua *ua = NULL, *ua_p; 285 285 int head = 1; 286 286 287 - if (!(sess)) 287 + if (!sess) 288 288 return -EINVAL; 289 289 290 290 nacl = sess->se_node_acl; 291 - if (!(nacl)) 291 + if (!nacl) 292 292 return -EINVAL; 293 293 294 294 spin_lock_irq(&nacl->device_list_lock); 295 295 deve = &nacl->device_list[cmd->orig_fe_lun]; 296 - if (!(atomic_read(&deve->ua_count))) { 296 + if (!atomic_read(&deve->ua_count)) { 297 297 spin_unlock_irq(&nacl->device_list_lock); 298 298 return -EPERM; 299 299 } ··· 323 323 spin_unlock(&deve->ua_lock); 324 324 spin_unlock_irq(&nacl->device_list_lock); 325 325 326 - printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" 326 + pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" 327 327 " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," 328 328 " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 329 329 cmd->orig_fe_lun, *asc, *ascq);
-24
drivers/target/tcm_fc/tcm_fc.h
··· 23 23 #define FT_TPG_NAMELEN 32 /* max length of TPG name */ 24 24 #define FT_LUN_NAMELEN 32 /* max length of LUN name */ 25 25 26 - /* 27 - * Debug options. 28 - */ 29 - #define FT_DEBUG_CONF 0x01 /* configuration messages */ 30 - #define FT_DEBUG_SESS 0x02 /* session messages */ 31 - #define FT_DEBUG_TM 0x04 /* TM operations */ 32 - #define FT_DEBUG_IO 0x08 /* I/O commands */ 33 - #define FT_DEBUG_DATA 0x10 /* Data transfer */ 34 - 35 - extern unsigned int ft_debug_logging; /* debug options */ 36 - 37 - #define FT_DEBUG(mask, fmt, args...) \ 38 - do { \ 39 - if (ft_debug_logging & (mask)) \ 40 - printk(KERN_INFO "tcm_fc: %s: " fmt, \ 41 - __func__, ##args); \ 42 - } while (0) 43 - 44 - #define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args) 45 - #define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args) 46 - #define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args) 47 - #define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args) 48 - #define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args) 49 - 50 26 struct ft_transport_id { 51 27 __u8 format; 52 28 __u8 __resvd1[7];
+17 -20
drivers/target/tcm_fc/tfc_cmd.c
··· 62 62 struct scatterlist *sg; 63 63 int count; 64 64 65 - if (!(ft_debug_logging & FT_DEBUG_IO)) 66 - return; 67 - 68 65 se_cmd = &cmd->se_cmd; 69 - printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n", 66 + pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", 70 67 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); 71 - printk(KERN_INFO "%s: cmd %p cdb %p\n", 68 + pr_debug("%s: cmd %p cdb %p\n", 72 69 caller, cmd, cmd->cdb); 73 - printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 70 + pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 74 71 75 - printk(KERN_INFO "%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", 72 + pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", 76 73 caller, cmd, se_cmd->t_data_nents, 77 74 se_cmd->data_length, se_cmd->se_cmd_flags); 78 75 79 76 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) 80 - printk(KERN_INFO "%s: cmd %p sg %p page %p " 77 + pr_debug("%s: cmd %p sg %p page %p " 81 78 "len 0x%x off 0x%x\n", 82 79 caller, cmd, sg, 83 80 sg_page(sg), sg->length, sg->offset); ··· 82 85 sp = cmd->seq; 83 86 if (sp) { 84 87 ep = fc_seq_exch(sp); 85 - printk(KERN_INFO "%s: cmd %p sid %x did %x " 88 + pr_debug("%s: cmd %p sid %x did %x " 86 89 "ox_id %x rx_id %x seq_id %x e_stat %x\n", 87 90 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, 88 91 sp->id, ep->esb_stat); ··· 318 321 case FC_RCTL_DD_SOL_CTL: /* transfer ready */ 319 322 case FC_RCTL_DD_DATA_DESC: /* transfer ready */ 320 323 default: 321 - printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", 324 + pr_debug("%s: unhandled frame r_ctl %x\n", 322 325 __func__, fh->fh_r_ctl); 323 326 fc_frame_free(fp); 324 327 transport_generic_free_cmd(&cmd->se_cmd, 0, 0); ··· 343 346 struct fcp_resp_rsp_info *info; 344 347 345 348 fh = fc_frame_header_get(rx_fp); 346 - FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n", 349 + pr_debug("FCP error response: did %x oxid %x status %x code %x\n", 347 350 
ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); 348 351 len = sizeof(*fcp); 349 352 if (status == SAM_STAT_GOOD) ··· 413 416 * FCP4r01 indicates having a combination of 414 417 * tm_flags set is invalid. 415 418 */ 416 - FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); 419 + pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); 417 420 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); 418 421 return; 419 422 } 420 423 421 - FT_TM_DBG("alloc tm cmd fn %d\n", tm_func); 424 + pr_debug("alloc tm cmd fn %d\n", tm_func); 422 425 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); 423 426 if (!tmr) { 424 - FT_TM_DBG("alloc failed\n"); 427 + pr_debug("alloc failed\n"); 425 428 ft_send_resp_code(cmd, FCP_TMF_FAILED); 426 429 return; 427 430 } ··· 436 439 * since "unable to handle TMR request because failed 437 440 * to get to LUN" 438 441 */ 439 - FT_TM_DBG("Failed to get LUN for TMR func %d, " 442 + pr_debug("Failed to get LUN for TMR func %d, " 440 443 "se_cmd %p, unpacked_lun %d\n", 441 444 tm_func, &cmd->se_cmd, cmd->lun); 442 445 ft_dump_cmd(cmd, __func__); ··· 487 490 code = FCP_TMF_FAILED; 488 491 break; 489 492 } 490 - FT_TM_DBG("tmr fn %d resp %d fcp code %d\n", 493 + pr_debug("tmr fn %d resp %d fcp code %d\n", 491 494 tmr->function, tmr->response, code); 492 495 ft_send_resp_code(cmd, code); 493 496 return 0; ··· 515 518 return; 516 519 517 520 busy: 518 - FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n"); 521 + pr_debug("cmd or seq allocation failure - sending BUSY\n"); 519 522 ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); 520 523 fc_frame_free(fp); 521 524 ft_sess_put(sess); /* undo get from lookup */ ··· 540 543 case FC_RCTL_DD_DATA_DESC: /* transfer ready */ 541 544 case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ 542 545 default: 543 - printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", 546 + pr_debug("%s: unhandled frame r_ctl %x\n", 544 547 __func__, fh->fh_r_ctl); 545 548 fc_frame_free(fp); 546 549 ft_sess_put(sess); /* undo get 
from lookup */ ··· 639 642 640 643 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb); 641 644 642 - FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); 645 + pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); 643 646 ft_dump_cmd(cmd, __func__); 644 647 645 648 if (ret == -ENOMEM) { ··· 669 672 */ 670 673 static void ft_exec_req(struct ft_cmd *cmd) 671 674 { 672 - FT_IO_DBG("cmd state %x\n", cmd->state); 675 + pr_debug("cmd state %x\n", cmd->state); 673 676 switch (cmd->state) { 674 677 case FC_CMD_ST_NEW: 675 678 ft_send_cmd(cmd);
+15 -18
drivers/target/tcm_fc/tfc_conf.c
··· 106 106 } 107 107 err = 4; 108 108 fail: 109 - FT_CONF_DBG("err %u len %zu pos %u byte %u\n", 109 + pr_debug("err %u len %zu pos %u byte %u\n", 110 110 err, cp - name, pos, byte); 111 111 return -1; 112 112 } ··· 216 216 u64 wwpn; 217 217 u32 q_depth; 218 218 219 - FT_CONF_DBG("add acl %s\n", name); 219 + pr_debug("add acl %s\n", name); 220 220 tpg = container_of(se_tpg, struct ft_tpg, se_tpg); 221 221 222 222 if (ft_parse_wwn(name, &wwpn, 1) < 0) ··· 239 239 struct ft_node_acl *acl = container_of(se_acl, 240 240 struct ft_node_acl, se_node_acl); 241 241 242 - FT_CONF_DBG("del acl %s\n", 242 + pr_debug("del acl %s\n", 243 243 config_item_name(&se_acl->acl_group.cg_item)); 244 244 245 245 tpg = container_of(se_tpg, struct ft_tpg, se_tpg); 246 - FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n", 246 + pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n", 247 247 acl, se_acl, tpg, &tpg->se_tpg); 248 248 249 249 core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1); ··· 260 260 spin_lock_bh(&se_tpg->acl_node_lock); 261 261 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { 262 262 acl = container_of(se_acl, struct ft_node_acl, se_node_acl); 263 - FT_CONF_DBG("acl %p port_name %llx\n", 263 + pr_debug("acl %p port_name %llx\n", 264 264 acl, (unsigned long long)acl->node_auth.port_name); 265 265 if (acl->node_auth.port_name == rdata->ids.port_name || 266 266 acl->node_auth.node_name == rdata->ids.node_name) { 267 - FT_CONF_DBG("acl %p port_name %llx matched\n", acl, 267 + pr_debug("acl %p port_name %llx matched\n", acl, 268 268 (unsigned long long)rdata->ids.port_name); 269 269 found = acl; 270 270 /* XXX need to hold onto ACL */ ··· 281 281 282 282 acl = kzalloc(sizeof(*acl), GFP_KERNEL); 283 283 if (!acl) { 284 - printk(KERN_ERR "Unable to allocate struct ft_node_acl\n"); 284 + pr_err("Unable to allocate struct ft_node_acl\n"); 285 285 return NULL; 286 286 } 287 - FT_CONF_DBG("acl %p\n", acl); 287 + pr_debug("acl %p\n", acl); 288 288 return 
&acl->se_node_acl; 289 289 } 290 290 ··· 294 294 struct ft_node_acl *acl = container_of(se_acl, 295 295 struct ft_node_acl, se_node_acl); 296 296 297 - FT_CONF_DBG(KERN_INFO "acl %p\n", acl); 297 + pr_debug("acl %p\n", acl); 298 298 kfree(acl); 299 299 } 300 300 ··· 311 311 unsigned long index; 312 312 int ret; 313 313 314 - FT_CONF_DBG("tcm_fc: add tpg %s\n", name); 314 + pr_debug("tcm_fc: add tpg %s\n", name); 315 315 316 316 /* 317 317 * Name must be "tpgt_" followed by the index. ··· 354 354 { 355 355 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); 356 356 357 - FT_CONF_DBG("del tpg %s\n", 357 + pr_debug("del tpg %s\n", 358 358 config_item_name(&tpg->se_tpg.tpg_group.cg_item)); 359 359 360 360 kthread_stop(tpg->thread); ··· 412 412 struct ft_lport_acl *old_lacl; 413 413 u64 wwpn; 414 414 415 - FT_CONF_DBG("add lport %s\n", name); 415 + pr_debug("add lport %s\n", name); 416 416 if (ft_parse_wwn(name, &wwpn, 1) < 0) 417 417 return NULL; 418 418 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); ··· 441 441 struct ft_lport_acl *lacl = container_of(wwn, 442 442 struct ft_lport_acl, fc_lport_wwn); 443 443 444 - FT_CONF_DBG("del lport %s\n", 444 + pr_debug("del lport %s\n", 445 445 config_item_name(&wwn->wwn_group.cg_item)); 446 446 mutex_lock(&ft_lport_lock); 447 447 list_del(&lacl->list); ··· 581 581 */ 582 582 fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); 583 583 if (IS_ERR(fabric)) { 584 - printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n", 584 + pr_err("%s: target_fabric_configfs_init() failed!\n", 585 585 __func__); 586 586 return PTR_ERR(fabric); 587 587 } ··· 608 608 */ 609 609 ret = target_fabric_configfs_register(fabric); 610 610 if (ret < 0) { 611 - FT_CONF_DBG("target_fabric_configfs_register() for" 611 + pr_debug("target_fabric_configfs_register() for" 612 612 " FC Target failed!\n"); 613 - printk(KERN_INFO 614 - "%s: target_fabric_configfs_register() failed!\n", 615 - __func__); 616 613 
target_fabric_configfs_free(fabric); 617 614 return -1; 618 615 }
+4 -4
drivers/target/tcm_fc/tfc_io.c
··· 39 39 #include <linux/configfs.h> 40 40 #include <linux/ctype.h> 41 41 #include <linux/hash.h> 42 + #include <linux/ratelimit.h> 42 43 #include <asm/unaligned.h> 43 44 #include <scsi/scsi.h> 44 45 #include <scsi/scsi_host.h> ··· 177 176 error = lport->tt.seq_send(lport, cmd->seq, fp); 178 177 if (error) { 179 178 /* XXX For now, initiator will retry */ 180 - if (printk_ratelimit()) 181 - printk(KERN_ERR "%s: Failed to send frame %p, " 179 + pr_err_ratelimited("%s: Failed to send frame %p, " 182 180 "xid <0x%x>, remaining %zu, " 183 181 "lso_max <0x%x>\n", 184 182 __func__, fp, ep->xid, ··· 222 222 */ 223 223 buf = fc_frame_payload_get(fp, 1); 224 224 if (cmd->was_ddp_setup && buf) { 225 - printk(KERN_INFO "%s: When DDP was setup, not expected to" 225 + pr_debug("%s: When DDP was setup, not expected to" 226 226 "receive frame with payload, Payload shall be" 227 227 "copied directly to buffer instead of coming " 228 228 "via. legacy receive queues\n", __func__); ··· 260 260 * this point, but just in case if required in future 261 261 * for debugging or any other purpose 262 262 */ 263 - printk(KERN_ERR "%s: Received frame with TSI bit not" 263 + pr_err("%s: Received frame with TSI bit not" 264 264 " being SET, dropping the frame, " 265 265 "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n", 266 266 __func__, cmd->sg, cmd->sg_cnt);
+9 -9
drivers/target/tcm_fc/tfc_sess.c
··· 198 198 if (sess->port_id == port_id) { 199 199 kref_get(&sess->kref); 200 200 rcu_read_unlock(); 201 - FT_SESS_DBG("port_id %x found %p\n", port_id, sess); 201 + pr_debug("port_id %x found %p\n", port_id, sess); 202 202 return sess; 203 203 } 204 204 } 205 205 out: 206 206 rcu_read_unlock(); 207 - FT_SESS_DBG("port_id %x not found\n", port_id); 207 + pr_debug("port_id %x not found\n", port_id); 208 208 return NULL; 209 209 } 210 210 ··· 240 240 hlist_add_head_rcu(&sess->hash, head); 241 241 tport->sess_count++; 242 242 243 - FT_SESS_DBG("port_id %x sess %p\n", port_id, sess); 243 + pr_debug("port_id %x sess %p\n", port_id, sess); 244 244 245 245 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl, 246 246 sess->se_sess, sess); ··· 314 314 { 315 315 struct ft_sess *sess = se_sess->fabric_sess_ptr; 316 316 317 - FT_SESS_DBG("port_id %x\n", sess->port_id); 317 + pr_debug("port_id %x\n", sess->port_id); 318 318 return 1; 319 319 } 320 320 ··· 335 335 mutex_unlock(&ft_lport_lock); 336 336 return; 337 337 } 338 - FT_SESS_DBG("port_id %x\n", port_id); 338 + pr_debug("port_id %x\n", port_id); 339 339 ft_sess_unhash(sess); 340 340 mutex_unlock(&ft_lport_lock); 341 341 transport_deregister_session_configfs(se_sess); ··· 348 348 { 349 349 struct ft_sess *sess = se_sess->fabric_sess_ptr; 350 350 351 - FT_SESS_DBG("port_id %x\n", sess->port_id); 351 + pr_debug("port_id %x\n", sess->port_id); 352 352 } 353 353 354 354 int ft_sess_logged_in(struct se_session *se_sess) ··· 458 458 mutex_lock(&ft_lport_lock); 459 459 ret = ft_prli_locked(rdata, spp_len, rspp, spp); 460 460 mutex_unlock(&ft_lport_lock); 461 - FT_SESS_DBG("port_id %x flags %x ret %x\n", 461 + pr_debug("port_id %x flags %x ret %x\n", 462 462 rdata->ids.port_id, rspp ? 
rspp->spp_flags : 0, ret); 463 463 return ret; 464 464 } ··· 518 518 struct ft_sess *sess; 519 519 u32 sid = fc_frame_sid(fp); 520 520 521 - FT_SESS_DBG("sid %x\n", sid); 521 + pr_debug("sid %x\n", sid); 522 522 523 523 sess = ft_sess_get(lport, sid); 524 524 if (!sess) { 525 - FT_SESS_DBG("sid %x sess lookup failed\n", sid); 525 + pr_debug("sid %x sess lookup failed\n", sid); 526 526 /* TBD XXX - if FCP_CMND, send PRLO */ 527 527 fc_frame_free(fp); 528 528 return;
+1 -1
include/target/target_core_base.h
··· 403 403 struct se_task { 404 404 unsigned char task_sense; 405 405 struct scatterlist *task_sg; 406 - u32 task_sg_num; 406 + u32 task_sg_nents; 407 407 struct scatterlist *task_sg_bidi; 408 408 u8 task_scsi_status; 409 409 u8 task_flags;
+1 -6
include/target/target_core_transport.h
··· 292 292 * drivers. Provided out of convenience. 293 293 */ 294 294 int (*transport_complete)(struct se_task *task); 295 - struct se_task *(*alloc_task)(struct se_cmd *); 295 + struct se_task *(*alloc_task)(unsigned char *cdb); 296 296 /* 297 297 * do_task(): 298 298 */ ··· 341 341 * Get the sector_t from a subsystem backstore.. 342 342 */ 343 343 sector_t (*get_blocks)(struct se_device *); 344 - /* 345 - * do_se_mem_map(): 346 - */ 347 - int (*do_se_mem_map)(struct se_task *, struct list_head *, void *, 348 - struct se_mem *, struct se_mem **, u32 *, u32 *); 349 344 /* 350 345 * get_sense_buffer(): 351 346 */