Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
"Lots of activity in target land over the last few months.

The highlights include:

- Convert fabric drivers tree-wide to target_register_template() (hch
+ bart)

- iser-target hardening fixes + v1.0 improvements (sagi)

- Convert iscsi_thread_set usage to kthread.h + kill
iscsi_target_tq.c (sagi + nab)

- Add support for T10-PI WRITE_STRIP + READ_INSERT operation (mkp +
sagi + nab)

- DIF fixes for CONFIG_DEBUG_SG=y + UNMAP file emulation (akinobu +
sagi + mkp)

- Extended TCMU ABI v2 for future BIDI + DIF support (andy + ilias)

- Fix COMPARE_AND_WRITE handling for NOALLOC drivers (hch + nab)

Thanks to everyone who contributed this round with new features,
bug-reports, fixes, cleanups and improvements.

Looking forward, it's currently shaping up to be a busy v4.2 as well"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (69 commits)
target: Put TCMU under a new config option
target: Version 2 of TCMU ABI
target: fix tcm_mod_builder.py
target/file: Fix UNMAP with DIF protection support
target/file: Fix SG table for prot_buf initialization
target/file: Fix BUG() when CONFIG_DEBUG_SG=y and DIF protection enabled
target: Make core_tmr_abort_task() skip TMFs
target/sbc: Update sbc_dif_generate pr_debug output
target/sbc: Make internal DIF emulation honor ->prot_checks
target/sbc: Return INVALID_CDB_FIELD if DIF + sess_prot_type disabled
target: Ensure sess_prot_type is saved across session restart
target/rd: Don't pass incomplete scatterlist entries to sbc_dif_verify_*
target: Remove the unused flag SCF_ACK_KREF
target: Fix two sparse warnings
target: Fix COMPARE_AND_WRITE with SG_TO_MEM_NOALLOC handling
target: simplify the target template registration API
target: simplify target_xcopy_init_pt_lun
target: remove the unused SCF_CMD_XCOPY_PASSTHROUGH flag
target/rd: reduce code duplication in rd_execute_rw()
tcm_loop: fixup tpgt string to integer conversion
...

+1703 -2289
+12 -67
Documentation/target/tcm_mod_builder.py
··· 237 237 buf += "#include \"" + fabric_mod_name + "_base.h\"\n" 238 238 buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" 239 239 240 - buf += "/* Local pointer to allocated TCM configfs fabric module */\n" 241 - buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n" 240 + buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n" 242 241 243 242 buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n" 244 243 buf += " struct se_portal_group *se_tpg,\n" ··· 308 309 buf += " }\n" 309 310 buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" 310 311 buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" 311 - buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n" 312 - buf += " &tpg->se_tpg, (void *)tpg,\n" 312 + buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n" 313 + buf += " &tpg->se_tpg, tpg,\n" 313 314 buf += " TRANSPORT_TPG_TYPE_NORMAL);\n" 314 315 buf += " if (ret < 0) {\n" 315 316 buf += " kfree(tpg);\n" ··· 369 370 buf += " NULL,\n" 370 371 buf += "};\n\n" 371 372 372 - buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" 373 + buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" 374 + buf += " .module = THIS_MODULE,\n" 375 + buf += " .name = " + fabric_mod_name + ",\n" 376 + buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" 373 377 buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" 374 378 buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" 375 379 buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" ··· 415 413 buf += " .fabric_drop_np = NULL,\n" 416 414 buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n" 417 415 buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n" 418 - buf += "};\n\n" 419 - 420 - 
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n" 421 - buf += "{\n" 422 - buf += " struct target_fabric_configfs *fabric;\n" 423 - buf += " int ret;\n\n" 424 - buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" 425 - buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n" 426 - buf += " utsname()->machine);\n" 427 - buf += " /*\n" 428 - buf += " * Register the top level struct config_item_type with TCM core\n" 429 - buf += " */\n" 430 - buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n" 431 - buf += " if (IS_ERR(fabric)) {\n" 432 - buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n" 433 - buf += " return PTR_ERR(fabric);\n" 434 - buf += " }\n" 435 - buf += " /*\n" 436 - buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n" 437 - buf += " */\n" 438 - buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n" 439 - buf += " /*\n" 440 - buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n" 441 - buf += " */\n" 442 - buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n" 443 - buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n" 444 - buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n" 445 - buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n" 446 - buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n" 447 - buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n" 448 - buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n" 449 - buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n" 450 - buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n" 451 - buf += " /*\n" 452 - buf += " * Register the fabric for use within TCM\n" 453 - buf += " */\n" 454 - buf += " ret = 
target_fabric_configfs_register(fabric);\n" 455 - buf += " if (ret < 0) {\n" 456 - buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n" 457 - buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n" 458 - buf += " return ret;\n" 459 - buf += " }\n" 460 - buf += " /*\n" 461 - buf += " * Setup our local pointer to *fabric\n" 462 - buf += " */\n" 463 - buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n" 464 - buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n" 465 - buf += " return 0;\n" 466 - buf += "};\n\n" 467 - buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n" 468 - buf += "{\n" 469 - buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n" 470 - buf += " return;\n\n" 471 - buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n" 472 - buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n" 473 - buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n" 416 + buf += "\n" 417 + buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n" 474 418 buf += "};\n\n" 475 419 476 420 buf += "static int __init " + fabric_mod_name + "_init(void)\n" 477 421 buf += "{\n" 478 - buf += " int ret;\n\n" 479 - buf += " ret = " + fabric_mod_name + "_register_configfs();\n" 480 - buf += " if (ret < 0)\n" 481 - buf += " return ret;\n\n" 482 - buf += " return 0;\n" 422 + buf += " return target_register_template(" + fabric_mod_name + "_ops);\n" 483 423 buf += "};\n\n" 424 + 484 425 buf += "static void __exit " + fabric_mod_name + "_exit(void)\n" 485 426 buf += "{\n" 486 - buf += " " + fabric_mod_name + "_deregister_configfs();\n" 427 + buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n" 487 428 buf += "};\n\n" 488 429 489 430 buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
+26 -13
Documentation/target/tcmu-design.txt
··· 138 138 cmd_tail, the ring is empty -- no commands are currently waiting to be 139 139 processed by userspace. 140 140 141 - TCMU commands start with a common header containing "len_op", a 32-bit 142 - value that stores the length, as well as the opcode in the lowest 143 - unused bits. Currently only two opcodes are defined, TCMU_OP_PAD and 144 - TCMU_OP_CMD. When userspace encounters a command with PAD opcode, it 145 - should skip ahead by the bytes in "length". (The kernel inserts PAD 146 - entries to ensure each CMD entry fits contigously into the circular 147 - buffer.) 141 + TCMU commands are 8-byte aligned. They start with a common header 142 + containing "len_op", a 32-bit value that stores the length, as well as 143 + the opcode in the lowest unused bits. It also contains cmd_id and 144 + flags fields for setting by the kernel (kflags) and userspace 145 + (uflags). 148 146 149 - When userspace handles a CMD, it finds the SCSI CDB (Command Data 150 - Block) via tcmu_cmd_entry.req.cdb_off. This is an offset from the 151 - start of the overall shared memory region, not the entry. The data 152 - in/out buffers are accessible via tht req.iov[] array. Note that 153 - each iov.iov_base is also an offset from the start of the region. 147 + Currently only two opcodes are defined, TCMU_OP_CMD and TCMU_OP_PAD. 154 148 155 - TCMU currently does not support BIDI operations. 149 + When the opcode is CMD, the entry in the command ring is a struct 150 + tcmu_cmd_entry. Userspace finds the SCSI CDB (Command Data Block) via 151 + tcmu_cmd_entry.req.cdb_off. This is an offset from the start of the 152 + overall shared memory region, not the entry. The data in/out buffers 153 + are accessible via tht req.iov[] array. iov_cnt contains the number of 154 + entries in iov[] needed to describe either the Data-In or Data-Out 155 + buffers. 
For bidirectional commands, iov_cnt specifies how many iovec 156 + entries cover the Data-Out area, and iov_bidi_count specifies how many 157 + iovec entries immediately after that in iov[] cover the Data-In 158 + area. Just like other fields, iov.iov_base is an offset from the start 159 + of the region. 156 160 157 161 When completing a command, userspace sets rsp.scsi_status, and 158 162 rsp.sense_buffer if necessary. Userspace then increments 159 163 mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the 160 164 kernel via the UIO method, a 4-byte write to the file descriptor. 165 + 166 + When the opcode is PAD, userspace only updates cmd_tail as above -- 167 + it's a no-op. (The kernel inserts PAD entries to ensure each CMD entry 168 + is contiguous within the command ring.) 169 + 170 + More opcodes may be added in the future. If userspace encounters an 171 + opcode it does not handle, it must set UNKNOWN_OP bit (bit 0) in 172 + hdr.uflags, update cmd_tail, and proceed with processing additional 173 + commands, if any. 161 174 162 175 The Data Area: 163 176
+392 -323
drivers/infiniband/ulp/isert/ib_isert.c
··· 76 76 static void 77 77 isert_qp_event_callback(struct ib_event *e, void *context) 78 78 { 79 - struct isert_conn *isert_conn = (struct isert_conn *)context; 79 + struct isert_conn *isert_conn = context; 80 80 81 81 isert_err("conn %p event: %d\n", isert_conn, e->event); 82 82 switch (e->event) { 83 83 case IB_EVENT_COMM_EST: 84 - rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST); 84 + rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 85 85 break; 86 86 case IB_EVENT_QP_LAST_WQE_REACHED: 87 87 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); ··· 107 107 return 0; 108 108 } 109 109 110 - static int 111 - isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 110 + static struct isert_comp * 111 + isert_comp_get(struct isert_conn *isert_conn) 112 112 { 113 - struct isert_device *device = isert_conn->conn_device; 114 - struct ib_qp_init_attr attr; 113 + struct isert_device *device = isert_conn->device; 115 114 struct isert_comp *comp; 116 - int ret, i, min = 0; 115 + int i, min = 0; 117 116 118 117 mutex_lock(&device_list_mutex); 119 118 for (i = 0; i < device->comps_used; i++) ··· 121 122 min = i; 122 123 comp = &device->comps[min]; 123 124 comp->active_qps++; 125 + mutex_unlock(&device_list_mutex); 126 + 124 127 isert_info("conn %p, using comp %p min_index: %d\n", 125 128 isert_conn, comp, min); 129 + 130 + return comp; 131 + } 132 + 133 + static void 134 + isert_comp_put(struct isert_comp *comp) 135 + { 136 + mutex_lock(&device_list_mutex); 137 + comp->active_qps--; 126 138 mutex_unlock(&device_list_mutex); 139 + } 140 + 141 + static struct ib_qp * 142 + isert_create_qp(struct isert_conn *isert_conn, 143 + struct isert_comp *comp, 144 + struct rdma_cm_id *cma_id) 145 + { 146 + struct isert_device *device = isert_conn->device; 147 + struct ib_qp_init_attr attr; 148 + int ret; 127 149 128 150 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 129 151 attr.event_handler = isert_qp_event_callback; ··· 169 149 if 
(device->pi_capable) 170 150 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 171 151 172 - ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); 152 + ret = rdma_create_qp(cma_id, device->pd, &attr); 173 153 if (ret) { 174 154 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 155 + return ERR_PTR(ret); 156 + } 157 + 158 + return cma_id->qp; 159 + } 160 + 161 + static int 162 + isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 163 + { 164 + struct isert_comp *comp; 165 + int ret; 166 + 167 + comp = isert_comp_get(isert_conn); 168 + isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); 169 + if (IS_ERR(isert_conn->qp)) { 170 + ret = PTR_ERR(isert_conn->qp); 175 171 goto err; 176 172 } 177 - isert_conn->conn_qp = cma_id->qp; 178 173 179 174 return 0; 180 175 err: 181 - mutex_lock(&device_list_mutex); 182 - comp->active_qps--; 183 - mutex_unlock(&device_list_mutex); 184 - 176 + isert_comp_put(comp); 185 177 return ret; 186 178 } 187 179 ··· 206 174 static int 207 175 isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 208 176 { 209 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 177 + struct isert_device *device = isert_conn->device; 178 + struct ib_device *ib_dev = device->ib_device; 210 179 struct iser_rx_desc *rx_desc; 211 180 struct ib_sge *rx_sg; 212 181 u64 dma_addr; 213 182 int i, j; 214 183 215 - isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * 184 + isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * 216 185 sizeof(struct iser_rx_desc), GFP_KERNEL); 217 - if (!isert_conn->conn_rx_descs) 186 + if (!isert_conn->rx_descs) 218 187 goto fail; 219 188 220 - rx_desc = isert_conn->conn_rx_descs; 189 + rx_desc = isert_conn->rx_descs; 221 190 222 191 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 223 192 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, ··· 231 198 rx_sg = &rx_desc->rx_sg; 232 199 rx_sg->addr = rx_desc->dma_addr; 233 200 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 
234 - rx_sg->lkey = isert_conn->conn_mr->lkey; 201 + rx_sg->lkey = device->mr->lkey; 235 202 } 236 203 237 - isert_conn->conn_rx_desc_head = 0; 204 + isert_conn->rx_desc_head = 0; 238 205 239 206 return 0; 240 207 241 208 dma_map_fail: 242 - rx_desc = isert_conn->conn_rx_descs; 209 + rx_desc = isert_conn->rx_descs; 243 210 for (j = 0; j < i; j++, rx_desc++) { 244 211 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 245 212 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 246 213 } 247 - kfree(isert_conn->conn_rx_descs); 248 - isert_conn->conn_rx_descs = NULL; 214 + kfree(isert_conn->rx_descs); 215 + isert_conn->rx_descs = NULL; 249 216 fail: 250 217 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); 251 218 ··· 255 222 static void 256 223 isert_free_rx_descriptors(struct isert_conn *isert_conn) 257 224 { 258 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 225 + struct ib_device *ib_dev = isert_conn->device->ib_device; 259 226 struct iser_rx_desc *rx_desc; 260 227 int i; 261 228 262 - if (!isert_conn->conn_rx_descs) 229 + if (!isert_conn->rx_descs) 263 230 return; 264 231 265 - rx_desc = isert_conn->conn_rx_descs; 232 + rx_desc = isert_conn->rx_descs; 266 233 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 267 234 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 268 235 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 269 236 } 270 237 271 - kfree(isert_conn->conn_rx_descs); 272 - isert_conn->conn_rx_descs = NULL; 238 + kfree(isert_conn->rx_descs); 239 + isert_conn->rx_descs = NULL; 273 240 } 274 241 275 242 static void isert_cq_work(struct work_struct *); 276 243 static void isert_cq_callback(struct ib_cq *, void *); 277 244 278 - static int 279 - isert_create_device_ib_res(struct isert_device *device) 245 + static void 246 + isert_free_comps(struct isert_device *device) 280 247 { 281 - struct ib_device *ib_dev = device->ib_device; 282 - struct ib_device_attr *dev_attr; 283 - int ret = 0, i; 284 - int max_cqe; 248 + int i; 285 249 286 - dev_attr 
= &device->dev_attr; 287 - ret = isert_query_device(ib_dev, dev_attr); 288 - if (ret) 289 - return ret; 250 + for (i = 0; i < device->comps_used; i++) { 251 + struct isert_comp *comp = &device->comps[i]; 290 252 291 - max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe); 292 - 293 - /* asign function handlers */ 294 - if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && 295 - dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { 296 - device->use_fastreg = 1; 297 - device->reg_rdma_mem = isert_reg_rdma; 298 - device->unreg_rdma_mem = isert_unreg_rdma; 299 - } else { 300 - device->use_fastreg = 0; 301 - device->reg_rdma_mem = isert_map_rdma; 302 - device->unreg_rdma_mem = isert_unmap_cmd; 253 + if (comp->cq) { 254 + cancel_work_sync(&comp->work); 255 + ib_destroy_cq(comp->cq); 256 + } 303 257 } 258 + kfree(device->comps); 259 + } 304 260 305 - /* Check signature cap */ 306 - device->pi_capable = dev_attr->device_cap_flags & 307 - IB_DEVICE_SIGNATURE_HANDOVER ? true : false; 261 + static int 262 + isert_alloc_comps(struct isert_device *device, 263 + struct ib_device_attr *attr) 264 + { 265 + int i, max_cqe, ret = 0; 308 266 309 267 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), 310 - device->ib_device->num_comp_vectors)); 268 + device->ib_device->num_comp_vectors)); 269 + 311 270 isert_info("Using %d CQs, %s supports %d vectors support " 312 271 "Fast registration %d pi_capable %d\n", 313 272 device->comps_used, device->ib_device->name, ··· 313 288 return -ENOMEM; 314 289 } 315 290 291 + max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe); 292 + 316 293 for (i = 0; i < device->comps_used; i++) { 317 294 struct isert_comp *comp = &device->comps[i]; 318 295 ··· 326 299 (void *)comp, 327 300 max_cqe, i); 328 301 if (IS_ERR(comp->cq)) { 302 + isert_err("Unable to allocate cq\n"); 329 303 ret = PTR_ERR(comp->cq); 330 304 comp->cq = NULL; 331 305 goto out_cq; ··· 338 310 } 339 311 340 312 return 0; 341 - 342 313 out_cq: 343 - for (i = 0; i < 
device->comps_used; i++) { 344 - struct isert_comp *comp = &device->comps[i]; 314 + isert_free_comps(device); 315 + return ret; 316 + } 345 317 346 - if (comp->cq) { 347 - cancel_work_sync(&comp->work); 348 - ib_destroy_cq(comp->cq); 349 - } 318 + static int 319 + isert_create_device_ib_res(struct isert_device *device) 320 + { 321 + struct ib_device_attr *dev_attr; 322 + int ret; 323 + 324 + dev_attr = &device->dev_attr; 325 + ret = isert_query_device(device->ib_device, dev_attr); 326 + if (ret) 327 + return ret; 328 + 329 + /* asign function handlers */ 330 + if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && 331 + dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { 332 + device->use_fastreg = 1; 333 + device->reg_rdma_mem = isert_reg_rdma; 334 + device->unreg_rdma_mem = isert_unreg_rdma; 335 + } else { 336 + device->use_fastreg = 0; 337 + device->reg_rdma_mem = isert_map_rdma; 338 + device->unreg_rdma_mem = isert_unmap_cmd; 350 339 } 351 - kfree(device->comps); 352 340 341 + ret = isert_alloc_comps(device, dev_attr); 342 + if (ret) 343 + return ret; 344 + 345 + device->pd = ib_alloc_pd(device->ib_device); 346 + if (IS_ERR(device->pd)) { 347 + ret = PTR_ERR(device->pd); 348 + isert_err("failed to allocate pd, device %p, ret=%d\n", 349 + device, ret); 350 + goto out_cq; 351 + } 352 + 353 + device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE); 354 + if (IS_ERR(device->mr)) { 355 + ret = PTR_ERR(device->mr); 356 + isert_err("failed to create dma mr, device %p, ret=%d\n", 357 + device, ret); 358 + goto out_mr; 359 + } 360 + 361 + /* Check signature cap */ 362 + device->pi_capable = dev_attr->device_cap_flags & 363 + IB_DEVICE_SIGNATURE_HANDOVER ? 
true : false; 364 + 365 + return 0; 366 + 367 + out_mr: 368 + ib_dealloc_pd(device->pd); 369 + out_cq: 370 + isert_free_comps(device); 353 371 return ret; 354 372 } 355 373 356 374 static void 357 375 isert_free_device_ib_res(struct isert_device *device) 358 376 { 359 - int i; 360 - 361 377 isert_info("device %p\n", device); 362 378 363 - for (i = 0; i < device->comps_used; i++) { 364 - struct isert_comp *comp = &device->comps[i]; 365 - 366 - cancel_work_sync(&comp->work); 367 - ib_destroy_cq(comp->cq); 368 - comp->cq = NULL; 369 - } 370 - kfree(device->comps); 379 + ib_dereg_mr(device->mr); 380 + ib_dealloc_pd(device->pd); 381 + isert_free_comps(device); 371 382 } 372 383 373 384 static void 374 - isert_device_try_release(struct isert_device *device) 385 + isert_device_put(struct isert_device *device) 375 386 { 376 387 mutex_lock(&device_list_mutex); 377 388 device->refcount--; ··· 424 357 } 425 358 426 359 static struct isert_device * 427 - isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id) 360 + isert_device_get(struct rdma_cm_id *cma_id) 428 361 { 429 362 struct isert_device *device; 430 363 int ret; ··· 471 404 struct fast_reg_descriptor *fr_desc, *tmp; 472 405 int i = 0; 473 406 474 - if (list_empty(&isert_conn->conn_fr_pool)) 407 + if (list_empty(&isert_conn->fr_pool)) 475 408 return; 476 409 477 410 isert_info("Freeing conn %p fastreg pool", isert_conn); 478 411 479 412 list_for_each_entry_safe(fr_desc, tmp, 480 - &isert_conn->conn_fr_pool, list) { 413 + &isert_conn->fr_pool, list) { 481 414 list_del(&fr_desc->list); 482 415 ib_free_fast_reg_page_list(fr_desc->data_frpl); 483 416 ib_dereg_mr(fr_desc->data_mr); ··· 491 424 ++i; 492 425 } 493 426 494 - if (i < isert_conn->conn_fr_pool_size) 427 + if (i < isert_conn->fr_pool_size) 495 428 isert_warn("Pool still has %d regions registered\n", 496 - isert_conn->conn_fr_pool_size - i); 429 + isert_conn->fr_pool_size - i); 497 430 } 498 431 499 432 static int ··· 593 526 isert_conn_create_fastreg_pool(struct 
isert_conn *isert_conn) 594 527 { 595 528 struct fast_reg_descriptor *fr_desc; 596 - struct isert_device *device = isert_conn->conn_device; 529 + struct isert_device *device = isert_conn->device; 597 530 struct se_session *se_sess = isert_conn->conn->sess->se_sess; 598 531 struct se_node_acl *se_nacl = se_sess->se_node_acl; 599 532 int i, ret, tag_num; ··· 604 537 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth); 605 538 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; 606 539 607 - isert_conn->conn_fr_pool_size = 0; 540 + isert_conn->fr_pool_size = 0; 608 541 for (i = 0; i < tag_num; i++) { 609 542 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 610 543 if (!fr_desc) { ··· 614 547 } 615 548 616 549 ret = isert_create_fr_desc(device->ib_device, 617 - isert_conn->conn_pd, fr_desc); 550 + device->pd, fr_desc); 618 551 if (ret) { 619 552 isert_err("Failed to create fastreg descriptor err=%d\n", 620 553 ret); ··· 622 555 goto err; 623 556 } 624 557 625 - list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); 626 - isert_conn->conn_fr_pool_size++; 558 + list_add_tail(&fr_desc->list, &isert_conn->fr_pool); 559 + isert_conn->fr_pool_size++; 627 560 } 628 561 629 562 isert_dbg("Creating conn %p fastreg pool size=%d", 630 - isert_conn, isert_conn->conn_fr_pool_size); 563 + isert_conn, isert_conn->fr_pool_size); 631 564 632 565 return 0; 633 566 634 567 err: 635 568 isert_conn_free_fastreg_pool(isert_conn); 569 + return ret; 570 + } 571 + 572 + static void 573 + isert_init_conn(struct isert_conn *isert_conn) 574 + { 575 + isert_conn->state = ISER_CONN_INIT; 576 + INIT_LIST_HEAD(&isert_conn->accept_node); 577 + init_completion(&isert_conn->login_comp); 578 + init_completion(&isert_conn->login_req_comp); 579 + init_completion(&isert_conn->wait); 580 + kref_init(&isert_conn->kref); 581 + mutex_init(&isert_conn->mutex); 582 + spin_lock_init(&isert_conn->pool_lock); 583 + INIT_LIST_HEAD(&isert_conn->fr_pool); 584 + } 585 + 586 + static void 587 + 
isert_free_login_buf(struct isert_conn *isert_conn) 588 + { 589 + struct ib_device *ib_dev = isert_conn->device->ib_device; 590 + 591 + ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 592 + ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 593 + ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 594 + ISCSI_DEF_MAX_RECV_SEG_LEN, 595 + DMA_FROM_DEVICE); 596 + kfree(isert_conn->login_buf); 597 + } 598 + 599 + static int 600 + isert_alloc_login_buf(struct isert_conn *isert_conn, 601 + struct ib_device *ib_dev) 602 + { 603 + int ret; 604 + 605 + isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + 606 + ISER_RX_LOGIN_SIZE, GFP_KERNEL); 607 + if (!isert_conn->login_buf) { 608 + isert_err("Unable to allocate isert_conn->login_buf\n"); 609 + return -ENOMEM; 610 + } 611 + 612 + isert_conn->login_req_buf = isert_conn->login_buf; 613 + isert_conn->login_rsp_buf = isert_conn->login_buf + 614 + ISCSI_DEF_MAX_RECV_SEG_LEN; 615 + 616 + isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", 617 + isert_conn->login_buf, isert_conn->login_req_buf, 618 + isert_conn->login_rsp_buf); 619 + 620 + isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 621 + (void *)isert_conn->login_req_buf, 622 + ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 623 + 624 + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 625 + if (ret) { 626 + isert_err("login_req_dma mapping error: %d\n", ret); 627 + isert_conn->login_req_dma = 0; 628 + goto out_login_buf; 629 + } 630 + 631 + isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 632 + (void *)isert_conn->login_rsp_buf, 633 + ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 634 + 635 + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 636 + if (ret) { 637 + isert_err("login_rsp_dma mapping error: %d\n", ret); 638 + isert_conn->login_rsp_dma = 0; 639 + goto out_req_dma_map; 640 + } 641 + 642 + return 0; 643 + 644 + out_req_dma_map: 645 + ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 646 + ISCSI_DEF_MAX_RECV_SEG_LEN, 
DMA_FROM_DEVICE); 647 + out_login_buf: 648 + kfree(isert_conn->login_buf); 636 649 return ret; 637 650 } 638 651 ··· 723 576 struct iscsi_np *np = isert_np->np; 724 577 struct isert_conn *isert_conn; 725 578 struct isert_device *device; 726 - struct ib_device *ib_dev = cma_id->device; 727 579 int ret = 0; 728 580 729 581 spin_lock_bh(&np->np_thread_lock); ··· 737 591 cma_id, cma_id->context); 738 592 739 593 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 740 - if (!isert_conn) { 741 - isert_err("Unable to allocate isert_conn\n"); 594 + if (!isert_conn) 742 595 return -ENOMEM; 743 - } 744 - isert_conn->state = ISER_CONN_INIT; 745 - INIT_LIST_HEAD(&isert_conn->conn_accept_node); 746 - init_completion(&isert_conn->conn_login_comp); 747 - init_completion(&isert_conn->login_req_comp); 748 - init_completion(&isert_conn->conn_wait); 749 - kref_init(&isert_conn->conn_kref); 750 - mutex_init(&isert_conn->conn_mutex); 751 - spin_lock_init(&isert_conn->conn_lock); 752 - INIT_LIST_HEAD(&isert_conn->conn_fr_pool); 753 596 754 - isert_conn->conn_cm_id = cma_id; 597 + isert_init_conn(isert_conn); 598 + isert_conn->cm_id = cma_id; 755 599 756 - isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + 757 - ISER_RX_LOGIN_SIZE, GFP_KERNEL); 758 - if (!isert_conn->login_buf) { 759 - isert_err("Unable to allocate isert_conn->login_buf\n"); 760 - ret = -ENOMEM; 600 + ret = isert_alloc_login_buf(isert_conn, cma_id->device); 601 + if (ret) 761 602 goto out; 762 - } 763 603 764 - isert_conn->login_req_buf = isert_conn->login_buf; 765 - isert_conn->login_rsp_buf = isert_conn->login_buf + 766 - ISCSI_DEF_MAX_RECV_SEG_LEN; 767 - isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", 768 - isert_conn->login_buf, isert_conn->login_req_buf, 769 - isert_conn->login_rsp_buf); 770 - 771 - isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 772 - (void *)isert_conn->login_req_buf, 773 - ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 774 - 775 - ret = 
ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 776 - if (ret) { 777 - isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n", 778 - ret); 779 - isert_conn->login_req_dma = 0; 780 - goto out_login_buf; 781 - } 782 - 783 - isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 784 - (void *)isert_conn->login_rsp_buf, 785 - ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 786 - 787 - ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 788 - if (ret) { 789 - isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n", 790 - ret); 791 - isert_conn->login_rsp_dma = 0; 792 - goto out_req_dma_map; 793 - } 794 - 795 - device = isert_device_find_by_ib_dev(cma_id); 604 + device = isert_device_get(cma_id); 796 605 if (IS_ERR(device)) { 797 606 ret = PTR_ERR(device); 798 607 goto out_rsp_dma_map; 799 608 } 609 + isert_conn->device = device; 800 610 801 611 /* Set max inflight RDMA READ requests */ 802 612 isert_conn->initiator_depth = min_t(u8, 803 613 event->param.conn.initiator_depth, 804 614 device->dev_attr.max_qp_init_rd_atom); 805 615 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 806 - 807 - isert_conn->conn_device = device; 808 - isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); 809 - if (IS_ERR(isert_conn->conn_pd)) { 810 - ret = PTR_ERR(isert_conn->conn_pd); 811 - isert_err("ib_alloc_pd failed for conn %p: ret=%d\n", 812 - isert_conn, ret); 813 - goto out_pd; 814 - } 815 - 816 - isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd, 817 - IB_ACCESS_LOCAL_WRITE); 818 - if (IS_ERR(isert_conn->conn_mr)) { 819 - ret = PTR_ERR(isert_conn->conn_mr); 820 - isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n", 821 - isert_conn, ret); 822 - goto out_mr; 823 - } 824 616 825 617 ret = isert_conn_setup_qp(isert_conn, cma_id); 826 618 if (ret) ··· 773 689 goto out_conn_dev; 774 690 775 691 mutex_lock(&isert_np->np_accept_mutex); 776 - list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); 692 + 
list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list); 777 693 mutex_unlock(&isert_np->np_accept_mutex); 778 694 779 695 isert_info("np %p: Allow accept_np to continue\n", np); ··· 781 697 return 0; 782 698 783 699 out_conn_dev: 784 - ib_dereg_mr(isert_conn->conn_mr); 785 - out_mr: 786 - ib_dealloc_pd(isert_conn->conn_pd); 787 - out_pd: 788 - isert_device_try_release(device); 700 + isert_device_put(device); 789 701 out_rsp_dma_map: 790 - ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 791 - ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 792 - out_req_dma_map: 793 - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 794 - ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 795 - out_login_buf: 796 - kfree(isert_conn->login_buf); 702 + isert_free_login_buf(isert_conn); 797 703 out: 798 704 kfree(isert_conn); 799 705 rdma_reject(cma_id, NULL, 0); ··· 793 719 static void 794 720 isert_connect_release(struct isert_conn *isert_conn) 795 721 { 796 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 797 - struct isert_device *device = isert_conn->conn_device; 722 + struct isert_device *device = isert_conn->device; 798 723 799 724 isert_dbg("conn %p\n", isert_conn); 800 725 801 - if (device && device->use_fastreg) 726 + BUG_ON(!device); 727 + 728 + if (device->use_fastreg) 802 729 isert_conn_free_fastreg_pool(isert_conn); 803 730 804 731 isert_free_rx_descriptors(isert_conn); 805 - rdma_destroy_id(isert_conn->conn_cm_id); 732 + if (isert_conn->cm_id) 733 + rdma_destroy_id(isert_conn->cm_id); 806 734 807 - if (isert_conn->conn_qp) { 808 - struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context; 735 + if (isert_conn->qp) { 736 + struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; 809 737 810 - isert_dbg("dec completion context %p active_qps\n", comp); 811 - mutex_lock(&device_list_mutex); 812 - comp->active_qps--; 813 - mutex_unlock(&device_list_mutex); 814 - 815 - ib_destroy_qp(isert_conn->conn_qp); 738 + isert_comp_put(comp); 739 + 
ib_destroy_qp(isert_conn->qp); 816 740 } 817 741 818 - ib_dereg_mr(isert_conn->conn_mr); 819 - ib_dealloc_pd(isert_conn->conn_pd); 742 + if (isert_conn->login_buf) 743 + isert_free_login_buf(isert_conn); 820 744 821 - if (isert_conn->login_buf) { 822 - ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 823 - ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 824 - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 825 - ISCSI_DEF_MAX_RECV_SEG_LEN, 826 - DMA_FROM_DEVICE); 827 - kfree(isert_conn->login_buf); 828 - } 745 + isert_device_put(device); 746 + 829 747 kfree(isert_conn); 830 - 831 - if (device) 832 - isert_device_try_release(device); 833 748 } 834 749 835 750 static void ··· 828 765 829 766 isert_info("conn %p\n", isert_conn); 830 767 831 - if (!kref_get_unless_zero(&isert_conn->conn_kref)) { 768 + if (!kref_get_unless_zero(&isert_conn->kref)) { 832 769 isert_warn("conn %p connect_release is running\n", isert_conn); 833 770 return; 834 771 } 835 772 836 - mutex_lock(&isert_conn->conn_mutex); 773 + mutex_lock(&isert_conn->mutex); 837 774 if (isert_conn->state != ISER_CONN_FULL_FEATURE) 838 775 isert_conn->state = ISER_CONN_UP; 839 - mutex_unlock(&isert_conn->conn_mutex); 776 + mutex_unlock(&isert_conn->mutex); 840 777 } 841 778 842 779 static void 843 - isert_release_conn_kref(struct kref *kref) 780 + isert_release_kref(struct kref *kref) 844 781 { 845 782 struct isert_conn *isert_conn = container_of(kref, 846 - struct isert_conn, conn_kref); 783 + struct isert_conn, kref); 847 784 848 785 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 849 786 current->pid); ··· 854 791 static void 855 792 isert_put_conn(struct isert_conn *isert_conn) 856 793 { 857 - kref_put(&isert_conn->conn_kref, isert_release_conn_kref); 794 + kref_put(&isert_conn->kref, isert_release_kref); 858 795 } 859 796 860 797 /** ··· 866 803 * to TEMINATING and start teardown sequence (rdma_disconnect). 867 804 * In case the connection state is UP, complete flush as well. 
868 805 * 869 - * This routine must be called with conn_mutex held. Thus it is 806 + * This routine must be called with mutex held. Thus it is 870 807 * safe to call multiple times. 871 808 */ 872 809 static void ··· 882 819 isert_info("Terminating conn %p state %d\n", 883 820 isert_conn, isert_conn->state); 884 821 isert_conn->state = ISER_CONN_TERMINATING; 885 - err = rdma_disconnect(isert_conn->conn_cm_id); 822 + err = rdma_disconnect(isert_conn->cm_id); 886 823 if (err) 887 824 isert_warn("Failed rdma_disconnect isert_conn %p\n", 888 825 isert_conn); ··· 931 868 932 869 isert_conn = cma_id->qp->qp_context; 933 870 934 - mutex_lock(&isert_conn->conn_mutex); 871 + mutex_lock(&isert_conn->mutex); 935 872 isert_conn_terminate(isert_conn); 936 - mutex_unlock(&isert_conn->conn_mutex); 873 + mutex_unlock(&isert_conn->mutex); 937 874 938 - isert_info("conn %p completing conn_wait\n", isert_conn); 939 - complete(&isert_conn->conn_wait); 875 + isert_info("conn %p completing wait\n", isert_conn); 876 + complete(&isert_conn->wait); 940 877 941 878 return 0; 942 879 } 943 880 944 - static void 881 + static int 945 882 isert_connect_error(struct rdma_cm_id *cma_id) 946 883 { 947 884 struct isert_conn *isert_conn = cma_id->qp->qp_context; 948 885 886 + isert_conn->cm_id = NULL; 949 887 isert_put_conn(isert_conn); 888 + 889 + return -1; 950 890 } 951 891 952 892 static int ··· 978 912 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ 979 913 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ 980 914 case RDMA_CM_EVENT_CONNECT_ERROR: 981 - isert_connect_error(cma_id); 915 + ret = isert_connect_error(cma_id); 982 916 break; 983 917 default: 984 918 isert_err("Unhandled RDMA CMA event: %d\n", event->event); ··· 993 927 { 994 928 struct ib_recv_wr *rx_wr, *rx_wr_failed; 995 929 int i, ret; 996 - unsigned int rx_head = isert_conn->conn_rx_desc_head; 930 + unsigned int rx_head = isert_conn->rx_desc_head; 997 931 struct iser_rx_desc *rx_desc; 998 932 999 - for (rx_wr = isert_conn->conn_rx_wr, 
i = 0; i < count; i++, rx_wr++) { 1000 - rx_desc = &isert_conn->conn_rx_descs[rx_head]; 933 + for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 934 + rx_desc = &isert_conn->rx_descs[rx_head]; 1001 935 rx_wr->wr_id = (uintptr_t)rx_desc; 1002 936 rx_wr->sg_list = &rx_desc->rx_sg; 1003 937 rx_wr->num_sge = 1; ··· 1009 943 rx_wr->next = NULL; /* mark end of work requests list */ 1010 944 1011 945 isert_conn->post_recv_buf_count += count; 1012 - ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr, 946 + ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, 1013 947 &rx_wr_failed); 1014 948 if (ret) { 1015 949 isert_err("ib_post_recv() failed with ret: %d\n", ret); 1016 950 isert_conn->post_recv_buf_count -= count; 1017 951 } else { 1018 952 isert_dbg("Posted %d RX buffers\n", count); 1019 - isert_conn->conn_rx_desc_head = rx_head; 953 + isert_conn->rx_desc_head = rx_head; 1020 954 } 1021 955 return ret; 1022 956 } ··· 1024 958 static int 1025 959 isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 1026 960 { 1027 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 961 + struct ib_device *ib_dev = isert_conn->cm_id->device; 1028 962 struct ib_send_wr send_wr, *send_wr_failed; 1029 963 int ret; 1030 964 ··· 1038 972 send_wr.opcode = IB_WR_SEND; 1039 973 send_wr.send_flags = IB_SEND_SIGNALED; 1040 974 1041 - ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed); 975 + ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed); 1042 976 if (ret) 1043 977 isert_err("ib_post_send() failed, ret: %d\n", ret); 1044 978 ··· 1050 984 struct isert_cmd *isert_cmd, 1051 985 struct iser_tx_desc *tx_desc) 1052 986 { 1053 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 987 + struct isert_device *device = isert_conn->device; 988 + struct ib_device *ib_dev = device->ib_device; 1054 989 1055 990 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 1056 991 ISER_HEADERS_LEN, DMA_TO_DEVICE); ··· 1062 
995 tx_desc->num_sge = 1; 1063 996 tx_desc->isert_cmd = isert_cmd; 1064 997 1065 - if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { 1066 - tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; 998 + if (tx_desc->tx_sg[0].lkey != device->mr->lkey) { 999 + tx_desc->tx_sg[0].lkey = device->mr->lkey; 1067 1000 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 1068 1001 } 1069 1002 } ··· 1072 1005 isert_init_tx_hdrs(struct isert_conn *isert_conn, 1073 1006 struct iser_tx_desc *tx_desc) 1074 1007 { 1075 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1008 + struct isert_device *device = isert_conn->device; 1009 + struct ib_device *ib_dev = device->ib_device; 1076 1010 u64 dma_addr; 1077 1011 1078 1012 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, ··· 1086 1018 tx_desc->dma_addr = dma_addr; 1087 1019 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 1088 1020 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 1089 - tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; 1021 + tx_desc->tx_sg[0].lkey = device->mr->lkey; 1090 1022 1091 1023 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 1092 1024 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, ··· 1119 1051 memset(&sge, 0, sizeof(struct ib_sge)); 1120 1052 sge.addr = isert_conn->login_req_dma; 1121 1053 sge.length = ISER_RX_LOGIN_SIZE; 1122 - sge.lkey = isert_conn->conn_mr->lkey; 1054 + sge.lkey = isert_conn->device->mr->lkey; 1123 1055 1124 1056 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 1125 1057 sge.addr, sge.length, sge.lkey); ··· 1130 1062 rx_wr.num_sge = 1; 1131 1063 1132 1064 isert_conn->post_recv_buf_count++; 1133 - ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); 1065 + ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); 1134 1066 if (ret) { 1135 1067 isert_err("ib_post_recv() failed: %d\n", ret); 1136 1068 isert_conn->post_recv_buf_count--; ··· 1144 1076 u32 length) 1145 1077 { 1146 1078 struct isert_conn *isert_conn = conn->context; 1147 - 
struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1148 - struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc; 1079 + struct isert_device *device = isert_conn->device; 1080 + struct ib_device *ib_dev = device->ib_device; 1081 + struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 1149 1082 int ret; 1150 1083 1151 1084 isert_create_send_desc(isert_conn, NULL, tx_desc); ··· 1169 1100 1170 1101 tx_dsg->addr = isert_conn->login_rsp_dma; 1171 1102 tx_dsg->length = length; 1172 - tx_dsg->lkey = isert_conn->conn_mr->lkey; 1103 + tx_dsg->lkey = isert_conn->device->mr->lkey; 1173 1104 tx_desc->num_sge = 2; 1174 1105 } 1175 1106 if (!login->login_failed) { 1176 1107 if (login->login_complete) { 1177 1108 if (!conn->sess->sess_ops->SessionType && 1178 - isert_conn->conn_device->use_fastreg) { 1109 + isert_conn->device->use_fastreg) { 1179 1110 ret = isert_conn_create_fastreg_pool(isert_conn); 1180 1111 if (ret) { 1181 1112 isert_err("Conn: %p failed to create" ··· 1193 1124 return ret; 1194 1125 1195 1126 /* Now we are in FULL_FEATURE phase */ 1196 - mutex_lock(&isert_conn->conn_mutex); 1127 + mutex_lock(&isert_conn->mutex); 1197 1128 isert_conn->state = ISER_CONN_FULL_FEATURE; 1198 - mutex_unlock(&isert_conn->conn_mutex); 1129 + mutex_unlock(&isert_conn->mutex); 1199 1130 goto post_send; 1200 1131 } 1201 1132 ··· 1254 1185 memcpy(login->req_buf, &rx_desc->data[0], size); 1255 1186 1256 1187 if (login->first_request) { 1257 - complete(&isert_conn->conn_login_comp); 1188 + complete(&isert_conn->login_comp); 1258 1189 return; 1259 1190 } 1260 1191 schedule_delayed_work(&conn->login_work, 0); ··· 1263 1194 static struct iscsi_cmd 1264 1195 *isert_allocate_cmd(struct iscsi_conn *conn) 1265 1196 { 1266 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1197 + struct isert_conn *isert_conn = conn->context; 1267 1198 struct isert_cmd *isert_cmd; 1268 1199 struct iscsi_cmd *cmd; 1269 1200 ··· 1448 1379 { 1449 1380 struct iscsi_hdr *hdr = 
&rx_desc->iscsi_header; 1450 1381 struct iscsi_conn *conn = isert_conn->conn; 1451 - struct iscsi_session *sess = conn->sess; 1452 1382 struct iscsi_cmd *cmd; 1453 1383 struct isert_cmd *isert_cmd; 1454 1384 int ret = -EINVAL; 1455 1385 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1456 1386 1457 - if (sess->sess_ops->SessionType && 1387 + if (conn->sess->sess_ops->SessionType && 1458 1388 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1459 1389 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1460 1390 " ignoring\n", opcode); ··· 1565 1497 } 1566 1498 1567 1499 static void 1568 - isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, 1569 - u32 xfer_len) 1500 + isert_rcv_completion(struct iser_rx_desc *desc, 1501 + struct isert_conn *isert_conn, 1502 + u32 xfer_len) 1570 1503 { 1571 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1504 + struct ib_device *ib_dev = isert_conn->cm_id->device; 1572 1505 struct iscsi_hdr *hdr; 1573 1506 u64 rx_dma; 1574 1507 int rx_buflen, outstanding; ··· 1601 1532 if (login && !login->first_request) 1602 1533 isert_rx_login_req(isert_conn); 1603 1534 } 1604 - mutex_lock(&isert_conn->conn_mutex); 1535 + mutex_lock(&isert_conn->mutex); 1605 1536 complete(&isert_conn->login_req_comp); 1606 - mutex_unlock(&isert_conn->conn_mutex); 1537 + mutex_unlock(&isert_conn->mutex); 1607 1538 } else { 1608 1539 isert_rx_do_work(desc, isert_conn); 1609 1540 } ··· 1635 1566 struct scatterlist *sg, u32 nents, u32 length, u32 offset, 1636 1567 enum iser_ib_op_code op, struct isert_data_buf *data) 1637 1568 { 1638 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1569 + struct ib_device *ib_dev = isert_conn->cm_id->device; 1639 1570 1640 1571 data->dma_dir = op == ISER_IB_RDMA_WRITE ? 
1641 1572 DMA_TO_DEVICE : DMA_FROM_DEVICE; ··· 1666 1597 static void 1667 1598 isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data) 1668 1599 { 1669 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1600 + struct ib_device *ib_dev = isert_conn->cm_id->device; 1670 1601 1671 1602 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir); 1672 1603 memset(data, 0, sizeof(*data)); ··· 1703 1634 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1704 1635 { 1705 1636 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1706 - LIST_HEAD(unmap_list); 1707 1637 1708 1638 isert_dbg("Cmd %p\n", isert_cmd); 1709 1639 ··· 1712 1644 isert_unmap_data_buf(isert_conn, &wr->prot); 1713 1645 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1714 1646 } 1715 - spin_lock_bh(&isert_conn->conn_lock); 1716 - list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool); 1717 - spin_unlock_bh(&isert_conn->conn_lock); 1647 + spin_lock_bh(&isert_conn->pool_lock); 1648 + list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool); 1649 + spin_unlock_bh(&isert_conn->pool_lock); 1718 1650 wr->fr_desc = NULL; 1719 1651 } 1720 1652 ··· 1733 1665 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1734 1666 struct isert_conn *isert_conn = isert_cmd->conn; 1735 1667 struct iscsi_conn *conn = isert_conn->conn; 1736 - struct isert_device *device = isert_conn->conn_device; 1668 + struct isert_device *device = isert_conn->device; 1737 1669 struct iscsi_text_rsp *hdr; 1738 1670 1739 1671 isert_dbg("Cmd %p\n", isert_cmd); ··· 1883 1815 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1884 1816 struct se_cmd *se_cmd = &cmd->se_cmd; 1885 1817 struct isert_conn *isert_conn = isert_cmd->conn; 1886 - struct isert_device *device = isert_conn->conn_device; 1818 + struct isert_device *device = isert_conn->device; 1887 1819 int ret = 0; 1888 1820 1889 1821 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { ··· 1909 1841 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 
1910 1842 struct se_cmd *se_cmd = &cmd->se_cmd; 1911 1843 struct isert_conn *isert_conn = isert_cmd->conn; 1912 - struct isert_device *device = isert_conn->conn_device; 1844 + struct isert_device *device = isert_conn->device; 1913 1845 int ret = 0; 1914 1846 1915 1847 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { ··· 1929 1861 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1930 1862 spin_unlock_bh(&cmd->istate_lock); 1931 1863 1932 - if (ret) 1864 + if (ret) { 1865 + target_put_sess_cmd(se_cmd->se_sess, se_cmd); 1933 1866 transport_send_check_condition_and_sense(se_cmd, 1934 1867 se_cmd->pi_err, 0); 1935 - else 1868 + } else { 1936 1869 target_execute_cmd(se_cmd); 1870 + } 1937 1871 } 1938 1872 1939 1873 static void ··· 1944 1874 struct isert_cmd *isert_cmd = container_of(work, 1945 1875 struct isert_cmd, comp_work); 1946 1876 struct isert_conn *isert_conn = isert_cmd->conn; 1947 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1877 + struct ib_device *ib_dev = isert_conn->cm_id->device; 1948 1878 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1949 1879 1950 1880 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); ··· 1992 1922 } 1993 1923 1994 1924 static void 1995 - isert_send_completion(struct iser_tx_desc *tx_desc, 1925 + isert_snd_completion(struct iser_tx_desc *tx_desc, 1996 1926 struct isert_conn *isert_conn) 1997 1927 { 1998 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1928 + struct ib_device *ib_dev = isert_conn->cm_id->device; 1999 1929 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 2000 1930 struct isert_rdma_wr *wr; 2001 1931 ··· 2008 1938 isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op); 2009 1939 2010 1940 switch (wr->iser_ib_op) { 2011 - case ISER_IB_RECV: 2012 - isert_err("Got ISER_IB_RECV\n"); 2013 - dump_stack(); 2014 - break; 2015 1941 case ISER_IB_SEND: 2016 1942 isert_response_completion(tx_desc, isert_cmd, 2017 1943 isert_conn, ib_dev); ··· 2039 1973 static inline bool 2040 1974 
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id) 2041 1975 { 2042 - void *start = isert_conn->conn_rx_descs; 2043 - int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs); 1976 + void *start = isert_conn->rx_descs; 1977 + int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs); 2044 1978 2045 1979 if (wr_id >= start && wr_id < start + len) 2046 1980 return false; ··· 2052 1986 isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) 2053 1987 { 2054 1988 if (wc->wr_id == ISER_BEACON_WRID) { 2055 - isert_info("conn %p completing conn_wait_comp_err\n", 1989 + isert_info("conn %p completing wait_comp_err\n", 2056 1990 isert_conn); 2057 - complete(&isert_conn->conn_wait_comp_err); 1991 + complete(&isert_conn->wait_comp_err); 2058 1992 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) { 2059 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1993 + struct ib_device *ib_dev = isert_conn->cm_id->device; 2060 1994 struct isert_cmd *isert_cmd; 2061 1995 struct iser_tx_desc *desc; 2062 1996 ··· 2084 2018 if (likely(wc->status == IB_WC_SUCCESS)) { 2085 2019 if (wc->opcode == IB_WC_RECV) { 2086 2020 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; 2087 - isert_rx_completion(rx_desc, isert_conn, wc->byte_len); 2021 + isert_rcv_completion(rx_desc, isert_conn, wc->byte_len); 2088 2022 } else { 2089 2023 tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; 2090 - isert_send_completion(tx_desc, isert_conn); 2024 + isert_snd_completion(tx_desc, isert_conn); 2091 2025 } 2092 2026 } else { 2093 2027 if (wc->status != IB_WC_WR_FLUSH_ERR) ··· 2136 2070 struct ib_send_wr *wr_failed; 2137 2071 int ret; 2138 2072 2139 - ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, 2073 + ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, 2140 2074 &wr_failed); 2141 2075 if (ret) { 2142 2076 isert_err("ib_post_send failed with %d\n", ret); ··· 2149 2083 isert_put_response(struct 
iscsi_conn *conn, struct iscsi_cmd *cmd) 2150 2084 { 2151 2085 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2152 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2086 + struct isert_conn *isert_conn = conn->context; 2153 2087 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2154 2088 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 2155 2089 &isert_cmd->tx_desc.iscsi_header; ··· 2163 2097 if (cmd->se_cmd.sense_buffer && 2164 2098 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 2165 2099 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 2166 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2100 + struct isert_device *device = isert_conn->device; 2101 + struct ib_device *ib_dev = device->ib_device; 2167 2102 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2168 2103 u32 padding, pdu_len; 2169 2104 ··· 2183 2116 isert_cmd->pdu_buf_len = pdu_len; 2184 2117 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2185 2118 tx_dsg->length = pdu_len; 2186 - tx_dsg->lkey = isert_conn->conn_mr->lkey; 2119 + tx_dsg->lkey = device->mr->lkey; 2187 2120 isert_cmd->tx_desc.num_sge = 2; 2188 2121 } 2189 2122 ··· 2198 2131 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2199 2132 { 2200 2133 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2201 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2202 - struct isert_device *device = isert_conn->conn_device; 2134 + struct isert_conn *isert_conn = conn->context; 2135 + struct isert_device *device = isert_conn->device; 2203 2136 2204 2137 spin_lock_bh(&conn->cmd_lock); 2205 2138 if (!list_empty(&cmd->i_conn_node)) ··· 2215 2148 static enum target_prot_op 2216 2149 isert_get_sup_prot_ops(struct iscsi_conn *conn) 2217 2150 { 2218 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2219 - struct isert_device *device = isert_conn->conn_device; 2151 + struct isert_conn *isert_conn = conn->context; 2152 + struct isert_device 
*device = isert_conn->device; 2220 2153 2221 2154 if (conn->tpg->tpg_attrib.t10_pi) { 2222 2155 if (device->pi_capable) { ··· 2237 2170 bool nopout_response) 2238 2171 { 2239 2172 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2240 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2173 + struct isert_conn *isert_conn = conn->context; 2241 2174 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2242 2175 2243 2176 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); ··· 2256 2189 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2257 2190 { 2258 2191 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2259 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2192 + struct isert_conn *isert_conn = conn->context; 2260 2193 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2261 2194 2262 2195 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); ··· 2274 2207 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2275 2208 { 2276 2209 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2277 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2210 + struct isert_conn *isert_conn = conn->context; 2278 2211 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2279 2212 2280 2213 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); ··· 2292 2225 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2293 2226 { 2294 2227 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2295 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2228 + struct isert_conn *isert_conn = conn->context; 2296 2229 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2297 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2230 + struct isert_device *device = isert_conn->device; 2231 + struct ib_device *ib_dev = device->ib_device; 2298 2232 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2299 2233 
struct iscsi_reject *hdr = 2300 2234 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; ··· 2311 2243 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 2312 2244 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2313 2245 tx_dsg->length = ISCSI_HDR_LEN; 2314 - tx_dsg->lkey = isert_conn->conn_mr->lkey; 2246 + tx_dsg->lkey = device->mr->lkey; 2315 2247 isert_cmd->tx_desc.num_sge = 2; 2316 2248 2317 2249 isert_init_send_wr(isert_conn, isert_cmd, send_wr); ··· 2325 2257 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2326 2258 { 2327 2259 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2328 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2260 + struct isert_conn *isert_conn = conn->context; 2329 2261 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2330 2262 struct iscsi_text_rsp *hdr = 2331 2263 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; ··· 2341 2273 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2342 2274 2343 2275 if (txt_rsp_len) { 2344 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2276 + struct isert_device *device = isert_conn->device; 2277 + struct ib_device *ib_dev = device->ib_device; 2345 2278 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2346 2279 void *txt_rsp_buf = cmd->buf_ptr; 2347 2280 ··· 2352 2283 isert_cmd->pdu_buf_len = txt_rsp_len; 2353 2284 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2354 2285 tx_dsg->length = txt_rsp_len; 2355 - tx_dsg->lkey = isert_conn->conn_mr->lkey; 2286 + tx_dsg->lkey = device->mr->lkey; 2356 2287 isert_cmd->tx_desc.num_sge = 2; 2357 2288 } 2358 2289 isert_init_send_wr(isert_conn, isert_cmd, send_wr); ··· 2369 2300 { 2370 2301 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2371 2302 struct scatterlist *sg_start, *tmp_sg; 2372 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2303 + struct isert_device *device = isert_conn->device; 2304 + struct ib_device *ib_dev = device->ib_device; 2373 2305 u32 sg_off, page_off; 2374 2306 int i = 0, 
sg_nents; 2375 2307 ··· 2394 2324 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; 2395 2325 ib_sge->length = min_t(u32, data_left, 2396 2326 ib_sg_dma_len(ib_dev, tmp_sg) - page_off); 2397 - ib_sge->lkey = isert_conn->conn_mr->lkey; 2327 + ib_sge->lkey = device->mr->lkey; 2398 2328 2399 2329 isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", 2400 2330 ib_sge->addr, ib_sge->length, ib_sge->lkey); ··· 2416 2346 { 2417 2347 struct se_cmd *se_cmd = &cmd->se_cmd; 2418 2348 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2419 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2349 + struct isert_conn *isert_conn = conn->context; 2420 2350 struct isert_data_buf *data = &wr->data; 2421 2351 struct ib_send_wr *send_wr; 2422 2352 struct ib_sge *ib_sge; ··· 2555 2485 enum isert_indicator ind, 2556 2486 struct ib_sge *sge) 2557 2487 { 2558 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2488 + struct isert_device *device = isert_conn->device; 2489 + struct ib_device *ib_dev = device->ib_device; 2559 2490 struct ib_mr *mr; 2560 2491 struct ib_fast_reg_page_list *frpl; 2561 2492 struct ib_send_wr fr_wr, inv_wr; ··· 2565 2494 u32 page_off; 2566 2495 2567 2496 if (mem->dma_nents == 1) { 2568 - sge->lkey = isert_conn->conn_mr->lkey; 2497 + sge->lkey = device->mr->lkey; 2569 2498 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); 2570 2499 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); 2571 2500 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", ··· 2613 2542 else 2614 2543 wr->next = &fr_wr; 2615 2544 2616 - ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); 2545 + ret = ib_post_send(isert_conn->qp, wr, &bad_wr); 2617 2546 if (ret) { 2618 2547 isert_err("fast registration failed, ret:%d\n", ret); 2619 2548 return ret; ··· 2726 2655 else 2727 2656 wr->next = &sig_wr; 2728 2657 2729 - ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); 2658 + ret = ib_post_send(isert_conn->qp, wr, &bad_wr); 2730 2659 if (ret) { 
2731 2660 isert_err("fast registration failed, ret:%d\n", ret); 2732 2661 goto err; ··· 2756 2685 struct isert_cmd *isert_cmd, 2757 2686 struct isert_rdma_wr *wr) 2758 2687 { 2759 - struct isert_device *device = isert_conn->conn_device; 2688 + struct isert_device *device = isert_conn->device; 2760 2689 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; 2761 2690 int ret; 2762 2691 2763 2692 if (!wr->fr_desc->pi_ctx) { 2764 2693 ret = isert_create_pi_ctx(wr->fr_desc, 2765 2694 device->ib_device, 2766 - isert_conn->conn_pd); 2695 + device->pd); 2767 2696 if (ret) { 2768 2697 isert_err("conn %p failed to allocate pi_ctx\n", 2769 2698 isert_conn); ··· 2834 2763 return ret; 2835 2764 2836 2765 if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) { 2837 - spin_lock_irqsave(&isert_conn->conn_lock, flags); 2838 - fr_desc = list_first_entry(&isert_conn->conn_fr_pool, 2766 + spin_lock_irqsave(&isert_conn->pool_lock, flags); 2767 + fr_desc = list_first_entry(&isert_conn->fr_pool, 2839 2768 struct fast_reg_descriptor, list); 2840 2769 list_del(&fr_desc->list); 2841 - spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2770 + spin_unlock_irqrestore(&isert_conn->pool_lock, flags); 2842 2771 wr->fr_desc = fr_desc; 2843 2772 } 2844 2773 ··· 2885 2814 2886 2815 unmap_cmd: 2887 2816 if (fr_desc) { 2888 - spin_lock_irqsave(&isert_conn->conn_lock, flags); 2889 - list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); 2890 - spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2817 + spin_lock_irqsave(&isert_conn->pool_lock, flags); 2818 + list_add_tail(&fr_desc->list, &isert_conn->fr_pool); 2819 + spin_unlock_irqrestore(&isert_conn->pool_lock, flags); 2891 2820 } 2892 2821 isert_unmap_data_buf(isert_conn, &wr->data); 2893 2822 ··· 2900 2829 struct se_cmd *se_cmd = &cmd->se_cmd; 2901 2830 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2902 2831 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 2903 - struct isert_conn *isert_conn = (struct isert_conn 
*)conn->context; 2904 - struct isert_device *device = isert_conn->conn_device; 2832 + struct isert_conn *isert_conn = conn->context; 2833 + struct isert_device *device = isert_conn->device; 2905 2834 struct ib_send_wr *wr_failed; 2906 2835 int rc; 2907 2836 ··· 2930 2859 wr->send_wr_num += 1; 2931 2860 } 2932 2861 2933 - rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2862 + rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); 2934 2863 if (rc) 2935 2864 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2936 2865 ··· 2950 2879 struct se_cmd *se_cmd = &cmd->se_cmd; 2951 2880 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2952 2881 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 2953 - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2954 - struct isert_device *device = isert_conn->conn_device; 2882 + struct isert_conn *isert_conn = conn->context; 2883 + struct isert_device *device = isert_conn->device; 2955 2884 struct ib_send_wr *wr_failed; 2956 2885 int rc; 2957 2886 ··· 2964 2893 return rc; 2965 2894 } 2966 2895 2967 - rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2896 + rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); 2968 2897 if (rc) 2969 2898 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2970 2899 ··· 3058 2987 goto out_id; 3059 2988 } 3060 2989 3061 - ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG); 2990 + ret = rdma_listen(id, 0); 3062 2991 if (ret) { 3063 2992 isert_err("rdma_listen() failed: %d\n", ret); 3064 2993 goto out_id; ··· 3117 3046 static int 3118 3047 isert_rdma_accept(struct isert_conn *isert_conn) 3119 3048 { 3120 - struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 3049 + struct rdma_cm_id *cm_id = isert_conn->cm_id; 3121 3050 struct rdma_conn_param cp; 3122 3051 int ret; 3123 3052 ··· 3138 3067 static int 3139 3068 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 3140 3069 { 3141 - struct isert_conn *isert_conn = 
(struct isert_conn *)conn->context; 3070 + struct isert_conn *isert_conn = conn->context; 3142 3071 int ret; 3143 3072 3144 3073 isert_info("before login_req comp conn: %p\n", isert_conn); ··· 3161 3090 3162 3091 isert_rx_login_req(isert_conn); 3163 3092 3164 - isert_info("before conn_login_comp conn: %p\n", conn); 3165 - ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); 3093 + isert_info("before login_comp conn: %p\n", conn); 3094 + ret = wait_for_completion_interruptible(&isert_conn->login_comp); 3166 3095 if (ret) 3167 3096 return ret; 3168 3097 ··· 3175 3104 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 3176 3105 struct isert_conn *isert_conn) 3177 3106 { 3178 - struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 3107 + struct rdma_cm_id *cm_id = isert_conn->cm_id; 3179 3108 struct rdma_route *cm_route = &cm_id->route; 3180 3109 struct sockaddr_in *sock_in; 3181 3110 struct sockaddr_in6 *sock_in6; ··· 3208 3137 static int 3209 3138 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 3210 3139 { 3211 - struct isert_np *isert_np = (struct isert_np *)np->np_context; 3140 + struct isert_np *isert_np = np->np_context; 3212 3141 struct isert_conn *isert_conn; 3213 - int max_accept = 0, ret; 3142 + int ret; 3214 3143 3215 3144 accept_wait: 3216 3145 ret = down_interruptible(&isert_np->np_sem); 3217 - if (ret || max_accept > 5) 3146 + if (ret) 3218 3147 return -ENODEV; 3219 3148 3220 3149 spin_lock_bh(&np->np_thread_lock); ··· 3233 3162 mutex_lock(&isert_np->np_accept_mutex); 3234 3163 if (list_empty(&isert_np->np_accept_list)) { 3235 3164 mutex_unlock(&isert_np->np_accept_mutex); 3236 - max_accept++; 3237 3165 goto accept_wait; 3238 3166 } 3239 3167 isert_conn = list_first_entry(&isert_np->np_accept_list, 3240 - struct isert_conn, conn_accept_node); 3241 - list_del_init(&isert_conn->conn_accept_node); 3168 + struct isert_conn, accept_node); 3169 + list_del_init(&isert_conn->accept_node); 3242 3170 
mutex_unlock(&isert_np->np_accept_mutex); 3243 3171 3244 3172 conn->context = isert_conn; 3245 3173 isert_conn->conn = conn; 3246 - max_accept = 0; 3247 3174 3248 3175 isert_set_conn_info(np, conn, isert_conn); 3249 3176 ··· 3253 3184 static void 3254 3185 isert_free_np(struct iscsi_np *np) 3255 3186 { 3256 - struct isert_np *isert_np = (struct isert_np *)np->np_context; 3187 + struct isert_np *isert_np = np->np_context; 3257 3188 struct isert_conn *isert_conn, *n; 3258 3189 3259 3190 if (isert_np->np_cm_id) ··· 3271 3202 isert_info("Still have isert connections, cleaning up...\n"); 3272 3203 list_for_each_entry_safe(isert_conn, n, 3273 3204 &isert_np->np_accept_list, 3274 - conn_accept_node) { 3205 + accept_node) { 3275 3206 isert_info("cleaning isert_conn %p state (%d)\n", 3276 3207 isert_conn, isert_conn->state); 3277 3208 isert_connect_release(isert_conn); ··· 3291 3222 3292 3223 isert_info("Starting release conn %p\n", isert_conn); 3293 3224 3294 - wait_for_completion(&isert_conn->conn_wait); 3225 + wait_for_completion(&isert_conn->wait); 3295 3226 3296 - mutex_lock(&isert_conn->conn_mutex); 3227 + mutex_lock(&isert_conn->mutex); 3297 3228 isert_conn->state = ISER_CONN_DOWN; 3298 - mutex_unlock(&isert_conn->conn_mutex); 3229 + mutex_unlock(&isert_conn->mutex); 3299 3230 3300 3231 isert_info("Destroying conn %p\n", isert_conn); 3301 3232 isert_put_conn(isert_conn); ··· 3333 3264 3334 3265 isert_info("conn %p\n", isert_conn); 3335 3266 3336 - init_completion(&isert_conn->conn_wait_comp_err); 3267 + init_completion(&isert_conn->wait_comp_err); 3337 3268 isert_conn->beacon.wr_id = ISER_BEACON_WRID; 3338 3269 /* post an indication that all flush errors were consumed */ 3339 - if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) { 3270 + if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) { 3340 3271 isert_err("conn %p failed to post beacon", isert_conn); 3341 3272 return; 3342 3273 } 3343 3274 3344 - 
wait_for_completion(&isert_conn->conn_wait_comp_err); 3275 + wait_for_completion(&isert_conn->wait_comp_err); 3345 3276 } 3346 3277 3347 3278 static void isert_wait_conn(struct iscsi_conn *conn) ··· 3350 3281 3351 3282 isert_info("Starting conn %p\n", isert_conn); 3352 3283 3353 - mutex_lock(&isert_conn->conn_mutex); 3284 + mutex_lock(&isert_conn->mutex); 3354 3285 /* 3355 - * Only wait for conn_wait_comp_err if the isert_conn made it 3286 + * Only wait for wait_comp_err if the isert_conn made it 3356 3287 * into full feature phase.. 3357 3288 */ 3358 3289 if (isert_conn->state == ISER_CONN_INIT) { 3359 - mutex_unlock(&isert_conn->conn_mutex); 3290 + mutex_unlock(&isert_conn->mutex); 3360 3291 return; 3361 3292 } 3362 3293 isert_conn_terminate(isert_conn); 3363 - mutex_unlock(&isert_conn->conn_mutex); 3294 + mutex_unlock(&isert_conn->mutex); 3364 3295 3365 3296 isert_wait4cmds(conn); 3366 3297 isert_wait4flush(isert_conn); ··· 3439 3370 } 3440 3371 3441 3372 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 3442 - MODULE_VERSION("0.1"); 3373 + MODULE_VERSION("1.0"); 3443 3374 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 3444 3375 MODULE_LICENSE("GPL"); 3445 3376
+18 -19
drivers/infiniband/ulp/isert/ib_isert.h
··· 31 31 #define isert_err(fmt, arg...) \ 32 32 pr_err(PFX "%s: " fmt, __func__ , ## arg) 33 33 34 - #define ISERT_RDMA_LISTEN_BACKLOG 10 35 34 #define ISCSI_ISER_SG_TABLESIZE 256 36 35 #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL 37 36 #define ISER_BEACON_WRID 0xfffffffffffffffeULL ··· 159 160 u64 login_req_dma; 160 161 int login_req_len; 161 162 u64 login_rsp_dma; 162 - unsigned int conn_rx_desc_head; 163 - struct iser_rx_desc *conn_rx_descs; 164 - struct ib_recv_wr conn_rx_wr[ISERT_MIN_POSTED_RX]; 163 + unsigned int rx_desc_head; 164 + struct iser_rx_desc *rx_descs; 165 + struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX]; 165 166 struct iscsi_conn *conn; 166 - struct list_head conn_accept_node; 167 - struct completion conn_login_comp; 167 + struct list_head accept_node; 168 + struct completion login_comp; 168 169 struct completion login_req_comp; 169 - struct iser_tx_desc conn_login_tx_desc; 170 - struct rdma_cm_id *conn_cm_id; 171 - struct ib_pd *conn_pd; 172 - struct ib_mr *conn_mr; 173 - struct ib_qp *conn_qp; 174 - struct isert_device *conn_device; 175 - struct mutex conn_mutex; 176 - struct completion conn_wait; 177 - struct completion conn_wait_comp_err; 178 - struct kref conn_kref; 179 - struct list_head conn_fr_pool; 180 - int conn_fr_pool_size; 170 + struct iser_tx_desc login_tx_desc; 171 + struct rdma_cm_id *cm_id; 172 + struct ib_qp *qp; 173 + struct isert_device *device; 174 + struct mutex mutex; 175 + struct completion wait; 176 + struct completion wait_comp_err; 177 + struct kref kref; 178 + struct list_head fr_pool; 179 + int fr_pool_size; 181 180 /* lock to protect fastreg pool */ 182 - spinlock_t conn_lock; 181 + spinlock_t pool_lock; 183 182 struct work_struct release_work; 184 183 struct ib_recv_wr beacon; 185 184 bool logout_posted; ··· 208 211 bool pi_capable; 209 212 int refcount; 210 213 struct ib_device *ib_device; 214 + struct ib_pd *pd; 215 + struct ib_mr *mr; 211 216 struct isert_comp *comps; 212 217 int comps_used; 213 218 struct 
list_head dev_node;
+13 -36
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 93 93 " instead of using the node_guid of the first HCA."); 94 94 95 95 static struct ib_client srpt_client; 96 - static struct target_fabric_configfs *srpt_target; 96 + static const struct target_core_fabric_ops srpt_template; 97 97 static void srpt_release_channel(struct srpt_rdma_ch *ch); 98 98 static int srpt_queue_status(struct se_cmd *cmd); 99 99 ··· 3845 3845 int res; 3846 3846 3847 3847 /* Initialize sport->port_wwn and sport->port_tpg_1 */ 3848 - res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn, 3848 + res = core_tpg_register(&srpt_template, &sport->port_wwn, 3849 3849 &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL); 3850 3850 if (res) 3851 3851 return ERR_PTR(res); ··· 3913 3913 NULL, 3914 3914 }; 3915 3915 3916 - static struct target_core_fabric_ops srpt_template = { 3916 + static const struct target_core_fabric_ops srpt_template = { 3917 + .module = THIS_MODULE, 3918 + .name = "srpt", 3917 3919 .get_fabric_name = srpt_get_fabric_name, 3918 3920 .get_fabric_proto_ident = srpt_get_fabric_proto_ident, 3919 3921 .tpg_get_wwn = srpt_get_fabric_wwn, ··· 3960 3958 .fabric_drop_np = NULL, 3961 3959 .fabric_make_nodeacl = srpt_make_nodeacl, 3962 3960 .fabric_drop_nodeacl = srpt_drop_nodeacl, 3961 + 3962 + .tfc_wwn_attrs = srpt_wwn_attrs, 3963 + .tfc_tpg_base_attrs = srpt_tpg_attrs, 3964 + .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs, 3963 3965 }; 3964 3966 3965 3967 /** ··· 3994 3988 goto out; 3995 3989 } 3996 3990 3997 - srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt"); 3998 - if (IS_ERR(srpt_target)) { 3999 - pr_err("couldn't register\n"); 4000 - ret = PTR_ERR(srpt_target); 3991 + ret = target_register_template(&srpt_template); 3992 + if (ret) 4001 3993 goto out; 4002 - } 4003 - 4004 - srpt_target->tf_ops = srpt_template; 4005 - 4006 - /* 4007 - * Set up default attribute lists. 
4008 - */ 4009 - srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs; 4010 - srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs; 4011 - srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs; 4012 - srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 4013 - srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 4014 - srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 4015 - srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 4016 - srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 4017 - srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 4018 - 4019 - ret = target_fabric_configfs_register(srpt_target); 4020 - if (ret < 0) { 4021 - pr_err("couldn't register\n"); 4022 - goto out_free_target; 4023 - } 4024 3994 4025 3995 ret = ib_register_client(&srpt_client); 4026 3996 if (ret) { ··· 4007 4025 return 0; 4008 4026 4009 4027 out_unregister_target: 4010 - target_fabric_configfs_deregister(srpt_target); 4011 - srpt_target = NULL; 4012 - out_free_target: 4013 - if (srpt_target) 4014 - target_fabric_configfs_free(srpt_target); 4028 + target_unregister_template(&srpt_template); 4015 4029 out: 4016 4030 return ret; 4017 4031 } ··· 4015 4037 static void __exit srpt_cleanup_module(void) 4016 4038 { 4017 4039 ib_unregister_client(&srpt_client); 4018 - target_fabric_configfs_deregister(srpt_target); 4019 - srpt_target = NULL; 4040 + target_unregister_template(&srpt_template); 4020 4041 } 4021 4042 4022 4043 module_init(srpt_init_module);
+1 -1
drivers/scsi/qla2xxx/qla_target.c
··· 3065 3065 { 3066 3066 struct qla_hw_data *ha = vha->hw; 3067 3067 struct se_cmd *se_cmd; 3068 - struct target_core_fabric_ops *tfo; 3068 + const struct target_core_fabric_ops *tfo; 3069 3069 struct qla_tgt_cmd *cmd; 3070 3070 3071 3071 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+82 -94
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 53 53 static struct workqueue_struct *tcm_qla2xxx_free_wq; 54 54 static struct workqueue_struct *tcm_qla2xxx_cmd_wq; 55 55 56 - /* Local pointer to allocated TCM configfs fabric module */ 57 - static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; 58 - static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; 56 + static const struct target_core_fabric_ops tcm_qla2xxx_ops; 57 + static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops; 59 58 60 59 /* 61 60 * Parse WWN. ··· 333 334 struct tcm_qla2xxx_tpg, se_tpg); 334 335 335 336 return tpg->tpg_attrib.demo_mode_login_only; 337 + } 338 + 339 + static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg) 340 + { 341 + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 342 + struct tcm_qla2xxx_tpg, se_tpg); 343 + 344 + return tpg->tpg_attrib.fabric_prot_type; 336 345 } 337 346 338 347 static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( ··· 1089 1082 1090 1083 TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR); 1091 1084 1085 + static ssize_t tcm_qla2xxx_tpg_show_dynamic_sessions( 1086 + struct se_portal_group *se_tpg, 1087 + char *page) 1088 + { 1089 + return target_show_dynamic_sessions(se_tpg, page); 1090 + } 1091 + 1092 + TF_TPG_BASE_ATTR_RO(tcm_qla2xxx, dynamic_sessions); 1093 + 1094 + static ssize_t tcm_qla2xxx_tpg_store_fabric_prot_type( 1095 + struct se_portal_group *se_tpg, 1096 + const char *page, 1097 + size_t count) 1098 + { 1099 + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 1100 + struct tcm_qla2xxx_tpg, se_tpg); 1101 + unsigned long val; 1102 + int ret = kstrtoul(page, 0, &val); 1103 + 1104 + if (ret) { 1105 + pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); 1106 + return ret; 1107 + } 1108 + if (val != 0 && val != 1 && val != 3) { 1109 + pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); 1110 + return -EINVAL; 1111 + } 1112 + tpg->tpg_attrib.fabric_prot_type = val; 1113 + 1114 + return count; 1115 + } 1116 + 1117 + 
static ssize_t tcm_qla2xxx_tpg_show_fabric_prot_type( 1118 + struct se_portal_group *se_tpg, 1119 + char *page) 1120 + { 1121 + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 1122 + struct tcm_qla2xxx_tpg, se_tpg); 1123 + 1124 + return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); 1125 + } 1126 + TF_TPG_BASE_ATTR(tcm_qla2xxx, fabric_prot_type, S_IRUGO | S_IWUSR); 1127 + 1092 1128 static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { 1093 1129 &tcm_qla2xxx_tpg_enable.attr, 1130 + &tcm_qla2xxx_tpg_dynamic_sessions.attr, 1131 + &tcm_qla2xxx_tpg_fabric_prot_type.attr, 1094 1132 NULL, 1095 1133 }; 1096 1134 ··· 1176 1124 tpg->tpg_attrib.cache_dynamic_acls = 1; 1177 1125 tpg->tpg_attrib.demo_mode_login_only = 1; 1178 1126 1179 - ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, 1127 + ret = core_tpg_register(&tcm_qla2xxx_ops, wwn, 1180 1128 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1181 1129 if (ret < 0) { 1182 1130 kfree(tpg); ··· 1296 1244 tpg->tpg_attrib.cache_dynamic_acls = 1; 1297 1245 tpg->tpg_attrib.demo_mode_login_only = 1; 1298 1246 1299 - ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, 1247 + ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn, 1300 1248 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1301 1249 if (ret < 0) { 1302 1250 kfree(tpg); ··· 1612 1560 1613 1561 se_sess = transport_init_session_tags(num_tags, 1614 1562 sizeof(struct qla_tgt_cmd), 1615 - TARGET_PROT_NORMAL); 1563 + TARGET_PROT_ALL); 1616 1564 if (IS_ERR(se_sess)) { 1617 1565 pr_err("Unable to initialize struct se_session\n"); 1618 1566 return PTR_ERR(se_sess); ··· 1986 1934 NULL, 1987 1935 }; 1988 1936 1989 - static struct target_core_fabric_ops tcm_qla2xxx_ops = { 1937 + static const struct target_core_fabric_ops tcm_qla2xxx_ops = { 1938 + .module = THIS_MODULE, 1939 + .name = "qla2xxx", 1990 1940 .get_fabric_name = tcm_qla2xxx_get_fabric_name, 1991 1941 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, 1992 
1942 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, ··· 2003 1949 tcm_qla2xxx_check_demo_write_protect, 2004 1950 .tpg_check_prod_mode_write_protect = 2005 1951 tcm_qla2xxx_check_prod_write_protect, 1952 + .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only, 2006 1953 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, 2007 1954 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 2008 1955 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, ··· 2038 1983 .fabric_drop_np = NULL, 2039 1984 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, 2040 1985 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, 1986 + 1987 + .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, 1988 + .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, 1989 + .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs, 2041 1990 }; 2042 1991 2043 - static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { 1992 + static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { 1993 + .module = THIS_MODULE, 1994 + .name = "qla2xxx_npiv", 2044 1995 .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, 2045 1996 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, 2046 1997 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, ··· 2094 2033 .fabric_drop_np = NULL, 2095 2034 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, 2096 2035 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, 2036 + 2037 + .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, 2038 + .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs, 2097 2039 }; 2098 2040 2099 2041 static int tcm_qla2xxx_register_configfs(void) 2100 2042 { 2101 - struct target_fabric_configfs *fabric, *npiv_fabric; 2102 2043 int ret; 2103 2044 2104 2045 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " 2105 2046 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 2106 2047 utsname()->machine); 2107 - /* 2108 - * Register the top level struct config_item_type with TCM core 2109 - */ 2110 - fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx"); 
2111 - if (IS_ERR(fabric)) { 2112 - pr_err("target_fabric_configfs_init() failed\n"); 2113 - return PTR_ERR(fabric); 2114 - } 2115 - /* 2116 - * Setup fabric->tf_ops from our local tcm_qla2xxx_ops 2117 - */ 2118 - fabric->tf_ops = tcm_qla2xxx_ops; 2119 - /* 2120 - * Setup default attribute lists for various fabric->tf_cit_tmpl 2121 - */ 2122 - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 2123 - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; 2124 - fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = 2125 - tcm_qla2xxx_tpg_attrib_attrs; 2126 - fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 2127 - fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 2128 - fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2129 - fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2130 - fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2131 - fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2132 - /* 2133 - * Register the fabric for use within TCM 2134 - */ 2135 - ret = target_fabric_configfs_register(fabric); 2136 - if (ret < 0) { 2137 - pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); 2138 - return ret; 2139 - } 2140 - /* 2141 - * Setup our local pointer to *fabric 2142 - */ 2143 - tcm_qla2xxx_fabric_configfs = fabric; 2144 - pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n"); 2145 2048 2146 - /* 2147 - * Register the top level struct config_item_type for NPIV with TCM core 2148 - */ 2149 - npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv"); 2150 - if (IS_ERR(npiv_fabric)) { 2151 - pr_err("target_fabric_configfs_init() failed\n"); 2152 - ret = PTR_ERR(npiv_fabric); 2049 + ret = target_register_template(&tcm_qla2xxx_ops); 2050 + if (ret) 2051 + return ret; 2052 + 2053 + ret = target_register_template(&tcm_qla2xxx_npiv_ops); 2054 + if (ret) 2153 2055 goto out_fabric; 2154 - } 2155 - /* 2156 - * Setup fabric->tf_ops from our local 
tcm_qla2xxx_npiv_ops 2157 - */ 2158 - npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops; 2159 - /* 2160 - * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl 2161 - */ 2162 - npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 2163 - npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = 2164 - tcm_qla2xxx_npiv_tpg_attrs; 2165 - npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 2166 - npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 2167 - npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 2168 - npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2169 - npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2170 - npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2171 - npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2172 - /* 2173 - * Register the npiv_fabric for use within TCM 2174 - */ 2175 - ret = target_fabric_configfs_register(npiv_fabric); 2176 - if (ret < 0) { 2177 - pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); 2178 - goto out_fabric; 2179 - } 2180 - /* 2181 - * Setup our local pointer to *npiv_fabric 2182 - */ 2183 - tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric; 2184 - pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n"); 2185 2056 2186 2057 tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", 2187 2058 WQ_MEM_RECLAIM, 0); ··· 2133 2140 out_free_wq: 2134 2141 destroy_workqueue(tcm_qla2xxx_free_wq); 2135 2142 out_fabric_npiv: 2136 - target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); 2143 + target_unregister_template(&tcm_qla2xxx_npiv_ops); 2137 2144 out_fabric: 2138 - target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); 2145 + target_unregister_template(&tcm_qla2xxx_ops); 2139 2146 return ret; 2140 2147 } 2141 2148 ··· 2144 2151 destroy_workqueue(tcm_qla2xxx_cmd_wq); 2145 2152 destroy_workqueue(tcm_qla2xxx_free_wq); 2146 2153 2147 - 
target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); 2148 - tcm_qla2xxx_fabric_configfs = NULL; 2149 - pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n"); 2150 - 2151 - target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); 2152 - tcm_qla2xxx_npiv_fabric_configfs = NULL; 2153 - pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n"); 2154 + target_unregister_template(&tcm_qla2xxx_ops); 2155 + target_unregister_template(&tcm_qla2xxx_npiv_ops); 2154 2156 } 2155 2157 2156 2158 static int __init tcm_qla2xxx_init(void)
+1
drivers/scsi/qla2xxx/tcm_qla2xxx.h
··· 33 33 int demo_mode_write_protect; 34 34 int prod_mode_write_protect; 35 35 int demo_mode_login_only; 36 + int fabric_prot_type; 36 37 }; 37 38 38 39 struct tcm_qla2xxx_tpg {
+3 -2
drivers/target/Kconfig
··· 31 31 Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered 32 32 passthrough access to Linux/SCSI device 33 33 34 - config TCM_USER 34 + config TCM_USER2 35 35 tristate "TCM/USER Subsystem Plugin for Linux" 36 36 depends on UIO && NET 37 37 help 38 38 Say Y here to enable the TCM/USER subsystem plugin for a userspace 39 - process to handle requests 39 + process to handle requests. This is version 2 of the ABI; version 1 40 + is obsolete. 40 41 41 42 source "drivers/target/loopback/Kconfig" 42 43 source "drivers/target/tcm_fc/Kconfig"
+1 -1
drivers/target/Makefile
··· 22 22 obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o 23 23 obj-$(CONFIG_TCM_FILEIO) += target_core_file.o 24 24 obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o 25 - obj-$(CONFIG_TCM_USER) += target_core_user.o 25 + obj-$(CONFIG_TCM_USER2) += target_core_user.o 26 26 27 27 # Fabric modules 28 28 obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
-1
drivers/target/iscsi/Makefile
··· 1 1 iscsi_target_mod-y += iscsi_target_parameters.o \ 2 2 iscsi_target_seq_pdu_list.o \ 3 - iscsi_target_tq.o \ 4 3 iscsi_target_auth.o \ 5 4 iscsi_target_datain_values.o \ 6 5 iscsi_target_device.o \
+60 -71
drivers/target/iscsi/iscsi_target.c
··· 33 33 #include <target/iscsi/iscsi_target_core.h> 34 34 #include "iscsi_target_parameters.h" 35 35 #include "iscsi_target_seq_pdu_list.h" 36 - #include "iscsi_target_tq.h" 37 - #include "iscsi_target_configfs.h" 38 36 #include "iscsi_target_datain_values.h" 39 37 #include "iscsi_target_erl0.h" 40 38 #include "iscsi_target_erl1.h" ··· 535 537 536 538 static int __init iscsi_target_init_module(void) 537 539 { 538 - int ret = 0; 540 + int ret = 0, size; 539 541 540 542 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); 541 543 ··· 544 546 pr_err("Unable to allocate memory for iscsit_global\n"); 545 547 return -1; 546 548 } 549 + spin_lock_init(&iscsit_global->ts_bitmap_lock); 547 550 mutex_init(&auth_id_lock); 548 551 spin_lock_init(&sess_idr_lock); 549 552 idr_init(&tiqn_idr); 550 553 idr_init(&sess_idr); 551 554 552 - ret = iscsi_target_register_configfs(); 553 - if (ret < 0) 555 + ret = target_register_template(&iscsi_ops); 556 + if (ret) 554 557 goto out; 555 558 556 - ret = iscsi_thread_set_init(); 557 - if (ret < 0) 559 + size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long); 560 + iscsit_global->ts_bitmap = vzalloc(size); 561 + if (!iscsit_global->ts_bitmap) { 562 + pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); 558 563 goto configfs_out; 559 - 560 - if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) != 561 - TARGET_THREAD_SET_COUNT) { 562 - pr_err("iscsi_allocate_thread_sets() returned" 563 - " unexpected value!\n"); 564 - goto ts_out1; 565 564 } 566 565 567 566 lio_qr_cache = kmem_cache_create("lio_qr_cache", ··· 567 572 if (!lio_qr_cache) { 568 573 pr_err("nable to kmem_cache_create() for" 569 574 " lio_qr_cache\n"); 570 - goto ts_out2; 575 + goto bitmap_out; 571 576 } 572 577 573 578 lio_dr_cache = kmem_cache_create("lio_dr_cache", ··· 612 617 kmem_cache_destroy(lio_dr_cache); 613 618 qr_out: 614 619 kmem_cache_destroy(lio_qr_cache); 615 - ts_out2: 616 - iscsi_deallocate_thread_sets(); 617 - ts_out1: 618 - iscsi_thread_set_free(); 620 + 
bitmap_out: 621 + vfree(iscsit_global->ts_bitmap); 619 622 configfs_out: 620 - iscsi_target_deregister_configfs(); 623 + /* XXX: this probably wants it to be it's own unwind step.. */ 624 + if (iscsit_global->discovery_tpg) 625 + iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); 626 + target_unregister_template(&iscsi_ops); 621 627 out: 622 628 kfree(iscsit_global); 623 629 return -ENOMEM; ··· 626 630 627 631 static void __exit iscsi_target_cleanup_module(void) 628 632 { 629 - iscsi_deallocate_thread_sets(); 630 - iscsi_thread_set_free(); 631 633 iscsit_release_discovery_tpg(); 632 634 iscsit_unregister_transport(&iscsi_target_transport); 633 635 kmem_cache_destroy(lio_qr_cache); ··· 633 639 kmem_cache_destroy(lio_ooo_cache); 634 640 kmem_cache_destroy(lio_r2t_cache); 635 641 636 - iscsi_target_deregister_configfs(); 642 + /* 643 + * Shutdown discovery sessions and disable discovery TPG 644 + */ 645 + if (iscsit_global->discovery_tpg) 646 + iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); 637 647 648 + target_unregister_template(&iscsi_ops); 649 + 650 + vfree(iscsit_global->ts_bitmap); 638 651 kfree(iscsit_global); 639 652 } 640 653 ··· 991 990 /* 992 991 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 993 992 */ 994 - transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops, 993 + transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, 995 994 conn->sess->se_sess, be32_to_cpu(hdr->data_length), 996 995 cmd->data_direction, sam_task_attr, 997 996 cmd->sense_buffer + 2); ··· 1806 1805 u8 tcm_function; 1807 1806 int ret; 1808 1807 1809 - transport_init_se_cmd(&cmd->se_cmd, 1810 - &lio_target_fabric_configfs->tf_ops, 1808 + transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, 1811 1809 conn->sess->se_sess, 0, DMA_NONE, 1812 1810 TCM_SIMPLE_TAG, cmd->sense_buffer + 2); 1813 1811 ··· 2155 2155 cmd->text_in_ptr = NULL; 2156 2156 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); 2157 2157 } 2158 - 
EXPORT_SYMBOL(iscsit_handle_text_cmd); 2159 2158 2160 2159 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2161 2160 { ··· 3714 3715 3715 3716 void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3716 3717 { 3717 - struct iscsi_thread_set *ts = conn->thread_set; 3718 3718 int ord, cpu; 3719 3719 /* 3720 - * thread_id is assigned from iscsit_global->ts_bitmap from 3721 - * within iscsi_thread_set.c:iscsi_allocate_thread_sets() 3720 + * bitmap_id is assigned from iscsit_global->ts_bitmap from 3721 + * within iscsit_start_kthreads() 3722 3722 * 3723 - * Here we use thread_id to determine which CPU that this 3724 - * iSCSI connection's iscsi_thread_set will be scheduled to 3723 + * Here we use bitmap_id to determine which CPU that this 3724 + * iSCSI connection's RX/TX threads will be scheduled to 3725 3725 * execute upon. 3726 3726 */ 3727 - ord = ts->thread_id % cpumask_weight(cpu_online_mask); 3727 + ord = conn->bitmap_id % cpumask_weight(cpu_online_mask); 3728 3728 for_each_online_cpu(cpu) { 3729 3729 if (ord-- == 0) { 3730 3730 cpumask_set_cpu(cpu, conn->conn_cpumask); ··· 3912 3914 switch (state) { 3913 3915 case ISTATE_SEND_LOGOUTRSP: 3914 3916 if (!iscsit_logout_post_handler(cmd, conn)) 3915 - goto restart; 3917 + return -ECONNRESET; 3916 3918 /* fall through */ 3917 3919 case ISTATE_SEND_STATUS: 3918 3920 case ISTATE_SEND_ASYNCMSG: ··· 3940 3942 3941 3943 err: 3942 3944 return -1; 3943 - restart: 3944 - return -EAGAIN; 3945 3945 } 3946 3946 3947 3947 static int iscsit_handle_response_queue(struct iscsi_conn *conn) ··· 3966 3970 int iscsi_target_tx_thread(void *arg) 3967 3971 { 3968 3972 int ret = 0; 3969 - struct iscsi_conn *conn; 3970 - struct iscsi_thread_set *ts = arg; 3973 + struct iscsi_conn *conn = arg; 3971 3974 /* 3972 3975 * Allow ourselves to be interrupted by SIGINT so that a 3973 3976 * connection recovery / failure event can be triggered externally. 
3974 3977 */ 3975 3978 allow_signal(SIGINT); 3976 - 3977 - restart: 3978 - conn = iscsi_tx_thread_pre_handler(ts); 3979 - if (!conn) 3980 - goto out; 3981 - 3982 - ret = 0; 3983 3979 3984 3980 while (!kthread_should_stop()) { 3985 3981 /* ··· 3981 3993 iscsit_thread_check_cpumask(conn, current, 1); 3982 3994 3983 3995 wait_event_interruptible(conn->queues_wq, 3984 - !iscsit_conn_all_queues_empty(conn) || 3985 - ts->status == ISCSI_THREAD_SET_RESET); 3996 + !iscsit_conn_all_queues_empty(conn)); 3986 3997 3987 - if ((ts->status == ISCSI_THREAD_SET_RESET) || 3988 - signal_pending(current)) 3998 + if (signal_pending(current)) 3989 3999 goto transport_err; 3990 4000 3991 4001 get_immediate: ··· 3994 4008 ret = iscsit_handle_response_queue(conn); 3995 4009 if (ret == 1) 3996 4010 goto get_immediate; 3997 - else if (ret == -EAGAIN) 3998 - goto restart; 4011 + else if (ret == -ECONNRESET) 4012 + goto out; 3999 4013 else if (ret < 0) 4000 4014 goto transport_err; 4001 4015 } 4002 4016 4003 4017 transport_err: 4004 4018 iscsit_take_action_for_connection_exit(conn); 4005 - goto restart; 4006 4019 out: 4007 4020 return 0; 4008 4021 } ··· 4096 4111 int ret; 4097 4112 u8 buffer[ISCSI_HDR_LEN], opcode; 4098 4113 u32 checksum = 0, digest = 0; 4099 - struct iscsi_conn *conn = NULL; 4100 - struct iscsi_thread_set *ts = arg; 4114 + struct iscsi_conn *conn = arg; 4101 4115 struct kvec iov; 4102 4116 /* 4103 4117 * Allow ourselves to be interrupted by SIGINT so that a 4104 4118 * connection recovery / failure event can be triggered externally. 
4105 4119 */ 4106 4120 allow_signal(SIGINT); 4107 - 4108 - restart: 4109 - conn = iscsi_rx_thread_pre_handler(ts); 4110 - if (!conn) 4111 - goto out; 4112 4121 4113 4122 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4114 4123 struct completion comp; ··· 4113 4134 if (rc < 0) 4114 4135 goto transport_err; 4115 4136 4116 - goto out; 4137 + goto transport_err; 4117 4138 } 4118 4139 4119 4140 while (!kthread_should_stop()) { ··· 4189 4210 if (!signal_pending(current)) 4190 4211 atomic_set(&conn->transport_failed, 1); 4191 4212 iscsit_take_action_for_connection_exit(conn); 4192 - goto restart; 4193 - out: 4194 4213 return 0; 4195 4214 } 4196 4215 ··· 4250 4273 if (conn->conn_transport->transport_type == ISCSI_TCP) 4251 4274 complete(&conn->conn_logout_comp); 4252 4275 4253 - iscsi_release_thread_set(conn); 4276 + if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { 4277 + if (conn->tx_thread && 4278 + cmpxchg(&conn->tx_thread_active, true, false)) { 4279 + send_sig(SIGINT, conn->tx_thread, 1); 4280 + kthread_stop(conn->tx_thread); 4281 + } 4282 + } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { 4283 + if (conn->rx_thread && 4284 + cmpxchg(&conn->rx_thread_active, true, false)) { 4285 + send_sig(SIGINT, conn->rx_thread, 1); 4286 + kthread_stop(conn->rx_thread); 4287 + } 4288 + } 4289 + 4290 + spin_lock(&iscsit_global->ts_bitmap_lock); 4291 + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, 4292 + get_order(1)); 4293 + spin_unlock(&iscsit_global->ts_bitmap_lock); 4254 4294 4255 4295 iscsit_stop_timers_for_cmds(conn); 4256 4296 iscsit_stop_nopin_response_timer(conn); ··· 4376 4382 conn->conn_transport->iscsit_free_conn(conn); 4377 4383 4378 4384 iscsit_put_transport(conn->conn_transport); 4379 - 4380 - conn->thread_set = NULL; 4381 4385 4382 4386 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4383 4387 conn->conn_state = TARG_CONN_STATE_FREE; ··· 4543 4551 struct iscsi_conn *conn) 4544 4552 { 4545 4553 struct iscsi_session *sess = 
conn->sess; 4546 - 4547 - iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); 4548 - iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); 4554 + int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4549 4555 4550 4556 atomic_set(&conn->conn_logout_remove, 0); 4551 4557 complete(&conn->conn_logout_comp); 4552 4558 4553 4559 iscsit_dec_conn_usage_count(conn); 4554 - iscsit_stop_session(sess, 1, 1); 4560 + iscsit_stop_session(sess, sleep, sleep); 4555 4561 iscsit_dec_session_usage_count(sess); 4556 4562 target_put_session(sess->se_sess); 4557 4563 } ··· 4557 4567 static void iscsit_logout_post_handler_samecid( 4558 4568 struct iscsi_conn *conn) 4559 4569 { 4560 - iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); 4561 - iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); 4570 + int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4562 4571 4563 4572 atomic_set(&conn->conn_logout_remove, 0); 4564 4573 complete(&conn->conn_logout_comp); 4565 4574 4566 - iscsit_cause_connection_reinstatement(conn, 1); 4575 + iscsit_cause_connection_reinstatement(conn, sleep); 4567 4576 iscsit_dec_conn_usage_count(conn); 4568 4577 } 4569 4578
+1 -1
drivers/target/iscsi/iscsi_target.h
··· 35 35 extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); 36 36 37 37 extern struct iscsit_global *iscsit_global; 38 - extern struct target_fabric_configfs *lio_target_fabric_configfs; 38 + extern const struct target_core_fabric_ops iscsi_ops; 39 39 40 40 extern struct kmem_cache *lio_dr_cache; 41 41 extern struct kmem_cache *lio_ooo_cache;
+88 -118
drivers/target/iscsi/iscsi_target_configfs.c
··· 37 37 #include "iscsi_target_util.h" 38 38 #include "iscsi_target.h" 39 39 #include <target/iscsi/iscsi_target_stat.h> 40 - #include "iscsi_target_configfs.h" 41 - 42 - struct target_fabric_configfs *lio_target_fabric_configfs; 43 40 44 41 struct lio_target_configfs_attribute { 45 42 struct configfs_attribute attr; ··· 1049 1052 */ 1050 1053 DEF_TPG_ATTRIB(t10_pi); 1051 1054 TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR); 1055 + /* 1056 + * Define iscsi_tpg_attrib_s_fabric_prot_type 1057 + */ 1058 + DEF_TPG_ATTRIB(fabric_prot_type); 1059 + TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR); 1052 1060 1053 1061 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { 1054 1062 &iscsi_tpg_attrib_authentication.attr, ··· 1067 1065 &iscsi_tpg_attrib_demo_mode_discovery.attr, 1068 1066 &iscsi_tpg_attrib_default_erl.attr, 1069 1067 &iscsi_tpg_attrib_t10_pi.attr, 1068 + &iscsi_tpg_attrib_fabric_prot_type.attr, 1070 1069 NULL, 1071 1070 }; 1072 1071 ··· 1413 1410 1414 1411 TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR); 1415 1412 1413 + static ssize_t lio_target_tpg_show_dynamic_sessions( 1414 + struct se_portal_group *se_tpg, 1415 + char *page) 1416 + { 1417 + return target_show_dynamic_sessions(se_tpg, page); 1418 + } 1419 + 1420 + TF_TPG_BASE_ATTR_RO(lio_target, dynamic_sessions); 1421 + 1416 1422 static struct configfs_attribute *lio_target_tpg_attrs[] = { 1417 1423 &lio_target_tpg_enable.attr, 1424 + &lio_target_tpg_dynamic_sessions.attr, 1418 1425 NULL, 1419 1426 }; 1420 1427 ··· 1463 1450 if (!tpg) 1464 1451 return NULL; 1465 1452 1466 - ret = core_tpg_register( 1467 - &lio_target_fabric_configfs->tf_ops, 1468 - wwn, &tpg->tpg_se_tpg, tpg, 1469 - TRANSPORT_TPG_TYPE_NORMAL); 1453 + ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg, 1454 + tpg, TRANSPORT_TPG_TYPE_NORMAL); 1470 1455 if (ret < 0) 1471 1456 return NULL; 1472 1457 ··· 1883 1872 return tpg->tpg_attrib.prod_mode_write_protect; 1884 1873 } 1885 1874 1875 + static int 
lio_tpg_check_prot_fabric_only( 1876 + struct se_portal_group *se_tpg) 1877 + { 1878 + struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1879 + /* 1880 + * Only report fabric_prot_type if t10_pi has also been enabled 1881 + * for incoming ib_isert sessions. 1882 + */ 1883 + if (!tpg->tpg_attrib.t10_pi) 1884 + return 0; 1885 + 1886 + return tpg->tpg_attrib.fabric_prot_type; 1887 + } 1888 + 1886 1889 static void lio_tpg_release_fabric_acl( 1887 1890 struct se_portal_group *se_tpg, 1888 1891 struct se_node_acl *se_acl) ··· 1978 1953 iscsit_release_cmd(cmd); 1979 1954 } 1980 1955 1981 - /* End functions for target_core_fabric_ops */ 1956 + const struct target_core_fabric_ops iscsi_ops = { 1957 + .module = THIS_MODULE, 1958 + .name = "iscsi", 1959 + .get_fabric_name = iscsi_get_fabric_name, 1960 + .get_fabric_proto_ident = iscsi_get_fabric_proto_ident, 1961 + .tpg_get_wwn = lio_tpg_get_endpoint_wwn, 1962 + .tpg_get_tag = lio_tpg_get_tag, 1963 + .tpg_get_default_depth = lio_tpg_get_default_depth, 1964 + .tpg_get_pr_transport_id = iscsi_get_pr_transport_id, 1965 + .tpg_get_pr_transport_id_len = iscsi_get_pr_transport_id_len, 1966 + .tpg_parse_pr_out_transport_id = iscsi_parse_pr_out_transport_id, 1967 + .tpg_check_demo_mode = lio_tpg_check_demo_mode, 1968 + .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache, 1969 + .tpg_check_demo_mode_write_protect = 1970 + lio_tpg_check_demo_mode_write_protect, 1971 + .tpg_check_prod_mode_write_protect = 1972 + lio_tpg_check_prod_mode_write_protect, 1973 + .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only, 1974 + .tpg_alloc_fabric_acl = lio_tpg_alloc_fabric_acl, 1975 + .tpg_release_fabric_acl = lio_tpg_release_fabric_acl, 1976 + .tpg_get_inst_index = lio_tpg_get_inst_index, 1977 + .check_stop_free = lio_check_stop_free, 1978 + .release_cmd = lio_release_cmd, 1979 + .shutdown_session = lio_tpg_shutdown_session, 1980 + .close_session = lio_tpg_close_session, 1981 + .sess_get_index = lio_sess_get_index, 1982 + 
.sess_get_initiator_sid = lio_sess_get_initiator_sid, 1983 + .write_pending = lio_write_pending, 1984 + .write_pending_status = lio_write_pending_status, 1985 + .set_default_node_attributes = lio_set_default_node_attributes, 1986 + .get_task_tag = iscsi_get_task_tag, 1987 + .get_cmd_state = iscsi_get_cmd_state, 1988 + .queue_data_in = lio_queue_data_in, 1989 + .queue_status = lio_queue_status, 1990 + .queue_tm_rsp = lio_queue_tm_rsp, 1991 + .aborted_task = lio_aborted_task, 1992 + .fabric_make_wwn = lio_target_call_coreaddtiqn, 1993 + .fabric_drop_wwn = lio_target_call_coredeltiqn, 1994 + .fabric_make_tpg = lio_target_tiqn_addtpg, 1995 + .fabric_drop_tpg = lio_target_tiqn_deltpg, 1996 + .fabric_make_np = lio_target_call_addnptotpg, 1997 + .fabric_drop_np = lio_target_call_delnpfromtpg, 1998 + .fabric_make_nodeacl = lio_target_make_nodeacl, 1999 + .fabric_drop_nodeacl = lio_target_drop_nodeacl, 1982 2000 1983 - int iscsi_target_register_configfs(void) 1984 - { 1985 - struct target_fabric_configfs *fabric; 1986 - int ret; 1987 - 1988 - lio_target_fabric_configfs = NULL; 1989 - fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi"); 1990 - if (IS_ERR(fabric)) { 1991 - pr_err("target_fabric_configfs_init() for" 1992 - " LIO-Target failed!\n"); 1993 - return PTR_ERR(fabric); 1994 - } 1995 - /* 1996 - * Setup the fabric API of function pointers used by target_core_mod.. 
1997 - */ 1998 - fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name; 1999 - fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident; 2000 - fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn; 2001 - fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag; 2002 - fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth; 2003 - fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id; 2004 - fabric->tf_ops.tpg_get_pr_transport_id_len = 2005 - &iscsi_get_pr_transport_id_len; 2006 - fabric->tf_ops.tpg_parse_pr_out_transport_id = 2007 - &iscsi_parse_pr_out_transport_id; 2008 - fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode; 2009 - fabric->tf_ops.tpg_check_demo_mode_cache = 2010 - &lio_tpg_check_demo_mode_cache; 2011 - fabric->tf_ops.tpg_check_demo_mode_write_protect = 2012 - &lio_tpg_check_demo_mode_write_protect; 2013 - fabric->tf_ops.tpg_check_prod_mode_write_protect = 2014 - &lio_tpg_check_prod_mode_write_protect; 2015 - fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl; 2016 - fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl; 2017 - fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index; 2018 - fabric->tf_ops.check_stop_free = &lio_check_stop_free, 2019 - fabric->tf_ops.release_cmd = &lio_release_cmd; 2020 - fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session; 2021 - fabric->tf_ops.close_session = &lio_tpg_close_session; 2022 - fabric->tf_ops.sess_get_index = &lio_sess_get_index; 2023 - fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid; 2024 - fabric->tf_ops.write_pending = &lio_write_pending; 2025 - fabric->tf_ops.write_pending_status = &lio_write_pending_status; 2026 - fabric->tf_ops.set_default_node_attributes = 2027 - &lio_set_default_node_attributes; 2028 - fabric->tf_ops.get_task_tag = &iscsi_get_task_tag; 2029 - fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state; 2030 - fabric->tf_ops.queue_data_in = &lio_queue_data_in; 2031 - 
fabric->tf_ops.queue_status = &lio_queue_status; 2032 - fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; 2033 - fabric->tf_ops.aborted_task = &lio_aborted_task; 2034 - /* 2035 - * Setup function pointers for generic logic in target_core_fabric_configfs.c 2036 - */ 2037 - fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn; 2038 - fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn; 2039 - fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg; 2040 - fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg; 2041 - fabric->tf_ops.fabric_post_link = NULL; 2042 - fabric->tf_ops.fabric_pre_unlink = NULL; 2043 - fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg; 2044 - fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg; 2045 - fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl; 2046 - fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl; 2047 - /* 2048 - * Setup default attribute lists for various fabric->tf_cit_tmpl 2049 - * sturct config_item_type's 2050 - */ 2051 - fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; 2052 - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; 2053 - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; 2054 - fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; 2055 - fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; 2056 - fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; 2057 - fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; 2058 - fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; 2059 - fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; 2060 - fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; 2061 - fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; 2062 - 2063 - ret = 
target_fabric_configfs_register(fabric); 2064 - if (ret < 0) { 2065 - pr_err("target_fabric_configfs_register() for" 2066 - " LIO-Target failed!\n"); 2067 - target_fabric_configfs_free(fabric); 2068 - return ret; 2069 - } 2070 - 2071 - lio_target_fabric_configfs = fabric; 2072 - pr_debug("LIO_TARGET[0] - Set fabric ->" 2073 - " lio_target_fabric_configfs\n"); 2074 - return 0; 2075 - } 2076 - 2077 - 2078 - void iscsi_target_deregister_configfs(void) 2079 - { 2080 - if (!lio_target_fabric_configfs) 2081 - return; 2082 - /* 2083 - * Shutdown discovery sessions and disable discovery TPG 2084 - */ 2085 - if (iscsit_global->discovery_tpg) 2086 - iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); 2087 - 2088 - target_fabric_configfs_deregister(lio_target_fabric_configfs); 2089 - lio_target_fabric_configfs = NULL; 2090 - pr_debug("LIO_TARGET[0] - Cleared" 2091 - " lio_target_fabric_configfs\n"); 2092 - } 2001 + .tfc_discovery_attrs = lio_target_discovery_auth_attrs, 2002 + .tfc_wwn_attrs = lio_target_wwn_attrs, 2003 + .tfc_tpg_base_attrs = lio_target_tpg_attrs, 2004 + .tfc_tpg_attrib_attrs = lio_target_tpg_attrib_attrs, 2005 + .tfc_tpg_auth_attrs = lio_target_tpg_auth_attrs, 2006 + .tfc_tpg_param_attrs = lio_target_tpg_param_attrs, 2007 + .tfc_tpg_np_base_attrs = lio_target_portal_attrs, 2008 + .tfc_tpg_nacl_base_attrs = lio_target_initiator_attrs, 2009 + .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs, 2010 + .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs, 2011 + .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs, 2012 + };
-7
drivers/target/iscsi/iscsi_target_configfs.h
··· 1 - #ifndef ISCSI_TARGET_CONFIGFS_H 2 - #define ISCSI_TARGET_CONFIGFS_H 3 - 4 - extern int iscsi_target_register_configfs(void); 5 - extern void iscsi_target_deregister_configfs(void); 6 - 7 - #endif /* ISCSI_TARGET_CONFIGFS_H */
+8 -6
drivers/target/iscsi/iscsi_target_erl0.c
··· 23 23 24 24 #include <target/iscsi/iscsi_target_core.h> 25 25 #include "iscsi_target_seq_pdu_list.h" 26 - #include "iscsi_target_tq.h" 27 26 #include "iscsi_target_erl0.h" 28 27 #include "iscsi_target_erl1.h" 29 28 #include "iscsi_target_erl2.h" ··· 859 860 } 860 861 spin_unlock_bh(&conn->state_lock); 861 862 862 - iscsi_thread_set_force_reinstatement(conn); 863 + if (conn->tx_thread && conn->tx_thread_active) 864 + send_sig(SIGINT, conn->tx_thread, 1); 865 + if (conn->rx_thread && conn->rx_thread_active) 866 + send_sig(SIGINT, conn->rx_thread, 1); 863 867 864 868 sleep: 865 869 wait_for_completion(&conn->conn_wait_rcfr_comp); ··· 887 885 return; 888 886 } 889 887 890 - if (iscsi_thread_set_force_reinstatement(conn) < 0) { 891 - spin_unlock_bh(&conn->state_lock); 892 - return; 893 - } 888 + if (conn->tx_thread && conn->tx_thread_active) 889 + send_sig(SIGINT, conn->tx_thread, 1); 890 + if (conn->rx_thread && conn->rx_thread_active) 891 + send_sig(SIGINT, conn->rx_thread, 1); 894 892 895 893 atomic_set(&conn->connection_reinstatement, 1); 896 894 if (!sleep) {
+54 -6
drivers/target/iscsi/iscsi_target_login.c
··· 26 26 27 27 #include <target/iscsi/iscsi_target_core.h> 28 28 #include <target/iscsi/iscsi_target_stat.h> 29 - #include "iscsi_target_tq.h" 30 29 #include "iscsi_target_device.h" 31 30 #include "iscsi_target_nego.h" 32 31 #include "iscsi_target_erl0.h" ··· 698 699 iscsit_start_nopin_timer(conn); 699 700 } 700 701 702 + static int iscsit_start_kthreads(struct iscsi_conn *conn) 703 + { 704 + int ret = 0; 705 + 706 + spin_lock(&iscsit_global->ts_bitmap_lock); 707 + conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap, 708 + ISCSIT_BITMAP_BITS, get_order(1)); 709 + spin_unlock(&iscsit_global->ts_bitmap_lock); 710 + 711 + if (conn->bitmap_id < 0) { 712 + pr_err("bitmap_find_free_region() failed for" 713 + " iscsit_start_kthreads()\n"); 714 + return -ENOMEM; 715 + } 716 + 717 + conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn, 718 + "%s", ISCSI_TX_THREAD_NAME); 719 + if (IS_ERR(conn->tx_thread)) { 720 + pr_err("Unable to start iscsi_target_tx_thread\n"); 721 + ret = PTR_ERR(conn->tx_thread); 722 + goto out_bitmap; 723 + } 724 + conn->tx_thread_active = true; 725 + 726 + conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn, 727 + "%s", ISCSI_RX_THREAD_NAME); 728 + if (IS_ERR(conn->rx_thread)) { 729 + pr_err("Unable to start iscsi_target_rx_thread\n"); 730 + ret = PTR_ERR(conn->rx_thread); 731 + goto out_tx; 732 + } 733 + conn->rx_thread_active = true; 734 + 735 + return 0; 736 + out_tx: 737 + kthread_stop(conn->tx_thread); 738 + conn->tx_thread_active = false; 739 + out_bitmap: 740 + spin_lock(&iscsit_global->ts_bitmap_lock); 741 + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, 742 + get_order(1)); 743 + spin_unlock(&iscsit_global->ts_bitmap_lock); 744 + return ret; 745 + } 746 + 701 747 int iscsi_post_login_handler( 702 748 struct iscsi_np *np, 703 749 struct iscsi_conn *conn, ··· 753 709 struct se_session *se_sess = sess->se_sess; 754 710 struct iscsi_portal_group *tpg = sess->tpg; 755 711 struct se_portal_group 
*se_tpg = &tpg->tpg_se_tpg; 756 - struct iscsi_thread_set *ts; 712 + int rc; 757 713 758 714 iscsit_inc_conn_usage_count(conn); 759 715 ··· 768 724 /* 769 725 * SCSI Initiator -> SCSI Target Port Mapping 770 726 */ 771 - ts = iscsi_get_thread_set(); 772 727 if (!zero_tsih) { 773 728 iscsi_set_session_parameters(sess->sess_ops, 774 729 conn->param_list, 0); ··· 794 751 sess->sess_ops->InitiatorName); 795 752 spin_unlock_bh(&sess->conn_lock); 796 753 797 - iscsi_post_login_start_timers(conn); 754 + rc = iscsit_start_kthreads(conn); 755 + if (rc) 756 + return rc; 798 757 799 - iscsi_activate_thread_set(conn, ts); 758 + iscsi_post_login_start_timers(conn); 800 759 /* 801 760 * Determine CPU mask to ensure connection's RX and TX kthreads 802 761 * are scheduled on the same CPU. ··· 855 810 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); 856 811 spin_unlock_bh(&se_tpg->session_lock); 857 812 813 + rc = iscsit_start_kthreads(conn); 814 + if (rc) 815 + return rc; 816 + 858 817 iscsi_post_login_start_timers(conn); 859 - iscsi_activate_thread_set(conn, ts); 860 818 /* 861 819 * Determine CPU mask to ensure connection's RX and TX kthreads 862 820 * are scheduled on the same CPU.
+21 -4
drivers/target/iscsi/iscsi_target_tpg.c
··· 68 68 return -1; 69 69 } 70 70 71 - ret = core_tpg_register( 72 - &lio_target_fabric_configfs->tf_ops, 73 - NULL, &tpg->tpg_se_tpg, tpg, 74 - TRANSPORT_TPG_TYPE_DISCOVERY); 71 + ret = core_tpg_register(&iscsi_ops, NULL, &tpg->tpg_se_tpg, 72 + tpg, TRANSPORT_TPG_TYPE_DISCOVERY); 75 73 if (ret < 0) { 76 74 kfree(tpg); 77 75 return -1; ··· 226 228 a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; 227 229 a->default_erl = TA_DEFAULT_ERL; 228 230 a->t10_pi = TA_DEFAULT_T10_PI; 231 + a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; 229 232 } 230 233 231 234 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) ··· 874 875 pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:" 875 876 " %s\n", tpg->tpgt, (a->t10_pi) ? 876 877 "ON" : "OFF"); 878 + 879 + return 0; 880 + } 881 + 882 + int iscsit_ta_fabric_prot_type( 883 + struct iscsi_portal_group *tpg, 884 + u32 prot_type) 885 + { 886 + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 887 + 888 + if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) { 889 + pr_err("Illegal value for fabric_prot_type: %u\n", prot_type); 890 + return -EINVAL; 891 + } 892 + 893 + a->fabric_prot_type = prot_type; 894 + pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n", 895 + tpg->tpgt, prot_type); 877 896 878 897 return 0; 879 898 }
+1
drivers/target/iscsi/iscsi_target_tpg.h
··· 39 39 extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); 40 40 extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); 41 41 extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); 42 + extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); 42 43 43 44 #endif /* ISCSI_TARGET_TPG_H */
-495
drivers/target/iscsi/iscsi_target_tq.c
··· 1 - /******************************************************************************* 2 - * This file contains the iSCSI Login Thread and Thread Queue functions. 3 - * 4 - * (c) Copyright 2007-2013 Datera, Inc. 5 - * 6 - * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 7 - * 8 - * This program is free software; you can redistribute it and/or modify 9 - * it under the terms of the GNU General Public License as published by 10 - * the Free Software Foundation; either version 2 of the License, or 11 - * (at your option) any later version. 12 - * 13 - * This program is distributed in the hope that it will be useful, 14 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 - * GNU General Public License for more details. 17 - ******************************************************************************/ 18 - 19 - #include <linux/kthread.h> 20 - #include <linux/list.h> 21 - #include <linux/bitmap.h> 22 - 23 - #include <target/iscsi/iscsi_target_core.h> 24 - #include "iscsi_target_tq.h" 25 - #include "iscsi_target.h" 26 - 27 - static LIST_HEAD(inactive_ts_list); 28 - static DEFINE_SPINLOCK(inactive_ts_lock); 29 - static DEFINE_SPINLOCK(ts_bitmap_lock); 30 - 31 - static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) 32 - { 33 - if (!list_empty(&ts->ts_list)) { 34 - WARN_ON(1); 35 - return; 36 - } 37 - spin_lock(&inactive_ts_lock); 38 - list_add_tail(&ts->ts_list, &inactive_ts_list); 39 - iscsit_global->inactive_ts++; 40 - spin_unlock(&inactive_ts_lock); 41 - } 42 - 43 - static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) 44 - { 45 - struct iscsi_thread_set *ts; 46 - 47 - spin_lock(&inactive_ts_lock); 48 - if (list_empty(&inactive_ts_list)) { 49 - spin_unlock(&inactive_ts_lock); 50 - return NULL; 51 - } 52 - 53 - ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list); 54 - 55 - list_del_init(&ts->ts_list); 56 - 
iscsit_global->inactive_ts--; 57 - spin_unlock(&inactive_ts_lock); 58 - 59 - return ts; 60 - } 61 - 62 - int iscsi_allocate_thread_sets(u32 thread_pair_count) 63 - { 64 - int allocated_thread_pair_count = 0, i, thread_id; 65 - struct iscsi_thread_set *ts = NULL; 66 - 67 - for (i = 0; i < thread_pair_count; i++) { 68 - ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL); 69 - if (!ts) { 70 - pr_err("Unable to allocate memory for" 71 - " thread set.\n"); 72 - return allocated_thread_pair_count; 73 - } 74 - /* 75 - * Locate the next available regision in the thread_set_bitmap 76 - */ 77 - spin_lock(&ts_bitmap_lock); 78 - thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap, 79 - iscsit_global->ts_bitmap_count, get_order(1)); 80 - spin_unlock(&ts_bitmap_lock); 81 - if (thread_id < 0) { 82 - pr_err("bitmap_find_free_region() failed for" 83 - " thread_set_bitmap\n"); 84 - kfree(ts); 85 - return allocated_thread_pair_count; 86 - } 87 - 88 - ts->thread_id = thread_id; 89 - ts->status = ISCSI_THREAD_SET_FREE; 90 - INIT_LIST_HEAD(&ts->ts_list); 91 - spin_lock_init(&ts->ts_state_lock); 92 - init_completion(&ts->rx_restart_comp); 93 - init_completion(&ts->tx_restart_comp); 94 - init_completion(&ts->rx_start_comp); 95 - init_completion(&ts->tx_start_comp); 96 - sema_init(&ts->ts_activate_sem, 0); 97 - 98 - ts->create_threads = 1; 99 - ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s", 100 - ISCSI_TX_THREAD_NAME); 101 - if (IS_ERR(ts->tx_thread)) { 102 - dump_stack(); 103 - pr_err("Unable to start iscsi_target_tx_thread\n"); 104 - break; 105 - } 106 - 107 - ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s", 108 - ISCSI_RX_THREAD_NAME); 109 - if (IS_ERR(ts->rx_thread)) { 110 - kthread_stop(ts->tx_thread); 111 - pr_err("Unable to start iscsi_target_rx_thread\n"); 112 - break; 113 - } 114 - ts->create_threads = 0; 115 - 116 - iscsi_add_ts_to_inactive_list(ts); 117 - allocated_thread_pair_count++; 118 - } 119 - 120 - pr_debug("Spawned %d thread 
set(s) (%d total threads).\n", 121 - allocated_thread_pair_count, allocated_thread_pair_count * 2); 122 - return allocated_thread_pair_count; 123 - } 124 - 125 - static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts) 126 - { 127 - spin_lock_bh(&ts->ts_state_lock); 128 - ts->status = ISCSI_THREAD_SET_DIE; 129 - 130 - if (ts->rx_thread) { 131 - complete(&ts->rx_start_comp); 132 - spin_unlock_bh(&ts->ts_state_lock); 133 - kthread_stop(ts->rx_thread); 134 - spin_lock_bh(&ts->ts_state_lock); 135 - } 136 - if (ts->tx_thread) { 137 - complete(&ts->tx_start_comp); 138 - spin_unlock_bh(&ts->ts_state_lock); 139 - kthread_stop(ts->tx_thread); 140 - spin_lock_bh(&ts->ts_state_lock); 141 - } 142 - spin_unlock_bh(&ts->ts_state_lock); 143 - /* 144 - * Release this thread_id in the thread_set_bitmap 145 - */ 146 - spin_lock(&ts_bitmap_lock); 147 - bitmap_release_region(iscsit_global->ts_bitmap, 148 - ts->thread_id, get_order(1)); 149 - spin_unlock(&ts_bitmap_lock); 150 - 151 - kfree(ts); 152 - } 153 - 154 - void iscsi_deallocate_thread_sets(void) 155 - { 156 - struct iscsi_thread_set *ts = NULL; 157 - u32 released_count = 0; 158 - 159 - while ((ts = iscsi_get_ts_from_inactive_list())) { 160 - 161 - iscsi_deallocate_thread_one(ts); 162 - released_count++; 163 - } 164 - 165 - if (released_count) 166 - pr_debug("Stopped %d thread set(s) (%d total threads)." 167 - "\n", released_count, released_count * 2); 168 - } 169 - 170 - static void iscsi_deallocate_extra_thread_sets(void) 171 - { 172 - u32 orig_count, released_count = 0; 173 - struct iscsi_thread_set *ts = NULL; 174 - 175 - orig_count = TARGET_THREAD_SET_COUNT; 176 - 177 - while ((iscsit_global->inactive_ts + 1) > orig_count) { 178 - ts = iscsi_get_ts_from_inactive_list(); 179 - if (!ts) 180 - break; 181 - 182 - iscsi_deallocate_thread_one(ts); 183 - released_count++; 184 - } 185 - 186 - if (released_count) 187 - pr_debug("Stopped %d thread set(s) (%d total threads)." 
188 - "\n", released_count, released_count * 2); 189 - } 190 - 191 - void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) 192 - { 193 - spin_lock_bh(&ts->ts_state_lock); 194 - conn->thread_set = ts; 195 - ts->conn = conn; 196 - ts->status = ISCSI_THREAD_SET_ACTIVE; 197 - spin_unlock_bh(&ts->ts_state_lock); 198 - 199 - complete(&ts->rx_start_comp); 200 - complete(&ts->tx_start_comp); 201 - 202 - down(&ts->ts_activate_sem); 203 - } 204 - 205 - struct iscsi_thread_set *iscsi_get_thread_set(void) 206 - { 207 - struct iscsi_thread_set *ts; 208 - 209 - get_set: 210 - ts = iscsi_get_ts_from_inactive_list(); 211 - if (!ts) { 212 - iscsi_allocate_thread_sets(1); 213 - goto get_set; 214 - } 215 - 216 - ts->delay_inactive = 1; 217 - ts->signal_sent = 0; 218 - ts->thread_count = 2; 219 - init_completion(&ts->rx_restart_comp); 220 - init_completion(&ts->tx_restart_comp); 221 - sema_init(&ts->ts_activate_sem, 0); 222 - 223 - return ts; 224 - } 225 - 226 - void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear) 227 - { 228 - struct iscsi_thread_set *ts = NULL; 229 - 230 - if (!conn->thread_set) { 231 - pr_err("struct iscsi_conn->thread_set is NULL\n"); 232 - return; 233 - } 234 - ts = conn->thread_set; 235 - 236 - spin_lock_bh(&ts->ts_state_lock); 237 - ts->thread_clear &= ~thread_clear; 238 - 239 - if ((thread_clear & ISCSI_CLEAR_RX_THREAD) && 240 - (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD)) 241 - complete(&ts->rx_restart_comp); 242 - else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) && 243 - (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD)) 244 - complete(&ts->tx_restart_comp); 245 - spin_unlock_bh(&ts->ts_state_lock); 246 - } 247 - 248 - void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent) 249 - { 250 - struct iscsi_thread_set *ts = NULL; 251 - 252 - if (!conn->thread_set) { 253 - pr_err("struct iscsi_conn->thread_set is NULL\n"); 254 - return; 255 - } 256 - ts = conn->thread_set; 257 - 258 - 
spin_lock_bh(&ts->ts_state_lock); 259 - ts->signal_sent |= signal_sent; 260 - spin_unlock_bh(&ts->ts_state_lock); 261 - } 262 - 263 - int iscsi_release_thread_set(struct iscsi_conn *conn) 264 - { 265 - int thread_called = 0; 266 - struct iscsi_thread_set *ts = NULL; 267 - 268 - if (!conn || !conn->thread_set) { 269 - pr_err("connection or thread set pointer is NULL\n"); 270 - BUG(); 271 - } 272 - ts = conn->thread_set; 273 - 274 - spin_lock_bh(&ts->ts_state_lock); 275 - ts->status = ISCSI_THREAD_SET_RESET; 276 - 277 - if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME, 278 - strlen(ISCSI_RX_THREAD_NAME))) 279 - thread_called = ISCSI_RX_THREAD; 280 - else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME, 281 - strlen(ISCSI_TX_THREAD_NAME))) 282 - thread_called = ISCSI_TX_THREAD; 283 - 284 - if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) && 285 - (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) { 286 - 287 - if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) { 288 - send_sig(SIGINT, ts->rx_thread, 1); 289 - ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD; 290 - } 291 - ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD; 292 - spin_unlock_bh(&ts->ts_state_lock); 293 - wait_for_completion(&ts->rx_restart_comp); 294 - spin_lock_bh(&ts->ts_state_lock); 295 - ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD; 296 - } 297 - if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) && 298 - (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) { 299 - 300 - if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) { 301 - send_sig(SIGINT, ts->tx_thread, 1); 302 - ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD; 303 - } 304 - ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD; 305 - spin_unlock_bh(&ts->ts_state_lock); 306 - wait_for_completion(&ts->tx_restart_comp); 307 - spin_lock_bh(&ts->ts_state_lock); 308 - ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD; 309 - } 310 - 311 - ts->conn = NULL; 312 - ts->status = ISCSI_THREAD_SET_FREE; 313 - spin_unlock_bh(&ts->ts_state_lock); 314 - 315 - return 0; 316 - } 317 - 318 - int 
iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn) 319 - { 320 - struct iscsi_thread_set *ts; 321 - 322 - if (!conn->thread_set) 323 - return -1; 324 - ts = conn->thread_set; 325 - 326 - spin_lock_bh(&ts->ts_state_lock); 327 - if (ts->status != ISCSI_THREAD_SET_ACTIVE) { 328 - spin_unlock_bh(&ts->ts_state_lock); 329 - return -1; 330 - } 331 - 332 - if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) { 333 - send_sig(SIGINT, ts->tx_thread, 1); 334 - ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD; 335 - } 336 - if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) { 337 - send_sig(SIGINT, ts->rx_thread, 1); 338 - ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD; 339 - } 340 - spin_unlock_bh(&ts->ts_state_lock); 341 - 342 - return 0; 343 - } 344 - 345 - static void iscsi_check_to_add_additional_sets(void) 346 - { 347 - int thread_sets_add; 348 - 349 - spin_lock(&inactive_ts_lock); 350 - thread_sets_add = iscsit_global->inactive_ts; 351 - spin_unlock(&inactive_ts_lock); 352 - if (thread_sets_add == 1) 353 - iscsi_allocate_thread_sets(1); 354 - } 355 - 356 - static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts) 357 - { 358 - spin_lock_bh(&ts->ts_state_lock); 359 - if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() || 360 - signal_pending(current)) { 361 - spin_unlock_bh(&ts->ts_state_lock); 362 - return -1; 363 - } 364 - spin_unlock_bh(&ts->ts_state_lock); 365 - 366 - return 0; 367 - } 368 - 369 - struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts) 370 - { 371 - int ret; 372 - 373 - spin_lock_bh(&ts->ts_state_lock); 374 - if (ts->create_threads) { 375 - spin_unlock_bh(&ts->ts_state_lock); 376 - goto sleep; 377 - } 378 - 379 - if (ts->status != ISCSI_THREAD_SET_DIE) 380 - flush_signals(current); 381 - 382 - if (ts->delay_inactive && (--ts->thread_count == 0)) { 383 - spin_unlock_bh(&ts->ts_state_lock); 384 - 385 - if (!iscsit_global->in_shutdown) 386 - 
iscsi_deallocate_extra_thread_sets(); 387 - 388 - iscsi_add_ts_to_inactive_list(ts); 389 - spin_lock_bh(&ts->ts_state_lock); 390 - } 391 - 392 - if ((ts->status == ISCSI_THREAD_SET_RESET) && 393 - (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) 394 - complete(&ts->rx_restart_comp); 395 - 396 - ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD; 397 - spin_unlock_bh(&ts->ts_state_lock); 398 - sleep: 399 - ret = wait_for_completion_interruptible(&ts->rx_start_comp); 400 - if (ret != 0) 401 - return NULL; 402 - 403 - if (iscsi_signal_thread_pre_handler(ts) < 0) 404 - return NULL; 405 - 406 - iscsi_check_to_add_additional_sets(); 407 - 408 - spin_lock_bh(&ts->ts_state_lock); 409 - if (!ts->conn) { 410 - pr_err("struct iscsi_thread_set->conn is NULL for" 411 - " RX thread_id: %s/%d\n", current->comm, current->pid); 412 - spin_unlock_bh(&ts->ts_state_lock); 413 - return NULL; 414 - } 415 - ts->thread_clear |= ISCSI_CLEAR_RX_THREAD; 416 - spin_unlock_bh(&ts->ts_state_lock); 417 - 418 - up(&ts->ts_activate_sem); 419 - 420 - return ts->conn; 421 - } 422 - 423 - struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts) 424 - { 425 - int ret; 426 - 427 - spin_lock_bh(&ts->ts_state_lock); 428 - if (ts->create_threads) { 429 - spin_unlock_bh(&ts->ts_state_lock); 430 - goto sleep; 431 - } 432 - 433 - if (ts->status != ISCSI_THREAD_SET_DIE) 434 - flush_signals(current); 435 - 436 - if (ts->delay_inactive && (--ts->thread_count == 0)) { 437 - spin_unlock_bh(&ts->ts_state_lock); 438 - 439 - if (!iscsit_global->in_shutdown) 440 - iscsi_deallocate_extra_thread_sets(); 441 - 442 - iscsi_add_ts_to_inactive_list(ts); 443 - spin_lock_bh(&ts->ts_state_lock); 444 - } 445 - if ((ts->status == ISCSI_THREAD_SET_RESET) && 446 - (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) 447 - complete(&ts->tx_restart_comp); 448 - 449 - ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD; 450 - spin_unlock_bh(&ts->ts_state_lock); 451 - sleep: 452 - ret = wait_for_completion_interruptible(&ts->tx_start_comp); 453 
- if (ret != 0) 454 - return NULL; 455 - 456 - if (iscsi_signal_thread_pre_handler(ts) < 0) 457 - return NULL; 458 - 459 - iscsi_check_to_add_additional_sets(); 460 - 461 - spin_lock_bh(&ts->ts_state_lock); 462 - if (!ts->conn) { 463 - pr_err("struct iscsi_thread_set->conn is NULL for" 464 - " TX thread_id: %s/%d\n", current->comm, current->pid); 465 - spin_unlock_bh(&ts->ts_state_lock); 466 - return NULL; 467 - } 468 - ts->thread_clear |= ISCSI_CLEAR_TX_THREAD; 469 - spin_unlock_bh(&ts->ts_state_lock); 470 - 471 - up(&ts->ts_activate_sem); 472 - 473 - return ts->conn; 474 - } 475 - 476 - int iscsi_thread_set_init(void) 477 - { 478 - int size; 479 - 480 - iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS; 481 - 482 - size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long); 483 - iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL); 484 - if (!iscsit_global->ts_bitmap) { 485 - pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); 486 - return -ENOMEM; 487 - } 488 - 489 - return 0; 490 - } 491 - 492 - void iscsi_thread_set_free(void) 493 - { 494 - kfree(iscsit_global->ts_bitmap); 495 - }
-84
drivers/target/iscsi/iscsi_target_tq.h
··· 1 - #ifndef ISCSI_THREAD_QUEUE_H 2 - #define ISCSI_THREAD_QUEUE_H 3 - 4 - /* 5 - * Defines for thread sets. 6 - */ 7 - extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *); 8 - extern int iscsi_allocate_thread_sets(u32); 9 - extern void iscsi_deallocate_thread_sets(void); 10 - extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *); 11 - extern struct iscsi_thread_set *iscsi_get_thread_set(void); 12 - extern void iscsi_set_thread_clear(struct iscsi_conn *, u8); 13 - extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8); 14 - extern int iscsi_release_thread_set(struct iscsi_conn *); 15 - extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *); 16 - extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *); 17 - extern int iscsi_thread_set_init(void); 18 - extern void iscsi_thread_set_free(void); 19 - 20 - extern int iscsi_target_tx_thread(void *); 21 - extern int iscsi_target_rx_thread(void *); 22 - 23 - #define TARGET_THREAD_SET_COUNT 4 24 - 25 - #define ISCSI_RX_THREAD 1 26 - #define ISCSI_TX_THREAD 2 27 - #define ISCSI_RX_THREAD_NAME "iscsi_trx" 28 - #define ISCSI_TX_THREAD_NAME "iscsi_ttx" 29 - #define ISCSI_BLOCK_RX_THREAD 0x1 30 - #define ISCSI_BLOCK_TX_THREAD 0x2 31 - #define ISCSI_CLEAR_RX_THREAD 0x1 32 - #define ISCSI_CLEAR_TX_THREAD 0x2 33 - #define ISCSI_SIGNAL_RX_THREAD 0x1 34 - #define ISCSI_SIGNAL_TX_THREAD 0x2 35 - 36 - /* struct iscsi_thread_set->status */ 37 - #define ISCSI_THREAD_SET_FREE 1 38 - #define ISCSI_THREAD_SET_ACTIVE 2 39 - #define ISCSI_THREAD_SET_DIE 3 40 - #define ISCSI_THREAD_SET_RESET 4 41 - #define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5 42 - 43 - /* By default allow a maximum of 32K iSCSI connections */ 44 - #define ISCSI_TS_BITMAP_BITS 32768 45 - 46 - struct iscsi_thread_set { 47 - /* flags used for blocking and restarting sets */ 48 - int blocked_threads; 49 - /* flag for creating threads */ 50 - int create_threads; 51 - /* 
flag for delaying readding to inactive list */ 52 - int delay_inactive; 53 - /* status for thread set */ 54 - int status; 55 - /* which threads have had signals sent */ 56 - int signal_sent; 57 - /* flag for which threads exited first */ 58 - int thread_clear; 59 - /* Active threads in the thread set */ 60 - int thread_count; 61 - /* Unique thread ID */ 62 - u32 thread_id; 63 - /* pointer to connection if set is active */ 64 - struct iscsi_conn *conn; 65 - /* used for controlling ts state accesses */ 66 - spinlock_t ts_state_lock; 67 - /* used for restarting thread queue */ 68 - struct completion rx_restart_comp; 69 - /* used for restarting thread queue */ 70 - struct completion tx_restart_comp; 71 - /* used for normal unused blocking */ 72 - struct completion rx_start_comp; 73 - /* used for normal unused blocking */ 74 - struct completion tx_start_comp; 75 - /* OS descriptor for rx thread */ 76 - struct task_struct *rx_thread; 77 - /* OS descriptor for tx thread */ 78 - struct task_struct *tx_thread; 79 - /* struct iscsi_thread_set in list list head*/ 80 - struct list_head ts_list; 81 - struct semaphore ts_activate_sem; 82 - }; 83 - 84 - #endif /*** ISCSI_THREAD_QUEUE_H ***/
-1
drivers/target/iscsi/iscsi_target_util.c
··· 33 33 #include "iscsi_target_erl1.h" 34 34 #include "iscsi_target_erl2.h" 35 35 #include "iscsi_target_tpg.h" 36 - #include "iscsi_target_tq.h" 37 36 #include "iscsi_target_util.h" 38 37 #include "iscsi_target.h" 39 38
+105 -137
drivers/target/loopback/tcm_loop.c
··· 41 41 42 42 #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) 43 43 44 - /* Local pointer to allocated TCM configfs fabric module */ 45 - static struct target_fabric_configfs *tcm_loop_fabric_configfs; 44 + static const struct target_core_fabric_ops loop_ops; 46 45 47 46 static struct workqueue_struct *tcm_loop_workqueue; 48 47 static struct kmem_cache *tcm_loop_cmd_cache; ··· 107 108 /* 108 109 * Used with root_device_register() in tcm_loop_alloc_core_bus() below 109 110 */ 110 - struct device *tcm_loop_primary; 111 + static struct device *tcm_loop_primary; 111 112 112 113 static void tcm_loop_submission_work(struct work_struct *work) 113 114 { ··· 696 697 return 0; 697 698 } 698 699 700 + static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg) 701 + { 702 + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, 703 + tl_se_tpg); 704 + return tl_tpg->tl_fabric_prot_type; 705 + } 706 + 699 707 static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( 700 708 struct se_portal_group *se_tpg) 701 709 { ··· 918 912 919 913 /* End items for tcm_loop_port_cit */ 920 914 915 + static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type( 916 + struct se_portal_group *se_tpg, 917 + char *page) 918 + { 919 + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, 920 + tl_se_tpg); 921 + 922 + return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type); 923 + } 924 + 925 + static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type( 926 + struct se_portal_group *se_tpg, 927 + const char *page, 928 + size_t count) 929 + { 930 + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, 931 + tl_se_tpg); 932 + unsigned long val; 933 + int ret = kstrtoul(page, 0, &val); 934 + 935 + if (ret) { 936 + pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); 937 + return ret; 938 + } 939 + if (val != 0 && val != 1 && val != 3) { 940 + pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); 941 
+ return -EINVAL; 942 + } 943 + tl_tpg->tl_fabric_prot_type = val; 944 + 945 + return count; 946 + } 947 + 948 + TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR); 949 + 950 + static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = { 951 + &tcm_loop_tpg_attrib_fabric_prot_type.attr, 952 + NULL, 953 + }; 954 + 921 955 /* Start items for tcm_loop_nexus_cit */ 922 956 923 957 static int tcm_loop_make_nexus( ··· 983 937 /* 984 938 * Initialize the struct se_session pointer 985 939 */ 986 - tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL); 940 + tl_nexus->se_sess = transport_init_session( 941 + TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); 987 942 if (IS_ERR(tl_nexus->se_sess)) { 988 943 ret = PTR_ERR(tl_nexus->se_sess); 989 944 goto out; ··· 1212 1165 struct tcm_loop_hba *tl_hba = container_of(wwn, 1213 1166 struct tcm_loop_hba, tl_hba_wwn); 1214 1167 struct tcm_loop_tpg *tl_tpg; 1215 - char *tpgt_str, *end_ptr; 1216 1168 int ret; 1217 - unsigned short int tpgt; 1169 + unsigned long tpgt; 1218 1170 1219 - tpgt_str = strstr(name, "tpgt_"); 1220 - if (!tpgt_str) { 1171 + if (strstr(name, "tpgt_") != name) { 1221 1172 pr_err("Unable to locate \"tpgt_#\" directory" 1222 1173 " group\n"); 1223 1174 return ERR_PTR(-EINVAL); 1224 1175 } 1225 - tpgt_str += 5; /* Skip ahead of "tpgt_" */ 1226 - tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); 1176 + if (kstrtoul(name+5, 10, &tpgt)) 1177 + return ERR_PTR(-EINVAL); 1227 1178 1228 1179 if (tpgt >= TL_TPGS_PER_HBA) { 1229 - pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" 1180 + pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:" 1230 1181 " %u\n", tpgt, TL_TPGS_PER_HBA); 1231 1182 return ERR_PTR(-EINVAL); 1232 1183 } ··· 1234 1189 /* 1235 1190 * Register the tl_tpg as a emulated SAS TCM Target Endpoint 1236 1191 */ 1237 - ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, 1238 - wwn, &tl_tpg->tl_se_tpg, tl_tpg, 1192 + ret = core_tpg_register(&loop_ops, wwn, 
&tl_tpg->tl_se_tpg, tl_tpg, 1239 1193 TRANSPORT_TPG_TYPE_NORMAL); 1240 1194 if (ret < 0) 1241 1195 return ERR_PTR(-ENOMEM); 1242 1196 1243 1197 pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" 1244 - " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1198 + " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba), 1245 1199 config_item_name(&wwn->wwn_group.cg_item), tpgt); 1246 1200 1247 1201 return &tl_tpg->tl_se_tpg; ··· 1382 1338 1383 1339 /* End items for tcm_loop_cit */ 1384 1340 1385 - static int tcm_loop_register_configfs(void) 1386 - { 1387 - struct target_fabric_configfs *fabric; 1388 - int ret; 1389 - /* 1390 - * Set the TCM Loop HBA counter to zero 1391 - */ 1392 - tcm_loop_hba_no_cnt = 0; 1393 - /* 1394 - * Register the top level struct config_item_type with TCM core 1395 - */ 1396 - fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); 1397 - if (IS_ERR(fabric)) { 1398 - pr_err("tcm_loop_register_configfs() failed!\n"); 1399 - return PTR_ERR(fabric); 1400 - } 1401 - /* 1402 - * Setup the fabric API of function pointers used by target_core_mod 1403 - */ 1404 - fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name; 1405 - fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident; 1406 - fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn; 1407 - fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag; 1408 - fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth; 1409 - fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id; 1410 - fabric->tf_ops.tpg_get_pr_transport_id_len = 1411 - &tcm_loop_get_pr_transport_id_len; 1412 - fabric->tf_ops.tpg_parse_pr_out_transport_id = 1413 - &tcm_loop_parse_pr_out_transport_id; 1414 - fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode; 1415 - fabric->tf_ops.tpg_check_demo_mode_cache = 1416 - &tcm_loop_check_demo_mode_cache; 1417 - fabric->tf_ops.tpg_check_demo_mode_write_protect = 1418 - &tcm_loop_check_demo_mode_write_protect; 1419 - 
fabric->tf_ops.tpg_check_prod_mode_write_protect = 1420 - &tcm_loop_check_prod_mode_write_protect; 1421 - /* 1422 - * The TCM loopback fabric module runs in demo-mode to a local 1423 - * virtual SCSI device, so fabric dependent initator ACLs are 1424 - * not required. 1425 - */ 1426 - fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl; 1427 - fabric->tf_ops.tpg_release_fabric_acl = 1428 - &tcm_loop_tpg_release_fabric_acl; 1429 - fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; 1430 - /* 1431 - * Used for setting up remaining TCM resources in process context 1432 - */ 1433 - fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; 1434 - fabric->tf_ops.release_cmd = &tcm_loop_release_cmd; 1435 - fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; 1436 - fabric->tf_ops.close_session = &tcm_loop_close_session; 1437 - fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; 1438 - fabric->tf_ops.sess_get_initiator_sid = NULL; 1439 - fabric->tf_ops.write_pending = &tcm_loop_write_pending; 1440 - fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status; 1441 - /* 1442 - * Not used for TCM loopback 1443 - */ 1444 - fabric->tf_ops.set_default_node_attributes = 1445 - &tcm_loop_set_default_node_attributes; 1446 - fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; 1447 - fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; 1448 - fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; 1449 - fabric->tf_ops.queue_status = &tcm_loop_queue_status; 1450 - fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; 1451 - fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; 1452 - 1453 - /* 1454 - * Setup function pointers for generic logic in target_core_fabric_configfs.c 1455 - */ 1456 - fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba; 1457 - fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba; 1458 - fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg; 1459 - fabric->tf_ops.fabric_drop_tpg = 
&tcm_loop_drop_naa_tpg; 1460 - /* 1461 - * fabric_post_link() and fabric_pre_unlink() are used for 1462 - * registration and release of TCM Loop Virtual SCSI LUNs. 1463 - */ 1464 - fabric->tf_ops.fabric_post_link = &tcm_loop_port_link; 1465 - fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink; 1466 - fabric->tf_ops.fabric_make_np = NULL; 1467 - fabric->tf_ops.fabric_drop_np = NULL; 1468 - /* 1469 - * Setup default attribute lists for various fabric->tf_cit_tmpl 1470 - */ 1471 - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; 1472 - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; 1473 - fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 1474 - fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 1475 - fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 1476 - /* 1477 - * Once fabric->tf_ops has been setup, now register the fabric for 1478 - * use within TCM 1479 - */ 1480 - ret = target_fabric_configfs_register(fabric); 1481 - if (ret < 0) { 1482 - pr_err("target_fabric_configfs_register() for" 1483 - " TCM_Loop failed!\n"); 1484 - target_fabric_configfs_free(fabric); 1485 - return -1; 1486 - } 1487 - /* 1488 - * Setup our local pointer to *fabric. 
1489 - */ 1490 - tcm_loop_fabric_configfs = fabric; 1491 - pr_debug("TCM_LOOP[0] - Set fabric ->" 1492 - " tcm_loop_fabric_configfs\n"); 1493 - return 0; 1494 - } 1495 - 1496 - static void tcm_loop_deregister_configfs(void) 1497 - { 1498 - if (!tcm_loop_fabric_configfs) 1499 - return; 1500 - 1501 - target_fabric_configfs_deregister(tcm_loop_fabric_configfs); 1502 - tcm_loop_fabric_configfs = NULL; 1503 - pr_debug("TCM_LOOP[0] - Cleared" 1504 - " tcm_loop_fabric_configfs\n"); 1505 - } 1341 + static const struct target_core_fabric_ops loop_ops = { 1342 + .module = THIS_MODULE, 1343 + .name = "loopback", 1344 + .get_fabric_name = tcm_loop_get_fabric_name, 1345 + .get_fabric_proto_ident = tcm_loop_get_fabric_proto_ident, 1346 + .tpg_get_wwn = tcm_loop_get_endpoint_wwn, 1347 + .tpg_get_tag = tcm_loop_get_tag, 1348 + .tpg_get_default_depth = tcm_loop_get_default_depth, 1349 + .tpg_get_pr_transport_id = tcm_loop_get_pr_transport_id, 1350 + .tpg_get_pr_transport_id_len = tcm_loop_get_pr_transport_id_len, 1351 + .tpg_parse_pr_out_transport_id = tcm_loop_parse_pr_out_transport_id, 1352 + .tpg_check_demo_mode = tcm_loop_check_demo_mode, 1353 + .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache, 1354 + .tpg_check_demo_mode_write_protect = 1355 + tcm_loop_check_demo_mode_write_protect, 1356 + .tpg_check_prod_mode_write_protect = 1357 + tcm_loop_check_prod_mode_write_protect, 1358 + .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only, 1359 + .tpg_alloc_fabric_acl = tcm_loop_tpg_alloc_fabric_acl, 1360 + .tpg_release_fabric_acl = tcm_loop_tpg_release_fabric_acl, 1361 + .tpg_get_inst_index = tcm_loop_get_inst_index, 1362 + .check_stop_free = tcm_loop_check_stop_free, 1363 + .release_cmd = tcm_loop_release_cmd, 1364 + .shutdown_session = tcm_loop_shutdown_session, 1365 + .close_session = tcm_loop_close_session, 1366 + .sess_get_index = tcm_loop_sess_get_index, 1367 + .write_pending = tcm_loop_write_pending, 1368 + .write_pending_status = 
tcm_loop_write_pending_status, 1369 + .set_default_node_attributes = tcm_loop_set_default_node_attributes, 1370 + .get_task_tag = tcm_loop_get_task_tag, 1371 + .get_cmd_state = tcm_loop_get_cmd_state, 1372 + .queue_data_in = tcm_loop_queue_data_in, 1373 + .queue_status = tcm_loop_queue_status, 1374 + .queue_tm_rsp = tcm_loop_queue_tm_rsp, 1375 + .aborted_task = tcm_loop_aborted_task, 1376 + .fabric_make_wwn = tcm_loop_make_scsi_hba, 1377 + .fabric_drop_wwn = tcm_loop_drop_scsi_hba, 1378 + .fabric_make_tpg = tcm_loop_make_naa_tpg, 1379 + .fabric_drop_tpg = tcm_loop_drop_naa_tpg, 1380 + .fabric_post_link = tcm_loop_port_link, 1381 + .fabric_pre_unlink = tcm_loop_port_unlink, 1382 + .tfc_wwn_attrs = tcm_loop_wwn_attrs, 1383 + .tfc_tpg_base_attrs = tcm_loop_tpg_attrs, 1384 + .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs, 1385 + }; 1506 1386 1507 1387 static int __init tcm_loop_fabric_init(void) 1508 1388 { ··· 1450 1482 if (ret) 1451 1483 goto out_destroy_cache; 1452 1484 1453 - ret = tcm_loop_register_configfs(); 1485 + ret = target_register_template(&loop_ops); 1454 1486 if (ret) 1455 1487 goto out_release_core_bus; 1456 1488 ··· 1468 1500 1469 1501 static void __exit tcm_loop_fabric_exit(void) 1470 1502 { 1471 - tcm_loop_deregister_configfs(); 1503 + target_unregister_template(&loop_ops); 1472 1504 tcm_loop_release_core_bus(); 1473 1505 kmem_cache_destroy(tcm_loop_cmd_cache); 1474 1506 destroy_workqueue(tcm_loop_workqueue);
+1
drivers/target/loopback/tcm_loop.h
··· 43 43 struct tcm_loop_tpg { 44 44 unsigned short tl_tpgt; 45 45 unsigned short tl_transport_status; 46 + enum target_prot_type tl_fabric_prot_type; 46 47 atomic_t tl_tpg_port_count; 47 48 struct se_portal_group tl_se_tpg; 48 49 struct tcm_loop_hba *tl_hba;
+10 -58
drivers/target/sbp/sbp_target.c
··· 42 42 43 43 #include "sbp_target.h" 44 44 45 - /* Local pointer to allocated TCM configfs fabric module */ 46 - static struct target_fabric_configfs *sbp_fabric_configfs; 45 + static const struct target_core_fabric_ops sbp_ops; 47 46 48 47 /* FireWire address region for management and command block address handlers */ 49 48 static const struct fw_address_region sbp_register_region = { ··· 2214 2215 goto out_free_tpg; 2215 2216 } 2216 2217 2217 - ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn, 2218 - &tpg->se_tpg, (void *)tpg, 2218 + ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg, 2219 2219 TRANSPORT_TPG_TYPE_NORMAL); 2220 2220 if (ret < 0) 2221 2221 goto out_unreg_mgt_agt; ··· 2501 2503 NULL, 2502 2504 }; 2503 2505 2504 - static struct target_core_fabric_ops sbp_ops = { 2506 + static const struct target_core_fabric_ops sbp_ops = { 2507 + .module = THIS_MODULE, 2508 + .name = "sbp", 2505 2509 .get_fabric_name = sbp_get_fabric_name, 2506 2510 .get_fabric_proto_ident = sbp_get_fabric_proto_ident, 2507 2511 .tpg_get_wwn = sbp_get_fabric_wwn, ··· 2544 2544 .fabric_drop_np = NULL, 2545 2545 .fabric_make_nodeacl = sbp_make_nodeacl, 2546 2546 .fabric_drop_nodeacl = sbp_drop_nodeacl, 2547 - }; 2548 2547 2549 - static int sbp_register_configfs(void) 2550 - { 2551 - struct target_fabric_configfs *fabric; 2552 - int ret; 2553 - 2554 - fabric = target_fabric_configfs_init(THIS_MODULE, "sbp"); 2555 - if (IS_ERR(fabric)) { 2556 - pr_err("target_fabric_configfs_init() failed\n"); 2557 - return PTR_ERR(fabric); 2558 - } 2559 - 2560 - fabric->tf_ops = sbp_ops; 2561 - 2562 - /* 2563 - * Setup default attribute lists for various fabric->tf_cit_tmpl 2564 - */ 2565 - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; 2566 - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; 2567 - fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; 2568 - fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 2569 - 
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 2570 - fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2571 - fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2572 - fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2573 - fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2574 - 2575 - ret = target_fabric_configfs_register(fabric); 2576 - if (ret < 0) { 2577 - pr_err("target_fabric_configfs_register() failed for SBP\n"); 2578 - return ret; 2579 - } 2580 - 2581 - sbp_fabric_configfs = fabric; 2582 - 2583 - return 0; 2584 - }; 2585 - 2586 - static void sbp_deregister_configfs(void) 2587 - { 2588 - if (!sbp_fabric_configfs) 2589 - return; 2590 - 2591 - target_fabric_configfs_deregister(sbp_fabric_configfs); 2592 - sbp_fabric_configfs = NULL; 2548 + .tfc_wwn_attrs = sbp_wwn_attrs, 2549 + .tfc_tpg_base_attrs = sbp_tpg_base_attrs, 2550 + .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs, 2593 2551 }; 2594 2552 2595 2553 static int __init sbp_init(void) 2596 2554 { 2597 - int ret; 2598 - 2599 - ret = sbp_register_configfs(); 2600 - if (ret < 0) 2601 - return ret; 2602 - 2603 - return 0; 2555 + return target_register_template(&sbp_ops); 2604 2556 }; 2605 2557 2606 2558 static void __exit sbp_exit(void) 2607 2559 { 2608 - sbp_deregister_configfs(); 2560 + target_unregister_template(&sbp_ops); 2609 2561 }; 2610 2562 2611 2563 MODULE_DESCRIPTION("FireWire SBP fabric driver");
+59 -141
drivers/target/target_core_configfs.c
··· 142 142 143 143 tf = target_core_get_fabric(name); 144 144 if (!tf) { 145 - pr_err("target_core_register_fabric() trying autoload for %s\n", 146 - name); 145 + pr_debug("target_core_register_fabric() trying autoload for %s\n", 146 + name); 147 147 148 148 /* 149 149 * Below are some hardcoded request_module() calls to automatically ··· 165 165 */ 166 166 ret = request_module("iscsi_target_mod"); 167 167 if (ret < 0) { 168 - pr_err("request_module() failed for" 169 - " iscsi_target_mod.ko: %d\n", ret); 168 + pr_debug("request_module() failed for" 169 + " iscsi_target_mod.ko: %d\n", ret); 170 170 return ERR_PTR(-EINVAL); 171 171 } 172 172 } else if (!strncmp(name, "loopback", 8)) { ··· 178 178 */ 179 179 ret = request_module("tcm_loop"); 180 180 if (ret < 0) { 181 - pr_err("request_module() failed for" 182 - " tcm_loop.ko: %d\n", ret); 181 + pr_debug("request_module() failed for" 182 + " tcm_loop.ko: %d\n", ret); 183 183 return ERR_PTR(-EINVAL); 184 184 } 185 185 } ··· 188 188 } 189 189 190 190 if (!tf) { 191 - pr_err("target_core_get_fabric() failed for %s\n", 192 - name); 191 + pr_debug("target_core_get_fabric() failed for %s\n", 192 + name); 193 193 return ERR_PTR(-EINVAL); 194 194 } 195 195 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" ··· 300 300 // Start functions called by external Target Fabrics Modules 301 301 //############################################################################*/ 302 302 303 - /* 304 - * First function called by fabric modules to: 305 - * 306 - * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer. 307 - * 2) Add struct target_fabric_configfs to g_tf_list 308 - * 3) Return struct target_fabric_configfs to fabric module to be passed 309 - * into target_fabric_configfs_register(). 
310 - */ 311 - struct target_fabric_configfs *target_fabric_configfs_init( 312 - struct module *fabric_mod, 313 - const char *name) 303 + static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) 314 304 { 315 - struct target_fabric_configfs *tf; 316 - 317 - if (!(name)) { 318 - pr_err("Unable to locate passed fabric name\n"); 319 - return ERR_PTR(-EINVAL); 305 + if (!tfo->name) { 306 + pr_err("Missing tfo->name\n"); 307 + return -EINVAL; 320 308 } 321 - if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { 309 + if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) { 322 310 pr_err("Passed name: %s exceeds TARGET_FABRIC" 323 - "_NAME_SIZE\n", name); 324 - return ERR_PTR(-EINVAL); 311 + "_NAME_SIZE\n", tfo->name); 312 + return -EINVAL; 325 313 } 326 - 327 - tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); 328 - if (!tf) 329 - return ERR_PTR(-ENOMEM); 330 - 331 - INIT_LIST_HEAD(&tf->tf_list); 332 - atomic_set(&tf->tf_access_cnt, 0); 333 - /* 334 - * Setup the default generic struct config_item_type's (cits) in 335 - * struct target_fabric_configfs->tf_cit_tmpl 336 - */ 337 - tf->tf_module = fabric_mod; 338 - target_fabric_setup_cits(tf); 339 - 340 - tf->tf_subsys = target_core_subsystem[0]; 341 - snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name); 342 - 343 - mutex_lock(&g_tf_lock); 344 - list_add_tail(&tf->tf_list, &g_tf_list); 345 - mutex_unlock(&g_tf_lock); 346 - 347 - pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" 348 - ">>>>>>>>>>>>>>\n"); 349 - pr_debug("Initialized struct target_fabric_configfs: %p for" 350 - " %s\n", tf, tf->tf_name); 351 - return tf; 352 - } 353 - EXPORT_SYMBOL(target_fabric_configfs_init); 354 - 355 - /* 356 - * Called by fabric plugins after FAILED target_fabric_configfs_register() call. 
357 - */ 358 - void target_fabric_configfs_free( 359 - struct target_fabric_configfs *tf) 360 - { 361 - mutex_lock(&g_tf_lock); 362 - list_del(&tf->tf_list); 363 - mutex_unlock(&g_tf_lock); 364 - 365 - kfree(tf); 366 - } 367 - EXPORT_SYMBOL(target_fabric_configfs_free); 368 - 369 - /* 370 - * Perform a sanity check of the passed tf->tf_ops before completing 371 - * TCM fabric module registration. 372 - */ 373 - static int target_fabric_tf_ops_check( 374 - struct target_fabric_configfs *tf) 375 - { 376 - struct target_core_fabric_ops *tfo = &tf->tf_ops; 377 - 378 314 if (!tfo->get_fabric_name) { 379 315 pr_err("Missing tfo->get_fabric_name()\n"); 380 316 return -EINVAL; ··· 444 508 return 0; 445 509 } 446 510 447 - /* 448 - * Called 2nd from fabric module with returned parameter of 449 - * struct target_fabric_configfs * from target_fabric_configfs_init(). 450 - * 451 - * Upon a successful registration, the new fabric's struct config_item is 452 - * return. Also, a pointer to this struct is set in the passed 453 - * struct target_fabric_configfs. 
454 - */ 455 - int target_fabric_configfs_register( 456 - struct target_fabric_configfs *tf) 511 + int target_register_template(const struct target_core_fabric_ops *fo) 457 512 { 513 + struct target_fabric_configfs *tf; 458 514 int ret; 459 515 460 - if (!tf) { 461 - pr_err("Unable to locate target_fabric_configfs" 462 - " pointer\n"); 463 - return -EINVAL; 464 - } 465 - if (!tf->tf_subsys) { 466 - pr_err("Unable to target struct config_subsystem" 467 - " pointer\n"); 468 - return -EINVAL; 469 - } 470 - ret = target_fabric_tf_ops_check(tf); 471 - if (ret < 0) 516 + ret = target_fabric_tf_ops_check(fo); 517 + if (ret) 472 518 return ret; 473 519 474 - pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" 475 - ">>>>>>>>>>\n"); 476 - return 0; 477 - } 478 - EXPORT_SYMBOL(target_fabric_configfs_register); 479 - 480 - void target_fabric_configfs_deregister( 481 - struct target_fabric_configfs *tf) 482 - { 483 - struct configfs_subsystem *su; 484 - 520 + tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); 485 521 if (!tf) { 486 - pr_err("Unable to locate passed target_fabric_" 487 - "configfs\n"); 488 - return; 522 + pr_err("%s: could not allocate memory!\n", __func__); 523 + return -ENOMEM; 489 524 } 490 - su = tf->tf_subsys; 491 - if (!su) { 492 - pr_err("Unable to locate passed tf->tf_subsys" 493 - " pointer\n"); 494 - return; 495 - } 496 - pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" 497 - ">>>>>>>>>>>>\n"); 525 + 526 + INIT_LIST_HEAD(&tf->tf_list); 527 + atomic_set(&tf->tf_access_cnt, 0); 528 + 529 + /* 530 + * Setup the default generic struct config_item_type's (cits) in 531 + * struct target_fabric_configfs->tf_cit_tmpl 532 + */ 533 + tf->tf_module = fo->module; 534 + tf->tf_subsys = target_core_subsystem[0]; 535 + snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); 536 + 537 + tf->tf_ops = *fo; 538 + target_fabric_setup_cits(tf); 539 + 498 540 mutex_lock(&g_tf_lock); 499 - if (atomic_read(&tf->tf_access_cnt)) { 
500 - mutex_unlock(&g_tf_lock); 501 - pr_err("Non zero tf->tf_access_cnt for fabric %s\n", 502 - tf->tf_name); 503 - BUG(); 504 - } 505 - list_del(&tf->tf_list); 541 + list_add_tail(&tf->tf_list, &g_tf_list); 506 542 mutex_unlock(&g_tf_lock); 507 543 508 - pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" 509 - " %s\n", tf->tf_name); 510 - tf->tf_module = NULL; 511 - tf->tf_subsys = NULL; 512 - kfree(tf); 513 - 514 - pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" 515 - ">>>>>\n"); 544 + return 0; 516 545 } 517 - EXPORT_SYMBOL(target_fabric_configfs_deregister); 546 + EXPORT_SYMBOL(target_register_template); 547 + 548 + void target_unregister_template(const struct target_core_fabric_ops *fo) 549 + { 550 + struct target_fabric_configfs *t; 551 + 552 + mutex_lock(&g_tf_lock); 553 + list_for_each_entry(t, &g_tf_list, tf_list) { 554 + if (!strcmp(t->tf_name, fo->name)) { 555 + BUG_ON(atomic_read(&t->tf_access_cnt)); 556 + list_del(&t->tf_list); 557 + kfree(t); 558 + break; 559 + } 560 + } 561 + mutex_unlock(&g_tf_lock); 562 + } 563 + EXPORT_SYMBOL(target_unregister_template); 518 564 519 565 /*############################################################################## 520 566 // Stop functions called by external Target Fabrics Modules ··· 863 945 struct se_lun *lun; 864 946 struct se_portal_group *se_tpg; 865 947 struct t10_pr_registration *pr_reg; 866 - struct target_core_fabric_ops *tfo; 948 + const struct target_core_fabric_ops *tfo; 867 949 ssize_t len = 0; 868 950 869 951 spin_lock(&dev->dev_reservation_lock); ··· 897 979 static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( 898 980 struct se_device *dev, char *page) 899 981 { 900 - struct target_core_fabric_ops *tfo; 982 + const struct target_core_fabric_ops *tfo; 901 983 struct t10_pr_registration *pr_reg; 902 984 unsigned char buf[384]; 903 985 char i_buf[PR_REG_ISID_ID_LEN];
+26 -12
drivers/target/target_core_fabric_configfs.c
··· 56 56 pr_debug("Setup generic %s\n", __stringify(_name)); \ 57 57 } 58 58 59 + #define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ 60 + static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ 61 + { \ 62 + struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ 63 + struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ 64 + struct configfs_attribute **attrs = tf->tf_ops.tfc_##_name##_attrs; \ 65 + \ 66 + cit->ct_item_ops = _item_ops; \ 67 + cit->ct_group_ops = _group_ops; \ 68 + cit->ct_attrs = attrs; \ 69 + cit->ct_owner = tf->tf_module; \ 70 + pr_debug("Setup generic %s\n", __stringify(_name)); \ 71 + } 72 + 59 73 /* Start of tfc_tpg_mappedlun_cit */ 60 74 61 75 static int target_fabric_mappedlun_link( ··· 292 278 .store_attribute = target_fabric_nacl_attrib_attr_store, 293 279 }; 294 280 295 - TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL); 281 + TF_CIT_SETUP_DRV(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL); 296 282 297 283 /* End of tfc_tpg_nacl_attrib_cit */ 298 284 ··· 305 291 .store_attribute = target_fabric_nacl_auth_attr_store, 306 292 }; 307 293 308 - TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL); 294 + TF_CIT_SETUP_DRV(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL); 309 295 310 296 /* End of tfc_tpg_nacl_auth_cit */ 311 297 ··· 318 304 .store_attribute = target_fabric_nacl_param_attr_store, 319 305 }; 320 306 321 - TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL); 307 + TF_CIT_SETUP_DRV(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL); 322 308 323 309 /* End of tfc_tpg_nacl_param_cit */ 324 310 ··· 475 461 .drop_item = target_fabric_drop_mappedlun, 476 462 }; 477 463 478 - TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, 479 - &target_fabric_nacl_base_group_ops, NULL); 464 + TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops, 465 + 
&target_fabric_nacl_base_group_ops); 480 466 481 467 /* End of tfc_tpg_nacl_base_cit */ 482 468 ··· 584 570 .store_attribute = target_fabric_np_base_attr_store, 585 571 }; 586 572 587 - TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL); 573 + TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL); 588 574 589 575 /* End of tfc_tpg_np_base_cit */ 590 576 ··· 980 966 .store_attribute = target_fabric_tpg_attrib_attr_store, 981 967 }; 982 968 983 - TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL); 969 + TF_CIT_SETUP_DRV(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL); 984 970 985 971 /* End of tfc_tpg_attrib_cit */ 986 972 ··· 993 979 .store_attribute = target_fabric_tpg_auth_attr_store, 994 980 }; 995 981 996 - TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL); 982 + TF_CIT_SETUP_DRV(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL); 997 983 998 984 /* End of tfc_tpg_attrib_cit */ 999 985 ··· 1006 992 .store_attribute = target_fabric_tpg_param_attr_store, 1007 993 }; 1008 994 1009 - TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); 995 + TF_CIT_SETUP_DRV(tpg_param, &target_fabric_tpg_param_item_ops, NULL); 1010 996 1011 997 /* End of tfc_tpg_param_cit */ 1012 998 ··· 1032 1018 .store_attribute = target_fabric_tpg_attr_store, 1033 1019 }; 1034 1020 1035 - TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL); 1021 + TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL); 1036 1022 1037 1023 /* End of tfc_tpg_base_cit */ 1038 1024 ··· 1206 1192 .store_attribute = target_fabric_wwn_attr_store, 1207 1193 }; 1208 1194 1209 - TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL); 1195 + TF_CIT_SETUP_DRV(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops); 1210 1196 1211 1197 /* End of tfc_wwn_cit */ 1212 1198 ··· 1220 1206 .store_attribute = target_fabric_discovery_attr_store, 1221 1207 }; 1222 1208 
1223 - TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL); 1209 + TF_CIT_SETUP_DRV(discovery, &target_fabric_discovery_item_ops, NULL); 1224 1210 1225 1211 /* End of tfc_discovery_cit */ 1226 1212
+116 -147
drivers/target/target_core_file.c
··· 264 264 struct se_device *se_dev = cmd->se_dev; 265 265 struct fd_dev *dev = FD_DEV(se_dev); 266 266 struct file *prot_fd = dev->fd_prot_file; 267 - struct scatterlist *sg; 268 267 loff_t pos = (cmd->t_task_lba * se_dev->prot_length); 269 268 unsigned char *buf; 270 - u32 prot_size, len, size; 271 - int rc, ret = 1, i; 269 + u32 prot_size; 270 + int rc, ret = 1; 272 271 273 272 prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * 274 273 se_dev->prot_length; 275 274 276 275 if (!is_write) { 277 - fd_prot->prot_buf = vzalloc(prot_size); 276 + fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL); 278 277 if (!fd_prot->prot_buf) { 279 278 pr_err("Unable to allocate fd_prot->prot_buf\n"); 280 279 return -ENOMEM; 281 280 } 282 281 buf = fd_prot->prot_buf; 283 282 284 - fd_prot->prot_sg_nents = cmd->t_prot_nents; 285 - fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * 286 - fd_prot->prot_sg_nents, GFP_KERNEL); 283 + fd_prot->prot_sg_nents = 1; 284 + fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist), 285 + GFP_KERNEL); 287 286 if (!fd_prot->prot_sg) { 288 287 pr_err("Unable to allocate fd_prot->prot_sg\n"); 289 - vfree(fd_prot->prot_buf); 288 + kfree(fd_prot->prot_buf); 290 289 return -ENOMEM; 291 290 } 292 - size = prot_size; 293 - 294 - for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { 295 - 296 - len = min_t(u32, PAGE_SIZE, size); 297 - sg_set_buf(sg, buf, len); 298 - size -= len; 299 - buf += len; 300 - } 291 + sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents); 292 + sg_set_buf(fd_prot->prot_sg, buf, prot_size); 301 293 } 302 294 303 295 if (is_write) { ··· 310 318 311 319 if (is_write || ret < 0) { 312 320 kfree(fd_prot->prot_sg); 313 - vfree(fd_prot->prot_buf); 321 + kfree(fd_prot->prot_buf); 314 322 } 315 323 316 324 return ret; ··· 323 331 struct fd_dev *dev = FD_DEV(se_dev); 324 332 struct file *fd = dev->fd_file; 325 333 struct scatterlist *sg; 326 - struct iovec *iov; 327 - mm_segment_t old_fs; 334 + struct iov_iter 
iter; 335 + struct bio_vec *bvec; 336 + ssize_t len = 0; 328 337 loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); 329 338 int ret = 0, i; 330 339 331 - iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); 332 - if (!iov) { 340 + bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL); 341 + if (!bvec) { 333 342 pr_err("Unable to allocate fd_do_readv iov[]\n"); 334 343 return -ENOMEM; 335 344 } 336 345 337 346 for_each_sg(sgl, sg, sgl_nents, i) { 338 - iov[i].iov_len = sg->length; 339 - iov[i].iov_base = kmap(sg_page(sg)) + sg->offset; 347 + bvec[i].bv_page = sg_page(sg); 348 + bvec[i].bv_len = sg->length; 349 + bvec[i].bv_offset = sg->offset; 350 + 351 + len += sg->length; 340 352 } 341 353 342 - old_fs = get_fs(); 343 - set_fs(get_ds()); 344 - 354 + iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len); 345 355 if (is_write) 346 - ret = vfs_writev(fd, &iov[0], sgl_nents, &pos); 356 + ret = vfs_iter_write(fd, &iter, &pos); 347 357 else 348 - ret = vfs_readv(fd, &iov[0], sgl_nents, &pos); 358 + ret = vfs_iter_read(fd, &iter, &pos); 349 359 350 - set_fs(old_fs); 351 - 352 - for_each_sg(sgl, sg, sgl_nents, i) 353 - kunmap(sg_page(sg)); 354 - 355 - kfree(iov); 360 + kfree(bvec); 356 361 357 362 if (is_write) { 358 363 if (ret < 0 || ret != cmd->data_length) { ··· 425 436 return 0; 426 437 } 427 438 428 - static unsigned char * 429 - fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg, 430 - unsigned int len) 431 - { 432 - struct se_device *se_dev = cmd->se_dev; 433 - unsigned int block_size = se_dev->dev_attrib.block_size; 434 - unsigned int i = 0, end; 435 - unsigned char *buf, *p, *kmap_buf; 436 - 437 - buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL); 438 - if (!buf) { 439 - pr_err("Unable to allocate fd_execute_write_same buf\n"); 440 - return NULL; 441 - } 442 - 443 - kmap_buf = kmap(sg_page(sg)) + sg->offset; 444 - if (!kmap_buf) { 445 - pr_err("kmap() failed in fd_setup_write_same\n"); 446 - kfree(buf); 
447 - return NULL; 448 - } 449 - /* 450 - * Fill local *buf to contain multiple WRITE_SAME blocks up to 451 - * min(len, PAGE_SIZE) 452 - */ 453 - p = buf; 454 - end = min_t(unsigned int, len, PAGE_SIZE); 455 - 456 - while (i < end) { 457 - memcpy(p, kmap_buf, block_size); 458 - 459 - i += block_size; 460 - p += block_size; 461 - } 462 - kunmap(sg_page(sg)); 463 - 464 - return buf; 465 - } 466 - 467 439 static sense_reason_t 468 440 fd_execute_write_same(struct se_cmd *cmd) 469 441 { 470 442 struct se_device *se_dev = cmd->se_dev; 471 443 struct fd_dev *fd_dev = FD_DEV(se_dev); 472 - struct file *f = fd_dev->fd_file; 473 - struct scatterlist *sg; 474 - struct iovec *iov; 475 - mm_segment_t old_fs; 476 - sector_t nolb = sbc_get_write_same_sectors(cmd); 477 444 loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size; 478 - unsigned int len, len_tmp, iov_num; 479 - int i, rc; 480 - unsigned char *buf; 445 + sector_t nolb = sbc_get_write_same_sectors(cmd); 446 + struct iov_iter iter; 447 + struct bio_vec *bvec; 448 + unsigned int len = 0, i; 449 + ssize_t ret; 481 450 482 451 if (!nolb) { 483 452 target_complete_cmd(cmd, SAM_STAT_GOOD); ··· 446 499 " backends not supported\n"); 447 500 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 448 501 } 449 - sg = &cmd->t_data_sg[0]; 450 502 451 503 if (cmd->t_data_nents > 1 || 452 - sg->length != cmd->se_dev->dev_attrib.block_size) { 504 + cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) { 453 505 pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" 454 - " block_size: %u\n", cmd->t_data_nents, sg->length, 506 + " block_size: %u\n", 507 + cmd->t_data_nents, 508 + cmd->t_data_sg[0].length, 455 509 cmd->se_dev->dev_attrib.block_size); 456 510 return TCM_INVALID_CDB_FIELD; 457 511 } 458 512 459 - len = len_tmp = nolb * se_dev->dev_attrib.block_size; 460 - iov_num = DIV_ROUND_UP(len, PAGE_SIZE); 461 - 462 - buf = fd_setup_write_same_buf(cmd, sg, len); 463 - if (!buf) 513 + bvec = kcalloc(nolb, 
sizeof(struct bio_vec), GFP_KERNEL); 514 + if (!bvec) 464 515 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 465 516 466 - iov = vzalloc(sizeof(struct iovec) * iov_num); 467 - if (!iov) { 468 - pr_err("Unable to allocate fd_execute_write_same iovecs\n"); 469 - kfree(buf); 470 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 471 - } 472 - /* 473 - * Map the single fabric received scatterlist block now populated 474 - * in *buf into each iovec for I/O submission. 475 - */ 476 - for (i = 0; i < iov_num; i++) { 477 - iov[i].iov_base = buf; 478 - iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE); 479 - len_tmp -= iov[i].iov_len; 517 + for (i = 0; i < nolb; i++) { 518 + bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]); 519 + bvec[i].bv_len = cmd->t_data_sg[0].length; 520 + bvec[i].bv_offset = cmd->t_data_sg[0].offset; 521 + 522 + len += se_dev->dev_attrib.block_size; 480 523 } 481 524 482 - old_fs = get_fs(); 483 - set_fs(get_ds()); 484 - rc = vfs_writev(f, &iov[0], iov_num, &pos); 485 - set_fs(old_fs); 525 + iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len); 526 + ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos); 486 527 487 - vfree(iov); 488 - kfree(buf); 489 - 490 - if (rc < 0 || rc != len) { 491 - pr_err("vfs_writev() returned %d for write same\n", rc); 528 + kfree(bvec); 529 + if (ret < 0 || ret != len) { 530 + pr_err("vfs_iter_write() returned %zd for write same\n", ret); 492 531 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 493 532 } 494 533 495 534 target_complete_cmd(cmd, SAM_STAT_GOOD); 496 535 return 0; 536 + } 537 + 538 + static int 539 + fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, 540 + void *buf, size_t bufsize) 541 + { 542 + struct fd_dev *fd_dev = FD_DEV(se_dev); 543 + struct file *prot_fd = fd_dev->fd_prot_file; 544 + sector_t prot_length, prot; 545 + loff_t pos = lba * se_dev->prot_length; 546 + 547 + if (!prot_fd) { 548 + pr_err("Unable to locate fd_dev->fd_prot_file\n"); 549 + return -ENODEV; 550 + } 551 + 552 + 
prot_length = nolb * se_dev->prot_length; 553 + 554 + for (prot = 0; prot < prot_length;) { 555 + sector_t len = min_t(sector_t, bufsize, prot_length - prot); 556 + ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot); 557 + 558 + if (ret != len) { 559 + pr_err("vfs_write to prot file failed: %zd\n", ret); 560 + return ret < 0 ? ret : -ENODEV; 561 + } 562 + prot += ret; 563 + } 564 + 565 + return 0; 566 + } 567 + 568 + static int 569 + fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) 570 + { 571 + void *buf; 572 + int rc; 573 + 574 + buf = (void *)__get_free_page(GFP_KERNEL); 575 + if (!buf) { 576 + pr_err("Unable to allocate FILEIO prot buf\n"); 577 + return -ENOMEM; 578 + } 579 + memset(buf, 0xff, PAGE_SIZE); 580 + 581 + rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE); 582 + 583 + free_page((unsigned long)buf); 584 + 585 + return rc; 497 586 } 498 587 499 588 static sense_reason_t ··· 538 555 struct file *file = priv; 539 556 struct inode *inode = file->f_mapping->host; 540 557 int ret; 558 + 559 + if (cmd->se_dev->dev_attrib.pi_prot_type) { 560 + ret = fd_do_prot_unmap(cmd, lba, nolb); 561 + if (ret) 562 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 563 + } 541 564 542 565 if (S_ISBLK(inode->i_mode)) { 543 566 /* The backend is block device, use discard */ ··· 584 595 struct file *file = fd_dev->fd_file; 585 596 sector_t lba = cmd->t_task_lba; 586 597 sector_t nolb = sbc_get_write_same_sectors(cmd); 587 - int ret; 598 + sense_reason_t ret; 588 599 589 600 if (!nolb) { 590 601 target_complete_cmd(cmd, SAM_STAT_GOOD); ··· 632 643 if (data_direction == DMA_FROM_DEVICE) { 633 644 memset(&fd_prot, 0, sizeof(struct fd_prot)); 634 645 635 - if (cmd->prot_type) { 646 + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { 636 647 ret = fd_do_prot_rw(cmd, &fd_prot, false); 637 648 if (ret < 0) 638 649 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ··· 640 651 641 652 ret = fd_do_rw(cmd, sgl, sgl_nents, 0); 642 653 643 - if (ret > 0 
&& cmd->prot_type) { 654 + if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { 644 655 u32 sectors = cmd->data_length / dev->dev_attrib.block_size; 645 656 646 657 rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 647 658 0, fd_prot.prot_sg, 0); 648 659 if (rc) { 649 660 kfree(fd_prot.prot_sg); 650 - vfree(fd_prot.prot_buf); 661 + kfree(fd_prot.prot_buf); 651 662 return rc; 652 663 } 653 664 kfree(fd_prot.prot_sg); 654 - vfree(fd_prot.prot_buf); 665 + kfree(fd_prot.prot_buf); 655 666 } 656 667 } else { 657 668 memset(&fd_prot, 0, sizeof(struct fd_prot)); 658 669 659 - if (cmd->prot_type) { 670 + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { 660 671 u32 sectors = cmd->data_length / dev->dev_attrib.block_size; 661 672 662 673 ret = fd_do_prot_rw(cmd, &fd_prot, false); ··· 667 678 0, fd_prot.prot_sg, 0); 668 679 if (rc) { 669 680 kfree(fd_prot.prot_sg); 670 - vfree(fd_prot.prot_buf); 681 + kfree(fd_prot.prot_buf); 671 682 return rc; 672 683 } 673 684 } ··· 694 705 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 695 706 } 696 707 697 - if (ret > 0 && cmd->prot_type) { 708 + if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { 698 709 ret = fd_do_prot_rw(cmd, &fd_prot, true); 699 710 if (ret < 0) 700 711 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ··· 703 714 704 715 if (ret < 0) { 705 716 kfree(fd_prot.prot_sg); 706 - vfree(fd_prot.prot_buf); 717 + kfree(fd_prot.prot_buf); 707 718 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 708 719 } 709 720 ··· 867 878 868 879 static int fd_format_prot(struct se_device *dev) 869 880 { 870 - struct fd_dev *fd_dev = FD_DEV(dev); 871 - struct file *prot_fd = fd_dev->fd_prot_file; 872 - sector_t prot_length, prot; 873 881 unsigned char *buf; 874 - loff_t pos = 0; 875 882 int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; 876 - int rc, ret = 0, size, len; 883 + int ret; 877 884 878 885 if (!dev->dev_attrib.pi_prot_type) { 879 886 pr_err("Unable to format_prot while pi_prot_type 
== 0\n"); 880 - return -ENODEV; 881 - } 882 - if (!prot_fd) { 883 - pr_err("Unable to locate fd_dev->fd_prot_file\n"); 884 887 return -ENODEV; 885 888 } 886 889 ··· 881 900 pr_err("Unable to allocate FILEIO prot buf\n"); 882 901 return -ENOMEM; 883 902 } 884 - prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; 885 - size = prot_length; 886 903 887 904 pr_debug("Using FILEIO prot_length: %llu\n", 888 - (unsigned long long)prot_length); 905 + (unsigned long long)(dev->transport->get_blocks(dev) + 1) * 906 + dev->prot_length); 889 907 890 908 memset(buf, 0xff, unit_size); 891 - for (prot = 0; prot < prot_length; prot += unit_size) { 892 - len = min(unit_size, size); 893 - rc = kernel_write(prot_fd, buf, len, pos); 894 - if (rc != len) { 895 - pr_err("vfs_write to prot file failed: %d\n", rc); 896 - ret = -ENODEV; 897 - goto out; 898 - } 899 - pos += len; 900 - size -= len; 901 - } 902 - 903 - out: 909 + ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1, 910 + buf, unit_size); 904 911 vfree(buf); 905 912 return ret; 906 913 }
+2 -2
drivers/target/target_core_iblock.c
··· 444 444 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 445 445 sector_t lba = cmd->t_task_lba; 446 446 sector_t nolb = sbc_get_write_same_sectors(cmd); 447 - int ret; 447 + sense_reason_t ret; 448 448 449 449 ret = iblock_do_unmap(cmd, bdev, lba, nolb); 450 450 if (ret) ··· 774 774 sg_num--; 775 775 } 776 776 777 - if (cmd->prot_type) { 777 + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { 778 778 int rc = iblock_alloc_bip(cmd, bio_start); 779 779 if (rc) 780 780 goto fail_put_bios;
+6
drivers/target/target_core_internal.h
··· 4 4 /* target_core_alua.c */ 5 5 extern struct t10_alua_lu_gp *default_lu_gp; 6 6 7 + /* target_core_configfs.c */ 8 + extern struct configfs_subsystem *target_core_subsystem[]; 9 + 7 10 /* target_core_device.c */ 11 + extern struct mutex g_device_mutex; 12 + extern struct list_head g_device_list; 13 + 8 14 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); 9 15 int core_free_device_list_for_node(struct se_node_acl *, 10 16 struct se_portal_group *);
+29 -19
drivers/target/target_core_pr.c
··· 78 78 static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, 79 79 struct t10_pr_registration *, int, int); 80 80 81 + static int is_reservation_holder( 82 + struct t10_pr_registration *pr_res_holder, 83 + struct t10_pr_registration *pr_reg) 84 + { 85 + int pr_res_type; 86 + 87 + if (pr_res_holder) { 88 + pr_res_type = pr_res_holder->pr_res_type; 89 + 90 + return pr_res_holder == pr_reg || 91 + pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG || 92 + pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG; 93 + } 94 + return 0; 95 + } 96 + 81 97 static sense_reason_t 82 98 target_scsi2_reservation_check(struct se_cmd *cmd) 83 99 { ··· 680 664 struct se_dev_entry *deve_tmp; 681 665 struct se_node_acl *nacl_tmp; 682 666 struct se_port *port, *port_tmp; 683 - struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 667 + const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 684 668 struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; 685 669 int ret; 686 670 /* ··· 979 963 } 980 964 981 965 static void __core_scsi3_dump_registration( 982 - struct target_core_fabric_ops *tfo, 966 + const struct target_core_fabric_ops *tfo, 983 967 struct se_device *dev, 984 968 struct se_node_acl *nacl, 985 969 struct t10_pr_registration *pr_reg, ··· 1020 1004 enum register_type register_type, 1021 1005 int register_move) 1022 1006 { 1023 - struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 1007 + const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 1024 1008 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; 1025 1009 struct t10_reservation *pr_tmpl = &dev->t10_pr; 1026 1010 ··· 1236 1220 struct t10_pr_registration *pr_reg, 1237 1221 struct list_head *preempt_and_abort_list, 1238 1222 int dec_holders) 1223 + __releases(&pr_tmpl->registration_lock) 1224 + __acquires(&pr_tmpl->registration_lock) 1239 1225 { 1240 - struct target_core_fabric_ops *tfo = 1226 + const struct 
target_core_fabric_ops *tfo = 1241 1227 pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; 1242 1228 struct t10_reservation *pr_tmpl = &dev->t10_pr; 1243 1229 char i_buf[PR_REG_ISID_ID_LEN]; ··· 1463 1445 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; 1464 1446 LIST_HEAD(tid_dest_list); 1465 1447 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; 1466 - struct target_core_fabric_ops *tmp_tf_ops; 1448 + const struct target_core_fabric_ops *tmp_tf_ops; 1467 1449 unsigned char *buf; 1468 1450 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; 1469 1451 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; ··· 2305 2287 spin_lock(&dev->dev_reservation_lock); 2306 2288 pr_res_holder = dev->dev_pr_res_holder; 2307 2289 if (pr_res_holder) { 2308 - int pr_res_type = pr_res_holder->pr_res_type; 2309 2290 /* 2310 2291 * From spc4r17 Section 5.7.9: Reserving: 2311 2292 * ··· 2315 2298 * the logical unit, then the command shall be completed with 2316 2299 * RESERVATION CONFLICT status. 
2317 2300 */ 2318 - if ((pr_res_holder != pr_reg) && 2319 - (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) && 2320 - (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { 2301 + if (!is_reservation_holder(pr_res_holder, pr_reg)) { 2321 2302 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2322 2303 pr_err("SPC-3 PR: Attempted RESERVE from" 2323 2304 " [%s]: %s while reservation already held by" ··· 2424 2409 int explicit, 2425 2410 int unreg) 2426 2411 { 2427 - struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; 2412 + const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; 2428 2413 char i_buf[PR_REG_ISID_ID_LEN]; 2429 2414 int pr_res_type = 0, pr_res_scope = 0; 2430 2415 ··· 2492 2477 struct se_lun *se_lun = cmd->se_lun; 2493 2478 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; 2494 2479 struct t10_reservation *pr_tmpl = &dev->t10_pr; 2495 - int all_reg = 0; 2496 2480 sense_reason_t ret = 0; 2497 2481 2498 2482 if (!se_sess || !se_lun) { ··· 2528 2514 spin_unlock(&dev->dev_reservation_lock); 2529 2515 goto out_put_pr_reg; 2530 2516 } 2531 - if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 2532 - (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) 2533 - all_reg = 1; 2534 2517 2535 - if ((all_reg == 0) && (pr_res_holder != pr_reg)) { 2518 + if (!is_reservation_holder(pr_res_holder, pr_reg)) { 2536 2519 /* 2537 - * Non 'All Registrants' PR Type cases.. 2538 2520 * Release request from a registered I_T nexus that is not a 2539 2521 * persistent reservation holder. return GOOD status. 
2540 2522 */ ··· 2736 2726 enum preempt_type preempt_type) 2737 2727 { 2738 2728 struct se_node_acl *nacl = pr_reg->pr_reg_nacl; 2739 - struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 2729 + const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 2740 2730 char i_buf[PR_REG_ISID_ID_LEN]; 2741 2731 2742 2732 memset(i_buf, 0, PR_REG_ISID_ID_LEN); ··· 3121 3111 struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; 3122 3112 struct se_port *se_port; 3123 3113 struct se_portal_group *se_tpg, *dest_se_tpg = NULL; 3124 - struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; 3114 + const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; 3125 3115 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; 3126 3116 struct t10_reservation *pr_tmpl = &dev->t10_pr; 3127 3117 unsigned char *buf; ··· 3385 3375 * From spc4r17 section 5.7.8 Table 50 -- 3386 3376 * Register behaviors for a REGISTER AND MOVE service action 3387 3377 */ 3388 - if (pr_res_holder != pr_reg) { 3378 + if (!is_reservation_holder(pr_res_holder, pr_reg)) { 3389 3379 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" 3390 3380 " Nexus is not reservation holder\n"); 3391 3381 spin_unlock(&dev->dev_reservation_lock);
+99 -38
drivers/target/target_core_rd.c
··· 139 139 unsigned char *p; 140 140 141 141 while (total_sg_needed) { 142 + unsigned int chain_entry = 0; 143 + 142 144 sg_per_table = (total_sg_needed > max_sg_per_table) ? 143 145 max_sg_per_table : total_sg_needed; 144 146 145 - sg = kzalloc(sg_per_table * sizeof(struct scatterlist), 147 + #ifdef CONFIG_ARCH_HAS_SG_CHAIN 148 + 149 + /* 150 + * Reserve extra element for chain entry 151 + */ 152 + if (sg_per_table < total_sg_needed) 153 + chain_entry = 1; 154 + 155 + #endif /* CONFIG_ARCH_HAS_SG_CHAIN */ 156 + 157 + sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg), 146 158 GFP_KERNEL); 147 159 if (!sg) { 148 160 pr_err("Unable to allocate scatterlist array" ··· 162 150 return -ENOMEM; 163 151 } 164 152 165 - sg_init_table(sg, sg_per_table); 153 + sg_init_table(sg, sg_per_table + chain_entry); 154 + 155 + #ifdef CONFIG_ARCH_HAS_SG_CHAIN 156 + 157 + if (i > 0) { 158 + sg_chain(sg_table[i - 1].sg_table, 159 + max_sg_per_table + 1, sg); 160 + } 161 + 162 + #endif /* CONFIG_ARCH_HAS_SG_CHAIN */ 166 163 167 164 sg_table[i].sg_table = sg; 168 165 sg_table[i].rd_sg_count = sg_per_table; ··· 403 382 return NULL; 404 383 } 405 384 385 + typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int, 386 + unsigned int, struct scatterlist *, int); 387 + 388 + static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify) 389 + { 390 + struct se_device *se_dev = cmd->se_dev; 391 + struct rd_dev *dev = RD_DEV(se_dev); 392 + struct rd_dev_sg_table *prot_table; 393 + bool need_to_release = false; 394 + struct scatterlist *prot_sg; 395 + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; 396 + u32 prot_offset, prot_page; 397 + u32 prot_npages __maybe_unused; 398 + u64 tmp; 399 + sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 400 + 401 + tmp = cmd->t_task_lba * se_dev->prot_length; 402 + prot_offset = do_div(tmp, PAGE_SIZE); 403 + prot_page = tmp; 404 + 405 + prot_table = rd_get_prot_table(dev, prot_page); 406 + if 
(!prot_table) 407 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 408 + 409 + prot_sg = &prot_table->sg_table[prot_page - 410 + prot_table->page_start_offset]; 411 + 412 + #ifndef CONFIG_ARCH_HAS_SG_CHAIN 413 + 414 + prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length, 415 + PAGE_SIZE); 416 + 417 + /* 418 + * Allocate temporaly contiguous scatterlist entries if prot pages 419 + * straddles multiple scatterlist tables. 420 + */ 421 + if (prot_table->page_end_offset < prot_page + prot_npages - 1) { 422 + int i; 423 + 424 + prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL); 425 + if (!prot_sg) 426 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 427 + 428 + need_to_release = true; 429 + sg_init_table(prot_sg, prot_npages); 430 + 431 + for (i = 0; i < prot_npages; i++) { 432 + if (prot_page + i > prot_table->page_end_offset) { 433 + prot_table = rd_get_prot_table(dev, 434 + prot_page + i); 435 + if (!prot_table) { 436 + kfree(prot_sg); 437 + return rc; 438 + } 439 + sg_unmark_end(&prot_sg[i - 1]); 440 + } 441 + prot_sg[i] = prot_table->sg_table[prot_page + i - 442 + prot_table->page_start_offset]; 443 + } 444 + } 445 + 446 + #endif /* !CONFIG_ARCH_HAS_SG_CHAIN */ 447 + 448 + rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset); 449 + if (need_to_release) 450 + kfree(prot_sg); 451 + 452 + return rc; 453 + } 454 + 406 455 static sense_reason_t 407 456 rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 408 457 enum dma_data_direction data_direction) ··· 510 419 data_direction == DMA_FROM_DEVICE ? 
"Read" : "Write", 511 420 cmd->t_task_lba, rd_size, rd_page, rd_offset); 512 421 513 - if (cmd->prot_type && data_direction == DMA_TO_DEVICE) { 514 - struct rd_dev_sg_table *prot_table; 515 - struct scatterlist *prot_sg; 516 - u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; 517 - u32 prot_offset, prot_page; 518 - 519 - tmp = cmd->t_task_lba * se_dev->prot_length; 520 - prot_offset = do_div(tmp, PAGE_SIZE); 521 - prot_page = tmp; 522 - 523 - prot_table = rd_get_prot_table(dev, prot_page); 524 - if (!prot_table) 525 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 526 - 527 - prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; 528 - 529 - rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0, 530 - prot_sg, prot_offset); 422 + if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && 423 + data_direction == DMA_TO_DEVICE) { 424 + rc = rd_do_prot_rw(cmd, sbc_dif_verify_write); 531 425 if (rc) 532 426 return rc; 533 427 } ··· 578 502 } 579 503 sg_miter_stop(&m); 580 504 581 - if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) { 582 - struct rd_dev_sg_table *prot_table; 583 - struct scatterlist *prot_sg; 584 - u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; 585 - u32 prot_offset, prot_page; 586 - 587 - tmp = cmd->t_task_lba * se_dev->prot_length; 588 - prot_offset = do_div(tmp, PAGE_SIZE); 589 - prot_page = tmp; 590 - 591 - prot_table = rd_get_prot_table(dev, prot_page); 592 - if (!prot_table) 593 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 594 - 595 - prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; 596 - 597 - rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, 598 - prot_sg, prot_offset); 505 + if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && 506 + data_direction == DMA_FROM_DEVICE) { 507 + rc = rd_do_prot_rw(cmd, sbc_dif_verify_read); 599 508 if (rc) 600 509 return rc; 601 510 }
+81 -28
drivers/target/target_core_sbc.c
··· 93 93 { 94 94 struct se_device *dev = cmd->se_dev; 95 95 struct se_session *sess = cmd->se_sess; 96 + int pi_prot_type = dev->dev_attrib.pi_prot_type; 97 + 96 98 unsigned char *rbuf; 97 99 unsigned char buf[32]; 98 100 unsigned long long blocks = dev->transport->get_blocks(dev); ··· 116 114 * Set P_TYPE and PROT_EN bits for DIF support 117 115 */ 118 116 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 119 - if (dev->dev_attrib.pi_prot_type) 120 - buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; 117 + /* 118 + * Only override a device's pi_prot_type if no T10-PI is 119 + * available, and sess_prot_type has been explicitly enabled. 120 + */ 121 + if (!pi_prot_type) 122 + pi_prot_type = sess->sess_prot_type; 123 + 124 + if (pi_prot_type) 125 + buf[12] = (pi_prot_type - 1) << 1 | 0x1; 121 126 } 122 127 123 128 if (dev->transport->get_lbppbe) ··· 321 312 return 0; 322 313 } 323 314 324 - static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) 315 + static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) 325 316 { 326 317 unsigned char *buf, *addr; 327 318 struct scatterlist *sg; ··· 385 376 cmd->data_direction); 386 377 } 387 378 388 - static sense_reason_t compare_and_write_post(struct se_cmd *cmd) 379 + static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) 389 380 { 390 381 struct se_device *dev = cmd->se_dev; 391 382 ··· 408 399 return TCM_NO_SENSE; 409 400 } 410 401 411 - static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) 402 + static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) 412 403 { 413 404 struct se_device *dev = cmd->se_dev; 414 405 struct scatterlist *write_sg = NULL, *sg; ··· 423 414 424 415 /* 425 416 * Handle early failure in transport_generic_request_failure(), 426 - * which will not have taken ->caw_mutex yet.. 417 + * which will not have taken ->caw_sem yet.. 
427 418 */ 428 - if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 419 + if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) 429 420 return TCM_NO_SENSE; 421 + /* 422 + * Handle special case for zero-length COMPARE_AND_WRITE 423 + */ 424 + if (!cmd->data_length) 425 + goto out; 430 426 /* 431 427 * Immediately exit + release dev->caw_sem if command has already 432 428 * been failed with a non-zero SCSI status. ··· 595 581 } 596 582 597 583 static int 598 - sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, 584 + sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type, 599 585 bool is_write, struct se_cmd *cmd) 600 586 { 601 587 if (is_write) { 602 - cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS : 603 - TARGET_PROT_DOUT_INSERT; 588 + cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP : 589 + protect ? TARGET_PROT_DOUT_PASS : 590 + TARGET_PROT_DOUT_INSERT; 604 591 switch (protect) { 605 592 case 0x0: 606 593 case 0x3: ··· 625 610 return -EINVAL; 626 611 } 627 612 } else { 628 - cmd->prot_op = protect ? TARGET_PROT_DIN_PASS : 629 - TARGET_PROT_DIN_STRIP; 613 + cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT : 614 + protect ? 
TARGET_PROT_DIN_PASS : 615 + TARGET_PROT_DIN_STRIP; 630 616 switch (protect) { 631 617 case 0x0: 632 618 case 0x1: ··· 660 644 u32 sectors, bool is_write) 661 645 { 662 646 u8 protect = cdb[1] >> 5; 647 + int sp_ops = cmd->se_sess->sup_prot_ops; 648 + int pi_prot_type = dev->dev_attrib.pi_prot_type; 649 + bool fabric_prot = false; 663 650 664 651 if (!cmd->t_prot_sg || !cmd->t_prot_nents) { 665 - if (protect && !dev->dev_attrib.pi_prot_type) { 666 - pr_err("CDB contains protect bit, but device does not" 667 - " advertise PROTECT=1 feature bit\n"); 652 + if (unlikely(protect && 653 + !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) { 654 + pr_err("CDB contains protect bit, but device + fabric does" 655 + " not advertise PROTECT=1 feature bit\n"); 668 656 return TCM_INVALID_CDB_FIELD; 669 657 } 670 658 if (cmd->prot_pto) ··· 689 669 cmd->reftag_seed = cmd->t_task_lba; 690 670 break; 691 671 case TARGET_DIF_TYPE0_PROT: 672 + /* 673 + * See if the fabric supports T10-PI, and the session has been 674 + * configured to allow export PROTECT=1 feature bit with backend 675 + * devices that don't support T10-PI. 676 + */ 677 + fabric_prot = is_write ? 
678 + !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) : 679 + !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT)); 680 + 681 + if (fabric_prot && cmd->se_sess->sess_prot_type) { 682 + pi_prot_type = cmd->se_sess->sess_prot_type; 683 + break; 684 + } 685 + if (!protect) 686 + return TCM_NO_SENSE; 687 + /* Fallthrough */ 692 688 default: 693 - return TCM_NO_SENSE; 689 + pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " 690 + "PROTECT: 0x%02x\n", cdb[0], protect); 691 + return TCM_INVALID_CDB_FIELD; 694 692 } 695 693 696 - if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, 697 - is_write, cmd)) 694 + if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd)) 698 695 return TCM_INVALID_CDB_FIELD; 699 696 700 - cmd->prot_type = dev->dev_attrib.pi_prot_type; 697 + cmd->prot_type = pi_prot_type; 701 698 cmd->prot_length = dev->prot_length * sectors; 702 699 703 700 /** ··· 1203 1166 sdt = paddr + offset; 1204 1167 sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, 1205 1168 dev->dev_attrib.block_size)); 1206 - if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) 1169 + if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) 1207 1170 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); 1208 1171 sdt->app_tag = 0; 1209 1172 1210 - pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x" 1173 + pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x" 1211 1174 " app_tag: 0x%04x ref_tag: %u\n", 1212 - (unsigned long long)sector, sdt->guard_tag, 1213 - sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1175 + (cmd->data_direction == DMA_TO_DEVICE) ? 
1176 + "WRITE" : "READ", (unsigned long long)sector, 1177 + sdt->guard_tag, sdt->app_tag, 1178 + be32_to_cpu(sdt->ref_tag)); 1214 1179 1215 1180 sector++; 1216 1181 offset += sizeof(struct se_dif_v1_tuple); ··· 1224 1185 } 1225 1186 1226 1187 static sense_reason_t 1227 - sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, 1188 + sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt, 1228 1189 const void *p, sector_t sector, unsigned int ei_lba) 1229 1190 { 1191 + struct se_device *dev = cmd->se_dev; 1230 1192 int block_size = dev->dev_attrib.block_size; 1231 1193 __be16 csum; 1194 + 1195 + if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) 1196 + goto check_ref; 1232 1197 1233 1198 csum = cpu_to_be16(crc_t10dif(p, block_size)); 1234 1199 ··· 1243 1200 return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1244 1201 } 1245 1202 1246 - if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && 1203 + check_ref: 1204 + if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)) 1205 + return 0; 1206 + 1207 + if (cmd->prot_type == TARGET_DIF_TYPE1_PROT && 1247 1208 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 1248 1209 pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" 1249 1210 " sector MSB: 0x%08x\n", (unsigned long long)sector, ··· 1255 1208 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1256 1209 } 1257 1210 1258 - if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && 1211 + if (cmd->prot_type == TARGET_DIF_TYPE2_PROT && 1259 1212 be32_to_cpu(sdt->ref_tag) != ei_lba) { 1260 1213 pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" 1261 1214 " ei_lba: 0x%08x\n", (unsigned long long)sector, ··· 1275 1228 void *paddr, *addr; 1276 1229 unsigned int i, len, left; 1277 1230 unsigned int offset = sg_off; 1231 + 1232 + if (!sg) 1233 + return; 1278 1234 1279 1235 left = sectors * dev->prot_length; 1280 1236 ··· 1342 1292 (unsigned long long)sector, sdt->guard_tag, 1343 1293 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 
1344 1294 1345 - rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, 1295 + rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector, 1346 1296 ei_lba); 1347 1297 if (rc) { 1348 1298 kunmap_atomic(paddr); ··· 1359 1309 kunmap_atomic(paddr); 1360 1310 kunmap_atomic(daddr); 1361 1311 } 1312 + if (!sg) 1313 + return 0; 1314 + 1362 1315 sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); 1363 1316 1364 1317 return 0; ··· 1406 1353 continue; 1407 1354 } 1408 1355 1409 - rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, 1356 + rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector, 1410 1357 ei_lba); 1411 1358 if (rc) { 1412 1359 kunmap_atomic(paddr);
+10 -6
drivers/target/target_core_spc.c
··· 103 103 buf[5] |= 0x8; 104 104 /* 105 105 * Set Protection (PROTECT) bit when DIF has been enabled on the 106 - * device, and the transport supports VERIFY + PASS. 106 + * device, and the fabric supports VERIFY + PASS. Also report 107 + * PROTECT=1 if sess_prot_type has been configured to allow T10-PI 108 + * to unprotected devices. 107 109 */ 108 110 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 109 - if (dev->dev_attrib.pi_prot_type) 111 + if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type) 110 112 buf[5] |= 0x1; 111 113 } 112 114 ··· 469 467 * only for TYPE3 protection. 470 468 */ 471 469 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 472 - if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) 470 + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT || 471 + cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) 473 472 buf[4] = 0x5; 474 - else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) 473 + else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || 474 + cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) 475 475 buf[4] = 0x4; 476 476 } 477 477 ··· 865 861 * TAG field. 866 862 */ 867 863 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 868 - if (dev->dev_attrib.pi_prot_type) 864 + if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type) 869 865 p[5] |= 0x80; 870 866 } 871 867 ··· 1103 1099 unsigned char *buf; 1104 1100 unsigned char tbuf[SE_MODE_PAGE_BUF]; 1105 1101 int length; 1106 - int ret = 0; 1102 + sense_reason_t ret = 0; 1107 1103 int i; 1108 1104 1109 1105 if (!cmd->data_length) {
+2 -2
drivers/target/target_core_tmr.c
··· 125 125 if (dev != se_cmd->se_dev) 126 126 continue; 127 127 128 - /* skip se_cmd associated with tmr */ 129 - if (tmr->task_cmd == se_cmd) 128 + /* skip task management functions, including tmr->task_cmd */ 129 + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 130 130 continue; 131 131 132 132 ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
+1 -1
drivers/target/target_core_tpg.c
··· 672 672 } 673 673 674 674 int core_tpg_register( 675 - struct target_core_fabric_ops *tfo, 675 + const struct target_core_fabric_ops *tfo, 676 676 struct se_wwn *se_wwn, 677 677 struct se_portal_group *se_tpg, 678 678 void *tpg_fabric_ptr,
+136 -26
drivers/target/target_core_transport.c
··· 322 322 struct se_session *se_sess, 323 323 void *fabric_sess_ptr) 324 324 { 325 + const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 325 326 unsigned char buf[PR_REG_ISID_LEN]; 326 327 327 328 se_sess->se_tpg = se_tpg; ··· 334 333 * eg: *NOT* discovery sessions. 335 334 */ 336 335 if (se_nacl) { 336 + /* 337 + * 338 + * Determine if fabric allows for T10-PI feature bits exposed to 339 + * initiators for device backends with !dev->dev_attrib.pi_prot_type. 340 + * 341 + * If so, then always save prot_type on a per se_node_acl node 342 + * basis and re-instate the previous sess_prot_type to avoid 343 + * disabling PI from below any previously initiator side 344 + * registered LUNs. 345 + */ 346 + if (se_nacl->saved_prot_type) 347 + se_sess->sess_prot_type = se_nacl->saved_prot_type; 348 + else if (tfo->tpg_check_prot_fabric_only) 349 + se_sess->sess_prot_type = se_nacl->saved_prot_type = 350 + tfo->tpg_check_prot_fabric_only(se_tpg); 337 351 /* 338 352 * If the fabric module supports an ISID based TransportID, 339 353 * save this value in binary from the fabric I_T Nexus now. 
··· 420 404 } 421 405 EXPORT_SYMBOL(target_put_session); 422 406 407 + ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 408 + { 409 + struct se_session *se_sess; 410 + ssize_t len = 0; 411 + 412 + spin_lock_bh(&se_tpg->session_lock); 413 + list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 414 + if (!se_sess->se_node_acl) 415 + continue; 416 + if (!se_sess->se_node_acl->dynamic_node_acl) 417 + continue; 418 + if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 419 + break; 420 + 421 + len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 422 + se_sess->se_node_acl->initiatorname); 423 + len += 1; /* Include NULL terminator */ 424 + } 425 + spin_unlock_bh(&se_tpg->session_lock); 426 + 427 + return len; 428 + } 429 + EXPORT_SYMBOL(target_show_dynamic_sessions); 430 + 423 431 static void target_complete_nacl(struct kref *kref) 424 432 { 425 433 struct se_node_acl *nacl = container_of(kref, ··· 502 462 void transport_deregister_session(struct se_session *se_sess) 503 463 { 504 464 struct se_portal_group *se_tpg = se_sess->se_tpg; 505 - struct target_core_fabric_ops *se_tfo; 465 + const struct target_core_fabric_ops *se_tfo; 506 466 struct se_node_acl *se_nacl; 507 467 unsigned long flags; 508 468 bool comp_nacl = true; ··· 1158 1118 */ 1159 1119 void transport_init_se_cmd( 1160 1120 struct se_cmd *cmd, 1161 - struct target_core_fabric_ops *tfo, 1121 + const struct target_core_fabric_ops *tfo, 1162 1122 struct se_session *se_sess, 1163 1123 u32 data_length, 1164 1124 int data_direction, ··· 1610 1570 * has completed. 
1611 1571 */ 1612 1572 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1573 + __releases(&cmd->t_state_lock) 1574 + __acquires(&cmd->t_state_lock) 1613 1575 { 1614 1576 bool was_active = false; 1615 1577 ··· 1657 1615 transport_complete_task_attr(cmd); 1658 1616 /* 1659 1617 * Handle special case for COMPARE_AND_WRITE failure, where the 1660 - * callback is expected to drop the per device ->caw_mutex. 1618 + * callback is expected to drop the per device ->caw_sem. 1661 1619 */ 1662 1620 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1663 1621 cmd->transport_complete_callback) 1664 - cmd->transport_complete_callback(cmd); 1622 + cmd->transport_complete_callback(cmd, false); 1665 1623 1666 1624 switch (sense_reason) { 1667 1625 case TCM_NON_EXISTENT_LUN: ··· 1748 1706 } 1749 1707 } 1750 1708 1709 + static int target_write_prot_action(struct se_cmd *cmd) 1710 + { 1711 + u32 sectors; 1712 + /* 1713 + * Perform WRITE_INSERT of PI using software emulation when backend 1714 + * device has PI enabled, if the transport has not already generated 1715 + * PI using hardware WRITE_INSERT offload. 
1716 + */ 1717 + switch (cmd->prot_op) { 1718 + case TARGET_PROT_DOUT_INSERT: 1719 + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1720 + sbc_dif_generate(cmd); 1721 + break; 1722 + case TARGET_PROT_DOUT_STRIP: 1723 + if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 1724 + break; 1725 + 1726 + sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 1727 + cmd->pi_err = sbc_dif_verify_write(cmd, cmd->t_task_lba, 1728 + sectors, 0, NULL, 0); 1729 + if (unlikely(cmd->pi_err)) { 1730 + spin_lock_irq(&cmd->t_state_lock); 1731 + cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; 1732 + spin_unlock_irq(&cmd->t_state_lock); 1733 + transport_generic_request_failure(cmd, cmd->pi_err); 1734 + return -1; 1735 + } 1736 + break; 1737 + default: 1738 + break; 1739 + } 1740 + 1741 + return 0; 1742 + } 1743 + 1751 1744 static bool target_handle_task_attr(struct se_cmd *cmd) 1752 1745 { 1753 1746 struct se_device *dev = cmd->se_dev; ··· 1862 1785 cmd->t_state = TRANSPORT_PROCESSING; 1863 1786 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 1864 1787 spin_unlock_irq(&cmd->t_state_lock); 1865 - /* 1866 - * Perform WRITE_INSERT of PI using software emulation when backend 1867 - * device has PI enabled, if the transport has not already generated 1868 - * PI using hardware WRITE_INSERT offload. 
1869 - */ 1870 - if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) { 1871 - if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1872 - sbc_dif_generate(cmd); 1873 - } 1788 + 1789 + if (target_write_prot_action(cmd)) 1790 + return; 1874 1791 1875 1792 if (target_handle_task_attr(cmd)) { 1876 1793 spin_lock_irq(&cmd->t_state_lock); ··· 1990 1919 schedule_work(&cmd->se_dev->qf_work_queue); 1991 1920 } 1992 1921 1993 - static bool target_check_read_strip(struct se_cmd *cmd) 1922 + static bool target_read_prot_action(struct se_cmd *cmd) 1994 1923 { 1995 1924 sense_reason_t rc; 1996 1925 1997 - if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 1998 - rc = sbc_dif_read_strip(cmd); 1999 - if (rc) { 2000 - cmd->pi_err = rc; 2001 - return true; 1926 + switch (cmd->prot_op) { 1927 + case TARGET_PROT_DIN_STRIP: 1928 + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 1929 + rc = sbc_dif_read_strip(cmd); 1930 + if (rc) { 1931 + cmd->pi_err = rc; 1932 + return true; 1933 + } 2002 1934 } 1935 + break; 1936 + case TARGET_PROT_DIN_INSERT: 1937 + if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 1938 + break; 1939 + 1940 + sbc_dif_generate(cmd); 1941 + break; 1942 + default: 1943 + break; 2003 1944 } 2004 1945 2005 1946 return false; ··· 2058 1975 if (cmd->transport_complete_callback) { 2059 1976 sense_reason_t rc; 2060 1977 2061 - rc = cmd->transport_complete_callback(cmd); 1978 + rc = cmd->transport_complete_callback(cmd, true); 2062 1979 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { 1980 + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1981 + !cmd->data_length) 1982 + goto queue_rsp; 1983 + 2063 1984 return; 2064 1985 } else if (rc) { 2065 1986 ret = transport_send_check_condition_and_sense(cmd, ··· 2077 1990 } 2078 1991 } 2079 1992 1993 + queue_rsp: 2080 1994 switch (cmd->data_direction) { 2081 1995 case DMA_FROM_DEVICE: 2082 1996 spin_lock(&cmd->se_lun->lun_sep_lock); ··· 2091 2003 * backend had PI enabled, if the transport 
will not be 2092 2004 * performing hardware READ_STRIP offload. 2093 2005 */ 2094 - if (cmd->prot_op == TARGET_PROT_DIN_STRIP && 2095 - target_check_read_strip(cmd)) { 2006 + if (target_read_prot_action(cmd)) { 2096 2007 ret = transport_send_check_condition_and_sense(cmd, 2097 2008 cmd->pi_err, 0); 2098 2009 if (ret == -EAGAIN || ret == -ENOMEM) ··· 2181 2094 static inline void transport_free_pages(struct se_cmd *cmd) 2182 2095 { 2183 2096 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2097 + /* 2098 + * Release special case READ buffer payload required for 2099 + * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2100 + */ 2101 + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2102 + transport_free_sgl(cmd->t_bidi_data_sg, 2103 + cmd->t_bidi_data_nents); 2104 + cmd->t_bidi_data_sg = NULL; 2105 + cmd->t_bidi_data_nents = 0; 2106 + } 2184 2107 transport_reset_sgl_orig(cmd); 2185 2108 return; 2186 2109 } ··· 2343 2246 transport_generic_new_cmd(struct se_cmd *cmd) 2344 2247 { 2345 2248 int ret = 0; 2249 + bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2346 2250 2347 2251 /* 2348 2252 * Determine is the TCM fabric module has already allocated physical ··· 2352 2254 */ 2353 2255 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2354 2256 cmd->data_length) { 2355 - bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2356 2257 2357 2258 if ((cmd->se_cmd_flags & SCF_BIDI) || 2358 2259 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { ··· 2380 2283 2381 2284 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2382 2285 cmd->data_length, zero_flag); 2286 + if (ret < 0) 2287 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2288 + } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2289 + cmd->data_length) { 2290 + /* 2291 + * Special case for COMPARE_AND_WRITE with fabrics 2292 + * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 
2293 + */ 2294 + u32 caw_length = cmd->t_task_nolb * 2295 + cmd->se_dev->dev_attrib.block_size; 2296 + 2297 + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2298 + &cmd->t_bidi_data_nents, 2299 + caw_length, zero_flag); 2383 2300 if (ret < 0) 2384 2301 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2385 2302 } ··· 2487 2376 * fabric acknowledgement that requires two target_put_sess_cmd() 2488 2377 * invocations before se_cmd descriptor release. 2489 2378 */ 2490 - if (ack_kref) { 2379 + if (ack_kref) 2491 2380 kref_get(&se_cmd->cmd_kref); 2492 - se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2493 - } 2494 2381 2495 2382 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2496 2383 if (se_sess->sess_tearing_down) { ··· 2507 2398 EXPORT_SYMBOL(target_get_sess_cmd); 2508 2399 2509 2400 static void target_release_cmd_kref(struct kref *kref) 2401 + __releases(&se_cmd->se_sess->sess_cmd_lock) 2510 2402 { 2511 2403 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2512 2404 struct se_session *se_sess = se_cmd->se_sess;
+38 -14
drivers/target/target_core_user.c
··· 344 344 345 345 entry = (void *) mb + CMDR_OFF + cmd_head; 346 346 tcmu_flush_dcache_range(entry, sizeof(*entry)); 347 - tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD); 348 - tcmu_hdr_set_len(&entry->hdr, pad_size); 347 + tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 348 + tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 349 + entry->hdr.cmd_id = 0; /* not used for PAD */ 350 + entry->hdr.kflags = 0; 351 + entry->hdr.uflags = 0; 349 352 350 353 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 351 354 ··· 358 355 359 356 entry = (void *) mb + CMDR_OFF + cmd_head; 360 357 tcmu_flush_dcache_range(entry, sizeof(*entry)); 361 - tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD); 362 - tcmu_hdr_set_len(&entry->hdr, command_size); 363 - entry->cmd_id = tcmu_cmd->cmd_id; 358 + tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 359 + tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 360 + entry->hdr.cmd_id = tcmu_cmd->cmd_id; 361 + entry->hdr.kflags = 0; 362 + entry->hdr.uflags = 0; 364 363 365 364 /* 366 365 * Fix up iovecs, and handle if allocation in data ring wrapped. ··· 381 376 382 377 /* Even iov_base is relative to mb_addr */ 383 378 iov->iov_len = copy_bytes; 384 - iov->iov_base = (void *) udev->data_off + udev->data_head; 379 + iov->iov_base = (void __user *) udev->data_off + 380 + udev->data_head; 385 381 iov_cnt++; 386 382 iov++; 387 383 ··· 394 388 copy_bytes = sg->length - copy_bytes; 395 389 396 390 iov->iov_len = copy_bytes; 397 - iov->iov_base = (void *) udev->data_off + udev->data_head; 391 + iov->iov_base = (void __user *) udev->data_off + 392 + udev->data_head; 398 393 399 394 if (se_cmd->data_direction == DMA_TO_DEVICE) { 400 395 to = (void *) mb + udev->data_off + udev->data_head; ··· 412 405 kunmap_atomic(from); 413 406 } 414 407 entry->req.iov_cnt = iov_cnt; 408 + entry->req.iov_bidi_cnt = 0; 409 + entry->req.iov_dif_cnt = 0; 415 410 416 411 /* All offsets relative to mb_addr, not start of entry! 
*/ 417 412 cdb_off = CMDR_OFF + cmd_head + base_command_size; ··· 468 459 /* cmd has been completed already from timeout, just reclaim data 469 460 ring space */ 470 461 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 462 + return; 463 + } 464 + 465 + if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 466 + UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 467 + pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 468 + cmd->se_cmd); 469 + transport_generic_request_failure(cmd->se_cmd, 470 + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 471 + cmd->se_cmd = NULL; 472 + kmem_cache_free(tcmu_cmd_cache, cmd); 471 473 return; 472 474 } 473 475 ··· 560 540 561 541 tcmu_flush_dcache_range(entry, sizeof(*entry)); 562 542 563 - if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) { 564 - UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size); 543 + if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { 544 + UPDATE_HEAD(udev->cmdr_last_cleaned, 545 + tcmu_hdr_get_len(entry->hdr.len_op), 546 + udev->cmdr_size); 565 547 continue; 566 548 } 567 - WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD); 549 + WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 568 550 569 551 spin_lock(&udev->commands_lock); 570 - cmd = idr_find(&udev->commands, entry->cmd_id); 552 + cmd = idr_find(&udev->commands, entry->hdr.cmd_id); 571 553 if (cmd) 572 554 idr_remove(&udev->commands, cmd->cmd_id); 573 555 spin_unlock(&udev->commands_lock); ··· 582 560 583 561 tcmu_handle_completion(cmd, entry); 584 562 585 - UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size); 563 + UPDATE_HEAD(udev->cmdr_last_cleaned, 564 + tcmu_hdr_get_len(entry->hdr.len_op), 565 + udev->cmdr_size); 586 566 587 567 handled++; 588 568 } ··· 862 838 udev->data_size = TCMU_RING_SIZE - CMDR_SIZE; 863 839 864 840 mb = udev->mb_addr; 865 - mb->version = 1; 841 + mb->version = TCMU_MAILBOX_VERSION; 866 842 mb->cmdr_off = CMDR_OFF; 
867 843 mb->cmdr_size = udev->cmdr_size; 868 844 869 845 WARN_ON(!PAGE_ALIGNED(udev->data_off)); 870 846 WARN_ON(udev->data_size % PAGE_SIZE); 871 847 872 - info->version = "1"; 848 + info->version = xstr(TCMU_MAILBOX_VERSION); 873 849 874 850 info->mem[0].name = "tcm-user command & data buffer"; 875 851 info->mem[0].addr = (phys_addr_t) udev->mb_addr;
+12 -34
drivers/target/target_core_xcopy.c
··· 34 34 #include <target/target_core_fabric.h> 35 35 #include <target/target_core_configfs.h> 36 36 37 + #include "target_core_internal.h" 37 38 #include "target_core_pr.h" 38 39 #include "target_core_ua.h" 39 40 #include "target_core_xcopy.h" 40 41 41 42 static struct workqueue_struct *xcopy_wq = NULL; 42 - /* 43 - * From target_core_device.c 44 - */ 45 - extern struct mutex g_device_mutex; 46 - extern struct list_head g_device_list; 47 - /* 48 - * From target_core_configfs.c 49 - */ 50 - extern struct configfs_subsystem *target_core_subsystem[]; 51 43 52 44 static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) 53 45 { ··· 425 433 return 0; 426 434 } 427 435 428 - static struct target_core_fabric_ops xcopy_pt_tfo = { 436 + static const struct target_core_fabric_ops xcopy_pt_tfo = { 429 437 .get_fabric_name = xcopy_pt_get_fabric_name, 430 438 .get_task_tag = xcopy_pt_get_tag, 431 439 .get_cmd_state = xcopy_pt_get_cmd_state, ··· 540 548 } 541 549 } 542 550 543 - static int target_xcopy_init_pt_lun( 544 - struct xcopy_pt_cmd *xpt_cmd, 545 - struct xcopy_op *xop, 546 - struct se_device *se_dev, 547 - struct se_cmd *pt_cmd, 548 - bool remote_port) 551 + static void target_xcopy_init_pt_lun(struct se_device *se_dev, 552 + struct se_cmd *pt_cmd, bool remote_port) 549 553 { 550 554 /* 551 555 * Don't allocate + init an pt_cmd->se_lun if honoring local port for 552 556 * reservations. 
The pt_cmd->se_lun pointer will be setup from within 553 557 * target_xcopy_setup_pt_port() 554 558 */ 555 - if (!remote_port) { 556 - pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; 557 - return 0; 559 + if (remote_port) { 560 + pr_debug("Setup emulated se_dev: %p from se_dev\n", 561 + pt_cmd->se_dev); 562 + pt_cmd->se_lun = &se_dev->xcopy_lun; 563 + pt_cmd->se_dev = se_dev; 558 564 } 559 565 560 - pt_cmd->se_lun = &se_dev->xcopy_lun; 561 - pt_cmd->se_dev = se_dev; 562 - 563 - pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); 564 - pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; 565 - 566 - pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", 567 - pt_cmd->se_lun->lun_se_dev); 568 - 569 - return 0; 566 + pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 570 567 } 571 568 572 569 static int target_xcopy_setup_pt_cmd( ··· 573 592 * Setup LUN+port to honor reservations based upon xop->op_origin for 574 593 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received. 575 594 */ 576 - rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port); 577 - if (rc < 0) { 578 - ret = rc; 579 - goto out; 580 - } 595 + target_xcopy_init_pt_lun(se_dev, cmd, remote_port); 596 + 581 597 xpt_cmd->xcopy_op = xop; 582 598 target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); 583 599
-1
drivers/target/tcm_fc/tcm_fc.h
··· 129 129 130 130 extern struct mutex ft_lport_lock; 131 131 extern struct fc4_prov ft_prov; 132 - extern struct target_fabric_configfs *ft_configfs; 133 132 extern unsigned int ft_debug_logging; 134 133 135 134 /*
+24 -65
drivers/target/tcm_fc/tfc_conf.c
··· 48 48 49 49 #include "tcm_fc.h" 50 50 51 - struct target_fabric_configfs *ft_configfs; 51 + static const struct target_core_fabric_ops ft_fabric_ops; 52 52 53 53 static LIST_HEAD(ft_wwn_list); 54 54 DEFINE_MUTEX(ft_lport_lock); ··· 337 337 return NULL; 338 338 } 339 339 340 - ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, 340 + ret = core_tpg_register(&ft_fabric_ops, wwn, &tpg->se_tpg, 341 341 tpg, TRANSPORT_TPG_TYPE_NORMAL); 342 342 if (ret < 0) { 343 343 destroy_workqueue(wq); ··· 507 507 return tpg->index; 508 508 } 509 509 510 - static struct target_core_fabric_ops ft_fabric_ops = { 510 + static const struct target_core_fabric_ops ft_fabric_ops = { 511 + .module = THIS_MODULE, 512 + .name = "fc", 511 513 .get_fabric_name = ft_get_fabric_name, 512 514 .get_fabric_proto_ident = fc_get_fabric_proto_ident, 513 515 .tpg_get_wwn = ft_get_fabric_wwn, ··· 554 552 .fabric_drop_np = NULL, 555 553 .fabric_make_nodeacl = &ft_add_acl, 556 554 .fabric_drop_nodeacl = &ft_del_acl, 555 + 556 + .tfc_wwn_attrs = ft_wwn_attrs, 557 + .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs, 557 558 }; 558 - 559 - static int ft_register_configfs(void) 560 - { 561 - struct target_fabric_configfs *fabric; 562 - int ret; 563 - 564 - /* 565 - * Register the top level struct config_item_type with TCM core 566 - */ 567 - fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); 568 - if (IS_ERR(fabric)) { 569 - pr_err("%s: target_fabric_configfs_init() failed!\n", 570 - __func__); 571 - return PTR_ERR(fabric); 572 - } 573 - fabric->tf_ops = ft_fabric_ops; 574 - 575 - /* 576 - * Setup default attribute lists for various fabric->tf_cit_tmpl 577 - */ 578 - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs; 579 - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; 580 - fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 581 - fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 582 - fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 583 - 
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = 584 - ft_nacl_base_attrs; 585 - fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 586 - fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 587 - fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 588 - /* 589 - * register the fabric for use within TCM 590 - */ 591 - ret = target_fabric_configfs_register(fabric); 592 - if (ret < 0) { 593 - pr_debug("target_fabric_configfs_register() for" 594 - " FC Target failed!\n"); 595 - target_fabric_configfs_free(fabric); 596 - return -1; 597 - } 598 - 599 - /* 600 - * Setup our local pointer to *fabric. 601 - */ 602 - ft_configfs = fabric; 603 - return 0; 604 - } 605 - 606 - static void ft_deregister_configfs(void) 607 - { 608 - if (!ft_configfs) 609 - return; 610 - target_fabric_configfs_deregister(ft_configfs); 611 - ft_configfs = NULL; 612 - } 613 559 614 560 static struct notifier_block ft_notifier = { 615 561 .notifier_call = ft_lport_notify ··· 565 615 566 616 static int __init ft_init(void) 567 617 { 568 - if (ft_register_configfs()) 569 - return -1; 570 - if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) { 571 - ft_deregister_configfs(); 572 - return -1; 573 - } 618 + int ret; 619 + 620 + ret = target_register_template(&ft_fabric_ops); 621 + if (ret) 622 + goto out; 623 + 624 + ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov); 625 + if (ret) 626 + goto out_unregister_template; 627 + 574 628 blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier); 575 629 fc_lport_iterate(ft_lport_add, NULL); 576 630 return 0; 631 + 632 + out_unregister_template: 633 + target_unregister_template(&ft_fabric_ops); 634 + out: 635 + return ret; 577 636 } 578 637 579 638 static void __exit ft_exit(void) ··· 591 632 &ft_notifier); 592 633 fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov); 593 634 fc_lport_iterate(ft_lport_del, NULL); 594 - ft_deregister_configfs(); 635 + target_unregister_template(&ft_fabric_ops); 595 636 
synchronize_rcu(); 596 637 } 597 638
+9 -48
drivers/usb/gadget/legacy/tcm_usb_gadget.c
··· 29 29 30 30 USB_GADGET_COMPOSITE_OPTIONS(); 31 31 32 - static struct target_fabric_configfs *usbg_fabric_configfs; 32 + static const struct target_core_fabric_ops usbg_ops; 33 33 34 34 static inline struct f_uas *to_f_uas(struct usb_function *f) 35 35 { ··· 1572 1572 tpg->tport = tport; 1573 1573 tpg->tport_tpgt = tpgt; 1574 1574 1575 - ret = core_tpg_register(&usbg_fabric_configfs->tf_ops, wwn, 1576 - &tpg->se_tpg, tpg, 1575 + ret = core_tpg_register(&usbg_ops, wwn, &tpg->se_tpg, tpg, 1577 1576 TRANSPORT_TPG_TYPE_NORMAL); 1578 1577 if (ret < 0) { 1579 1578 destroy_workqueue(tpg->workqueue); ··· 1863 1864 return 1; 1864 1865 } 1865 1866 1866 - static struct target_core_fabric_ops usbg_ops = { 1867 + static const struct target_core_fabric_ops usbg_ops = { 1868 + .module = THIS_MODULE, 1869 + .name = "usb_gadget", 1867 1870 .get_fabric_name = usbg_get_fabric_name, 1868 1871 .get_fabric_proto_ident = usbg_get_fabric_proto_ident, 1869 1872 .tpg_get_wwn = usbg_get_fabric_wwn, ··· 1907 1906 .fabric_drop_np = NULL, 1908 1907 .fabric_make_nodeacl = usbg_make_nodeacl, 1909 1908 .fabric_drop_nodeacl = usbg_drop_nodeacl, 1910 - }; 1911 1909 1912 - static int usbg_register_configfs(void) 1913 - { 1914 - struct target_fabric_configfs *fabric; 1915 - int ret; 1916 - 1917 - fabric = target_fabric_configfs_init(THIS_MODULE, "usb_gadget"); 1918 - if (IS_ERR(fabric)) { 1919 - printk(KERN_ERR "target_fabric_configfs_init() failed\n"); 1920 - return PTR_ERR(fabric); 1921 - } 1922 - 1923 - fabric->tf_ops = usbg_ops; 1924 - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs; 1925 - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs; 1926 - fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 1927 - fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 1928 - fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 1929 - fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 1930 - fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 1931 - 
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 1932 - fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 1933 - ret = target_fabric_configfs_register(fabric); 1934 - if (ret < 0) { 1935 - printk(KERN_ERR "target_fabric_configfs_register() failed" 1936 - " for usb-gadget\n"); 1937 - return ret; 1938 - } 1939 - usbg_fabric_configfs = fabric; 1940 - return 0; 1941 - }; 1942 - 1943 - static void usbg_deregister_configfs(void) 1944 - { 1945 - if (!(usbg_fabric_configfs)) 1946 - return; 1947 - 1948 - target_fabric_configfs_deregister(usbg_fabric_configfs); 1949 - usbg_fabric_configfs = NULL; 1910 + .tfc_wwn_attrs = usbg_wwn_attrs, 1911 + .tfc_tpg_base_attrs = usbg_base_attrs, 1950 1912 }; 1951 1913 1952 1914 /* Start gadget.c code */ ··· 2418 2454 2419 2455 static int __init usb_target_gadget_init(void) 2420 2456 { 2421 - int ret; 2422 - 2423 - ret = usbg_register_configfs(); 2424 - return ret; 2457 + return target_register_template(&usbg_ops); 2425 2458 } 2426 2459 module_init(usb_target_gadget_init); 2427 2460 2428 2461 static void __exit usb_target_gadget_exit(void) 2429 2462 { 2430 - usbg_deregister_configfs(); 2463 + target_unregister_template(&usbg_ops); 2431 2464 } 2432 2465 module_exit(usb_target_gadget_exit); 2433 2466
+64 -64
drivers/vhost/scsi.c
··· 131 131 int tv_tpg_port_count; 132 132 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ 133 133 int tv_tpg_vhost_count; 134 + /* Used for enabling T10-PI with legacy devices */ 135 + int tv_fabric_prot_type; 134 136 /* list for vhost_scsi_list */ 135 137 struct list_head tv_tpg_list; 136 138 /* Used to protect access for tpg_nexus */ ··· 216 214 int vs_events_nr; /* num of pending events, protected by vq->mutex */ 217 215 }; 218 216 219 - /* Local pointer to allocated TCM configfs fabric module */ 220 - static struct target_fabric_configfs *vhost_scsi_fabric_configfs; 221 - 217 + static struct target_core_fabric_ops vhost_scsi_ops; 222 218 static struct workqueue_struct *vhost_scsi_workqueue; 223 219 224 220 /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ ··· 429 429 430 430 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 431 431 port_nexus_ptr); 432 + } 433 + 434 + static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg) 435 + { 436 + struct vhost_scsi_tpg *tpg = container_of(se_tpg, 437 + struct vhost_scsi_tpg, se_tpg); 438 + 439 + return tpg->tv_fabric_prot_type; 432 440 } 433 441 434 442 static struct se_node_acl * ··· 1886 1878 } 1887 1879 } 1888 1880 1881 + static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type( 1882 + struct se_portal_group *se_tpg, 1883 + const char *page, 1884 + size_t count) 1885 + { 1886 + struct vhost_scsi_tpg *tpg = container_of(se_tpg, 1887 + struct vhost_scsi_tpg, se_tpg); 1888 + unsigned long val; 1889 + int ret = kstrtoul(page, 0, &val); 1890 + 1891 + if (ret) { 1892 + pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); 1893 + return ret; 1894 + } 1895 + if (val != 0 && val != 1 && val != 3) { 1896 + pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val); 1897 + return -EINVAL; 1898 + } 1899 + tpg->tv_fabric_prot_type = val; 1900 + 1901 + return count; 1902 + } 1903 + 1904 + static ssize_t 
vhost_scsi_tpg_attrib_show_fabric_prot_type( 1905 + struct se_portal_group *se_tpg, 1906 + char *page) 1907 + { 1908 + struct vhost_scsi_tpg *tpg = container_of(se_tpg, 1909 + struct vhost_scsi_tpg, se_tpg); 1910 + 1911 + return sprintf(page, "%d\n", tpg->tv_fabric_prot_type); 1912 + } 1913 + TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR); 1914 + 1915 + static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = { 1916 + &vhost_scsi_tpg_attrib_fabric_prot_type.attr, 1917 + NULL, 1918 + }; 1919 + 1889 1920 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, 1890 1921 const char *name) 1891 1922 { ··· 2202 2155 tpg->tport = tport; 2203 2156 tpg->tport_tpgt = tpgt; 2204 2157 2205 - ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn, 2158 + ret = core_tpg_register(&vhost_scsi_ops, wwn, 2206 2159 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 2207 2160 if (ret < 0) { 2208 2161 kfree(tpg); ··· 2324 2277 }; 2325 2278 2326 2279 static struct target_core_fabric_ops vhost_scsi_ops = { 2280 + .module = THIS_MODULE, 2281 + .name = "vhost", 2327 2282 .get_fabric_name = vhost_scsi_get_fabric_name, 2328 2283 .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident, 2329 2284 .tpg_get_wwn = vhost_scsi_get_fabric_wwn, ··· 2338 2289 .tpg_check_demo_mode_cache = vhost_scsi_check_true, 2339 2290 .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, 2340 2291 .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, 2292 + .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only, 2341 2293 .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl, 2342 2294 .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl, 2343 2295 .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, ··· 2370 2320 .fabric_drop_np = NULL, 2371 2321 .fabric_make_nodeacl = vhost_scsi_make_nodeacl, 2372 2322 .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl, 2373 - }; 2374 2323 2375 - static int vhost_scsi_register_configfs(void) 2376 - { 2377 - struct 
target_fabric_configfs *fabric; 2378 - int ret; 2379 - 2380 - pr_debug("vhost-scsi fabric module %s on %s/%s" 2381 - " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, 2382 - utsname()->machine); 2383 - /* 2384 - * Register the top level struct config_item_type with TCM core 2385 - */ 2386 - fabric = target_fabric_configfs_init(THIS_MODULE, "vhost"); 2387 - if (IS_ERR(fabric)) { 2388 - pr_err("target_fabric_configfs_init() failed\n"); 2389 - return PTR_ERR(fabric); 2390 - } 2391 - /* 2392 - * Setup fabric->tf_ops from our local vhost_scsi_ops 2393 - */ 2394 - fabric->tf_ops = vhost_scsi_ops; 2395 - /* 2396 - * Setup default attribute lists for various fabric->tf_cit_tmpl 2397 - */ 2398 - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs; 2399 - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs; 2400 - fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 2401 - fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 2402 - fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 2403 - fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2404 - fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2405 - fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2406 - fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2407 - /* 2408 - * Register the fabric for use within TCM 2409 - */ 2410 - ret = target_fabric_configfs_register(fabric); 2411 - if (ret < 0) { 2412 - pr_err("target_fabric_configfs_register() failed" 2413 - " for TCM_VHOST\n"); 2414 - return ret; 2415 - } 2416 - /* 2417 - * Setup our local pointer to *fabric 2418 - */ 2419 - vhost_scsi_fabric_configfs = fabric; 2420 - pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n"); 2421 - return 0; 2422 - }; 2423 - 2424 - static void vhost_scsi_deregister_configfs(void) 2425 - { 2426 - if (!vhost_scsi_fabric_configfs) 2427 - return; 2428 - 2429 - target_fabric_configfs_deregister(vhost_scsi_fabric_configfs); 2430 - 
vhost_scsi_fabric_configfs = NULL; 2431 - pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n"); 2324 + .tfc_wwn_attrs = vhost_scsi_wwn_attrs, 2325 + .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, 2326 + .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs, 2432 2327 }; 2433 2328 2434 2329 static int __init vhost_scsi_init(void) 2435 2330 { 2436 2331 int ret = -ENOMEM; 2332 + 2333 + pr_debug("TCM_VHOST fabric module %s on %s/%s" 2334 + " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, 2335 + utsname()->machine); 2336 + 2437 2337 /* 2438 2338 * Use our own dedicated workqueue for submitting I/O into 2439 2339 * target core to avoid contention within system_wq. ··· 2396 2396 if (ret < 0) 2397 2397 goto out_destroy_workqueue; 2398 2398 2399 - ret = vhost_scsi_register_configfs(); 2399 + ret = target_register_template(&vhost_scsi_ops); 2400 2400 if (ret < 0) 2401 2401 goto out_vhost_scsi_deregister; 2402 2402 ··· 2412 2412 2413 2413 static void vhost_scsi_exit(void) 2414 2414 { 2415 - vhost_scsi_deregister_configfs(); 2415 + target_unregister_template(&vhost_scsi_ops); 2416 2416 vhost_scsi_deregister(); 2417 2417 destroy_workqueue(vhost_scsi_workqueue); 2418 2418 };
+13 -61
drivers/xen/xen-scsiback.c
··· 204 204 static DEFINE_MUTEX(scsiback_mutex); 205 205 static LIST_HEAD(scsiback_list); 206 206 207 - /* Local pointer to allocated TCM configfs fabric module */ 208 - static struct target_fabric_configfs *scsiback_fabric_configfs; 207 + static const struct target_core_fabric_ops scsiback_ops; 209 208 210 209 static void scsiback_get(struct vscsibk_info *info) 211 210 { ··· 1901 1902 tpg->tport = tport; 1902 1903 tpg->tport_tpgt = tpgt; 1903 1904 1904 - ret = core_tpg_register(&scsiback_fabric_configfs->tf_ops, wwn, 1905 + ret = core_tpg_register(&scsiback_ops, wwn, 1905 1906 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1906 1907 if (ret < 0) { 1907 1908 kfree(tpg); ··· 1943 1944 return 0; 1944 1945 } 1945 1946 1946 - static struct target_core_fabric_ops scsiback_ops = { 1947 + static const struct target_core_fabric_ops scsiback_ops = { 1948 + .module = THIS_MODULE, 1949 + .name = "xen-pvscsi", 1947 1950 .get_fabric_name = scsiback_get_fabric_name, 1948 1951 .get_fabric_proto_ident = scsiback_get_fabric_proto_ident, 1949 1952 .tpg_get_wwn = scsiback_get_fabric_wwn, ··· 1992 1991 .fabric_make_nodeacl = scsiback_make_nodeacl, 1993 1992 .fabric_drop_nodeacl = scsiback_drop_nodeacl, 1994 1993 #endif 1995 - }; 1996 1994 1997 - static int scsiback_register_configfs(void) 1998 - { 1999 - struct target_fabric_configfs *fabric; 2000 - int ret; 2001 - 2002 - pr_debug("fabric module %s on %s/%s on "UTS_RELEASE"\n", 2003 - VSCSI_VERSION, utsname()->sysname, utsname()->machine); 2004 - /* 2005 - * Register the top level struct config_item_type with TCM core 2006 - */ 2007 - fabric = target_fabric_configfs_init(THIS_MODULE, "xen-pvscsi"); 2008 - if (IS_ERR(fabric)) 2009 - return PTR_ERR(fabric); 2010 - 2011 - /* 2012 - * Setup fabric->tf_ops from our local scsiback_ops 2013 - */ 2014 - fabric->tf_ops = scsiback_ops; 2015 - /* 2016 - * Setup default attribute lists for various fabric->tf_cit_tmpl 2017 - */ 2018 - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = scsiback_wwn_attrs; 
2019 - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = scsiback_tpg_attrs; 2020 - fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 2021 - fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = scsiback_param_attrs; 2022 - fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 2023 - fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2024 - fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2025 - fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2026 - fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2027 - /* 2028 - * Register the fabric for use within TCM 2029 - */ 2030 - ret = target_fabric_configfs_register(fabric); 2031 - if (ret < 0) { 2032 - target_fabric_configfs_free(fabric); 2033 - return ret; 2034 - } 2035 - /* 2036 - * Setup our local pointer to *fabric 2037 - */ 2038 - scsiback_fabric_configfs = fabric; 2039 - pr_debug("Set fabric -> scsiback_fabric_configfs\n"); 2040 - return 0; 2041 - }; 2042 - 2043 - static void scsiback_deregister_configfs(void) 2044 - { 2045 - if (!scsiback_fabric_configfs) 2046 - return; 2047 - 2048 - target_fabric_configfs_deregister(scsiback_fabric_configfs); 2049 - scsiback_fabric_configfs = NULL; 2050 - pr_debug("Cleared scsiback_fabric_configfs\n"); 1995 + .tfc_wwn_attrs = scsiback_wwn_attrs, 1996 + .tfc_tpg_base_attrs = scsiback_tpg_attrs, 1997 + .tfc_tpg_param_attrs = scsiback_param_attrs, 2051 1998 }; 2052 1999 2053 2000 static const struct xenbus_device_id scsiback_ids[] = { ··· 2027 2078 if (!xen_domain()) 2028 2079 return -ENODEV; 2029 2080 2081 + pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n", 2082 + VSCSI_VERSION, utsname()->sysname, utsname()->machine); 2083 + 2030 2084 scsiback_cachep = kmem_cache_create("vscsiif_cache", 2031 2085 sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend); 2032 2086 if (!scsiback_cachep) ··· 2039 2087 if (ret) 2040 2088 goto out_cache_destroy; 2041 2089 2042 - ret = scsiback_register_configfs(); 2090 + ret = 
target_register_template(&scsiback_ops); 2043 2091 if (ret) 2044 2092 goto out_unregister_xenbus; 2045 2093 ··· 2062 2110 BUG(); 2063 2111 gnttab_free_pages(1, &page); 2064 2112 } 2065 - scsiback_deregister_configfs(); 2113 + target_unregister_template(&scsiback_ops); 2066 2114 xenbus_unregister_driver(&scsiback_driver); 2067 2115 kmem_cache_destroy(scsiback_cachep); 2068 2116 }
+11 -4
include/target/iscsi/iscsi_target_core.h
··· 20 20 #define ISCSIT_MIN_TAGS 16 21 21 #define ISCSIT_EXTRA_TAGS 8 22 22 #define ISCSIT_TCP_BACKLOG 256 23 + #define ISCSI_RX_THREAD_NAME "iscsi_trx" 24 + #define ISCSI_TX_THREAD_NAME "iscsi_ttx" 23 25 24 26 /* struct iscsi_node_attrib sanity values */ 25 27 #define NA_DATAOUT_TIMEOUT 3 ··· 62 60 #define TA_CACHE_CORE_NPS 0 63 61 /* T10 protection information disabled by default */ 64 62 #define TA_DEFAULT_T10_PI 0 63 + #define TA_DEFAULT_FABRIC_PROT_TYPE 0 65 64 66 65 #define ISCSI_IOV_DATA_BUFFER 5 67 66 ··· 603 600 struct iscsi_tpg_np *tpg_np; 604 601 /* Pointer to parent session */ 605 602 struct iscsi_session *sess; 606 - /* Pointer to thread_set in use for this conn's threads */ 607 - struct iscsi_thread_set *thread_set; 603 + int bitmap_id; 604 + int rx_thread_active; 605 + struct task_struct *rx_thread; 606 + int tx_thread_active; 607 + struct task_struct *tx_thread; 608 608 /* list_head for session connection list */ 609 609 struct list_head conn_list; 610 610 } ____cacheline_aligned; ··· 773 767 u32 demo_mode_discovery; 774 768 u32 default_erl; 775 769 u8 t10_pi; 770 + u32 fabric_prot_type; 776 771 struct iscsi_portal_group *tpg; 777 772 }; 778 773 ··· 878 871 /* Unique identifier used for the authentication daemon */ 879 872 u32 auth_id; 880 873 u32 inactive_ts; 881 - /* Thread Set bitmap count */ 882 - int ts_bitmap_count; 874 + #define ISCSIT_BITMAP_BITS 262144 883 875 /* Thread Set bitmap pointer */ 884 876 unsigned long *ts_bitmap; 877 + spinlock_t ts_bitmap_lock; 885 878 /* Used for iSCSI discovery session authentication */ 886 879 struct iscsi_node_acl discovery_acl; 887 880 struct iscsi_portal_group *discovery_tpg;
+5 -5
include/target/target_core_base.h
··· 165 165 SCF_SEND_DELAYED_TAS = 0x00004000, 166 166 SCF_ALUA_NON_OPTIMIZED = 0x00008000, 167 167 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, 168 - SCF_ACK_KREF = 0x00040000, 169 168 SCF_COMPARE_AND_WRITE = 0x00080000, 170 169 SCF_COMPARE_AND_WRITE_POST = 0x00100000, 171 - SCF_CMD_XCOPY_PASSTHROUGH = 0x00200000, 172 170 }; 173 171 174 172 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ ··· 518 520 struct list_head se_cmd_list; 519 521 struct completion cmd_wait_comp; 520 522 struct kref cmd_kref; 521 - struct target_core_fabric_ops *se_tfo; 523 + const struct target_core_fabric_ops *se_tfo; 522 524 sense_reason_t (*execute_cmd)(struct se_cmd *); 523 525 sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, 524 526 u32, enum dma_data_direction); 525 - sense_reason_t (*transport_complete_callback)(struct se_cmd *); 527 + sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); 526 528 527 529 unsigned char *t_task_cdb; 528 530 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; ··· 589 591 bool acl_stop:1; 590 592 u32 queue_depth; 591 593 u32 acl_index; 594 + enum target_prot_type saved_prot_type; 592 595 #define MAX_ACL_TAG_SIZE 64 593 596 char acl_tag[MAX_ACL_TAG_SIZE]; 594 597 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ ··· 615 616 unsigned sess_tearing_down:1; 616 617 u64 sess_bin_isid; 617 618 enum target_prot_op sup_prot_ops; 619 + enum target_prot_type sess_prot_type; 618 620 struct se_node_acl *se_node_acl; 619 621 struct se_portal_group *se_tpg; 620 622 void *fabric_sess_ptr; ··· 890 890 /* List of TCM sessions associated wth this TPG */ 891 891 struct list_head tpg_sess_list; 892 892 /* Pointer to $FABRIC_MOD dependent code */ 893 - struct target_core_fabric_ops *se_tpg_tfo; 893 + const struct target_core_fabric_ops *se_tpg_tfo; 894 894 struct se_wwn *se_tpg_wwn; 895 895 struct config_group tpg_group; 896 896 struct config_group *tpg_default_groups[7];
-6
include/target/target_core_configfs.h
··· 5 5 #define TARGET_CORE_NAME_MAX_LEN 64 6 6 #define TARGET_FABRIC_NAME_SIZE 32 7 7 8 - extern struct target_fabric_configfs *target_fabric_configfs_init( 9 - struct module *, const char *); 10 - extern void target_fabric_configfs_free(struct target_fabric_configfs *); 11 - extern int target_fabric_configfs_register(struct target_fabric_configfs *); 12 - extern void target_fabric_configfs_deregister(struct target_fabric_configfs *); 13 - 14 8 struct target_fabric_configfs_template { 15 9 struct config_item_type tfc_discovery_cit; 16 10 struct config_item_type tfc_wwn_cit;
+30 -3
include/target/target_core_fabric.h
··· 2 2 #define TARGET_CORE_FABRIC_H 3 3 4 4 struct target_core_fabric_ops { 5 + struct module *module; 6 + const char *name; 5 7 struct configfs_subsystem *tf_subsys; 6 8 char *(*get_fabric_name)(void); 7 9 u8 (*get_fabric_proto_ident)(struct se_portal_group *); ··· 29 27 * inquiry response 30 28 */ 31 29 int (*tpg_check_demo_mode_login_only)(struct se_portal_group *); 30 + /* 31 + * Optionally used as a configfs tunable to determine when 32 + * target-core should signal the PROTECT=1 feature bit for 33 + * backends that don't support T10-PI, so that either fabric 34 + * HW offload or target-core emulation performs the associated 35 + * WRITE_STRIP and READ_INSERT operations. 36 + */ 37 + int (*tpg_check_prot_fabric_only)(struct se_portal_group *); 32 38 struct se_node_acl *(*tpg_alloc_fabric_acl)( 33 39 struct se_portal_group *); 34 40 void (*tpg_release_fabric_acl)(struct se_portal_group *, ··· 92 82 struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, 93 83 struct config_group *, const char *); 94 84 void (*fabric_drop_nodeacl)(struct se_node_acl *); 85 + 86 + struct configfs_attribute **tfc_discovery_attrs; 87 + struct configfs_attribute **tfc_wwn_attrs; 88 + struct configfs_attribute **tfc_tpg_base_attrs; 89 + struct configfs_attribute **tfc_tpg_np_base_attrs; 90 + struct configfs_attribute **tfc_tpg_attrib_attrs; 91 + struct configfs_attribute **tfc_tpg_auth_attrs; 92 + struct configfs_attribute **tfc_tpg_param_attrs; 93 + struct configfs_attribute **tfc_tpg_nacl_base_attrs; 94 + struct configfs_attribute **tfc_tpg_nacl_attrib_attrs; 95 + struct configfs_attribute **tfc_tpg_nacl_auth_attrs; 96 + struct configfs_attribute **tfc_tpg_nacl_param_attrs; 95 97 }; 98 + 99 + int target_register_template(const struct target_core_fabric_ops *fo); 100 + void target_unregister_template(const struct target_core_fabric_ops *fo); 96 101 97 102 struct se_session *transport_init_session(enum target_prot_op); 98 103 int transport_alloc_session_tags(struct 
se_session *, unsigned int, ··· 120 95 struct se_node_acl *, struct se_session *, void *); 121 96 void target_get_session(struct se_session *); 122 97 void target_put_session(struct se_session *); 98 + ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *); 123 99 void transport_free_session(struct se_session *); 124 100 void target_put_nacl(struct se_node_acl *); 125 101 void transport_deregister_session_configfs(struct se_session *); 126 102 void transport_deregister_session(struct se_session *); 127 103 128 104 129 - void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *, 105 + void transport_init_se_cmd(struct se_cmd *, 106 + const struct target_core_fabric_ops *, 130 107 struct se_session *, u32, int, int, unsigned char *); 131 108 sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32); 132 109 sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); ··· 180 153 unsigned char *, u32, int); 181 154 int core_tpg_set_initiator_node_tag(struct se_portal_group *, 182 155 struct se_node_acl *, const char *); 183 - int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *, 184 - struct se_portal_group *, void *, int); 156 + int core_tpg_register(const struct target_core_fabric_ops *, 157 + struct se_wwn *, struct se_portal_group *, void *, int); 185 158 int core_tpg_deregister(struct se_portal_group *); 186 159 187 160 /* SAS helpers */
+5
include/target/target_core_fabric_configfs.h
··· 90 90 _fabric##_tpg_store_##_name); 91 91 92 92 93 + #define TF_TPG_BASE_ATTR_RO(_fabric, _name) \ 94 + static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \ 95 + __CONFIGFS_EATTR_RO(_name, \ 96 + _fabric##_tpg_show_##_name); 97 + 93 98 CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs); 94 99 #define TF_WWN_ATTR(_fabric, _name, _mode) \ 95 100 static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \
+27 -17
include/uapi/linux/target_core_user.h
··· 6 6 #include <linux/types.h> 7 7 #include <linux/uio.h> 8 8 9 - #define TCMU_VERSION "1.0" 9 + #define TCMU_VERSION "2.0" 10 10 11 11 /* 12 12 * Ring Design ··· 39 39 * should process the next packet the same way, and so on. 40 40 */ 41 41 42 - #define TCMU_MAILBOX_VERSION 1 42 + #define TCMU_MAILBOX_VERSION 2 43 43 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */ 44 + 45 + /* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */ 46 + #define xstr(s) str(s) 47 + #define str(s) #s 44 48 45 49 struct tcmu_mailbox { 46 50 __u16 version; ··· 68 64 * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode. 69 65 */ 70 66 struct tcmu_cmd_entry_hdr { 71 - __u32 len_op; 67 + __u32 len_op; 68 + __u16 cmd_id; 69 + __u8 kflags; 70 + #define TCMU_UFLAG_UNKNOWN_OP 0x1 71 + __u8 uflags; 72 + 72 73 } __packed; 73 74 74 75 #define TCMU_OP_MASK 0x7 75 76 76 - static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr) 77 + static inline enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op) 77 78 { 78 - return hdr->len_op & TCMU_OP_MASK; 79 + return len_op & TCMU_OP_MASK; 79 80 } 80 81 81 - static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op) 82 + static inline void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op) 82 83 { 83 - hdr->len_op &= ~TCMU_OP_MASK; 84 - hdr->len_op |= (op & TCMU_OP_MASK); 84 + *len_op &= ~TCMU_OP_MASK; 85 + *len_op |= (op & TCMU_OP_MASK); 85 86 } 86 87 87 - static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr) 88 + static inline __u32 tcmu_hdr_get_len(__u32 len_op) 88 89 { 89 - return hdr->len_op & ~TCMU_OP_MASK; 90 + return len_op & ~TCMU_OP_MASK; 90 91 } 91 92 92 - static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len) 93 + static inline void tcmu_hdr_set_len(__u32 *len_op, __u32 len) 93 94 { 94 - hdr->len_op &= TCMU_OP_MASK; 95 - hdr->len_op |= len; 95 + *len_op &= TCMU_OP_MASK; 96 + *len_op |= len; 96 97 } 97 98 98 99 /* 
Currently the same as SCSI_SENSE_BUFFERSIZE */ ··· 106 97 struct tcmu_cmd_entry { 107 98 struct tcmu_cmd_entry_hdr hdr; 108 99 109 - uint16_t cmd_id; 110 - uint16_t __pad1; 111 - 112 100 union { 113 101 struct { 102 + uint32_t iov_cnt; 103 + uint32_t iov_bidi_cnt; 104 + uint32_t iov_dif_cnt; 114 105 uint64_t cdb_off; 115 - uint64_t iov_cnt; 106 + uint64_t __pad1; 107 + uint64_t __pad2; 116 108 struct iovec iov[0]; 117 109 } req; 118 110 struct {