// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
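
/*
 * Example (illustrative sketch, not part of the original file): a
 * statistics consumer allocating a unique instance index, as the
 * target_core_stat code does. SCSI_INST_INDEX is one of the scsi_index_t
 * values from target_core_base.h.
 *
 *	u32 inst = scsi_get_new_index(SCSI_INST_INDEX);
 */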

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);

	wake_up(&sess->cmd_list_wq);
}

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
int transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	init_waitqueue_head(&se_sess->cmd_list_wq);
	return percpu_ref_init(&se_sess->cmd_count,
			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int ret;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	ret = transport_init_session(se_sess);
	if (ret < 0) {
		kmem_cache_free(se_sess_cache, se_sess);
		return ERR_PTR(ret);
	}
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 */
int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
				     false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
		       tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);
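
/*
 * Example (hedged sketch): how a fabric driver might pair
 * transport_alloc_session() with transport_alloc_session_tags() when it
 * wants per-command private data. "struct my_cmd" and the tag depth of
 * 128 are hypothetical values, not taken from this file.
 *
 *	struct se_session *sess;
 *
 *	sess = transport_alloc_session(TARGET_PROT_NORMAL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	if (transport_alloc_session_tags(sess, 128, sizeof(struct my_cmd))) {
 *		transport_free_session(sess);
 *		return -ENOMEM;
 *	}
 */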

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num: %u, but zero tag_size\n",
		       tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size: %u, but zero tag_num\n",
		       tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with struct se_portal_group->session_lock held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_sessions.
	 *
	 * Only set for struct se_session's that will actually be moving I/O,
	 * e.g. *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based preallocation
	 * of I/O descriptor tags, go ahead and perform that setup now.
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_setup_session);
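
/*
 * Example (illustrative): a typical fabric login path built on
 * target_setup_session(). Passing a non-zero tag_num selects the tag
 * pre-allocation path above; passing 0 skips it. "tpg", "struct my_cmd"
 * and the initiator name source are hypothetical fabric-driver details.
 * A session created this way is torn down later via
 * target_remove_session().
 *
 *	sess = target_setup_session(&tpg->se_tpg, 128, sizeof(struct my_cmd),
 *				    TARGET_PROT_NORMAL, initiator_name,
 *				    tpg, NULL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */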

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NUL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	percpu_ref_exit(&se_sess->cmd_count);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

static int target_release_res(struct se_device *dev, void *data)
{
	struct se_session *sess = data;

	if (dev->reservation_holder == sess)
		target_release_reservation(dev);
	return 0;
}

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Since the session is being removed, release SPC-2
	 * reservations held by the session that is disappearing.
	 */
	target_for_each_device(target_release_res, se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		 se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If the last kref is being dropped now for an explicit NodeACL,
	 * wake up the sleeping ->acl_free_comp waiter so the configfs
	 * se_node_acl->acl_group removal context can proceed from within
	 * transport_free_session() code.
	 *
	 * For dynamic ACLs, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	int success;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
		  target_complete_failure_work);
	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
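
/*
 * Example (sketch): a backend driver completing I/O from its completion
 * interrupt. target_complete_cmd() only marks the command complete and
 * queues the remaining work onto target_completion_wq, which is what
 * makes it safe in IRQ context. "struct my_request" is hypothetical.
 *
 *	static void my_backend_io_done(struct my_request *req)
 *	{
 *		target_complete_cmd(req->se_cmd, req->error ?
 *				SAM_STAT_CHECK_CONDITION : SAM_STAT_GOOD);
 *	}
 */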

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if ((scsi_status == SAM_STAT_GOOD ||
	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
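
/*
 * Worked example: for an INQUIRY style response where only 36 of the 256
 * bytes the initiator asked for are valid, a backend can call
 *
 *	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 36);
 *
 * which (assuming cmd->data_length was 256 and no prior underflow) sets
 * SCF_UNDERFLOW_BIT, residual_count = 220 and truncates data_length to 36
 * before handing off to target_complete_cmd().
 */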

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					     qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n",
			 cmd->se_tfo->fabric_name, cmd,
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			 : "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		       dev->dev_attrib.block_size,
		       dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
			vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set,
	 * from spc3r23.pdf section 7.5.1.
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association,
	 * from spc3r23.pdf Section 7.6.3.1 Table 297.
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
			vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type,
	 * from spc3r23.pdf Section 7.6.3.1 Table 298.
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			 "T10 VPD Binary Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			 "T10 VPD ASCII Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			 "T10 VPD UTF-8 Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported: 0x%02x",
			vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding),
	 * from spc3r23.pdf Section 7.6.3.1 Table 296.
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
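
/*
 * Example (sketch): the four transport_set_vpd_* helpers above are meant
 * to be fed the same INQUIRY EVPD 0x83 designation descriptor, each
 * decoding one field. "desc" pointing at a descriptor header and the
 * GFP_KERNEL context are assumptions for illustration.
 *
 *	struct t10_vpd *vpd = kzalloc(sizeof(*vpd), GFP_KERNEL);
 *
 *	if (vpd) {
 *		transport_set_vpd_proto_id(vpd, desc);
 *		transport_set_vpd_assoc(vpd, desc);
 *		transport_set_vpd_ident_type(vpd, desc);
 *		transport_set_vpd_ident(vpd, desc);
 *	}
 */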

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if cmd->data_length exceeds the fabric-enforced maximum
	 * transfer length, i.e. max_data_sg_nents worth of single PAGE_SIZE
	 * entry scatter-lists. If it does, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce the original cmd->data_length to that
	 * maximum length.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

/**
 * target_cmd_size_check - Check whether there will be a residual.
 * @cmd: SCSI command.
 * @size: Data buffer size derived from CDB. The data buffer size provided by
 *   the SCSI transport driver is available in @cmd->data_length.
 *
 * Compare the data buffer size from the CDB with the data buffer limit from the transport
 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
 *
 * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
 *
 * Return: TCM_NO_SENSE
 */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length: %u does not match SCSI CDB Length: %u for SAM Opcode: 0x%02x\n",
				    cmd->se_tfo->fabric_name,
				    cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow for WRITE data CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect to
			 * always reject overflow writes. Reject this case until
			 * full fabric driver level support for overflow writes
			 * is introduced tree-wide.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op CDB on non 512-byte sector setup subsystem plugin: %s\n",
			       dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length. Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}
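
/*
 * Worked example for the residual logic above (illustrative numbers):
 * a READ_10 on a 512-byte block device asks for 8 blocks, so the CDB
 * derived size is 4096, while the fabric advertised data_length = 2048.
 * Since size > data_length, SCF_OVERFLOW_BIT is set with
 * residual_count = 2048 and data_length stays 2048. With the values
 * swapped (size = 2048, data_length = 4096) SCF_UNDERFLOW_BIT is set
 * instead, residual_count = 2048, and data_length is reduced to 2048.
 */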

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer, u64 unpacked_lun)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
	cmd->orig_fe_lun = unpacked_lun;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	sense_reason_t ret;

	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
		       scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		ret = TCM_INVALID_CDB_FIELD;
		goto err;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now. Otherwise,
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
					  GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
			       scsi_command_size(cdb),
			       (unsigned long)sizeof(cmd->__t_task_cdb));
			ret = TCM_OUT_OF_RESOURCES;
			goto err;
		}
	}
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);
	return 0;

err:
	/*
	 * Copy the CDB here to allow trace_target_cmd_complete() to
	 * print the cdb to the trace buffers.
	 */
	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
					 (unsigned int)TCM_MAX_COMMAND_SIZE));
	return ret;
}
EXPORT_SYMBOL(target_cmd_init_cdb);

sense_reason_t
target_cmd_parse_cdb(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->fabric_name,
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_handle_cdb_direct cannot be called from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary.
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/**
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      data_length, data_dir, task_attr, sense,
			      unpacked_lun);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list. A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;

	rc = target_cmd_init_cdb(se_cmd, cdb);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_cmd_parse_cdb(se_cmd);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation.
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic does not memset its associated read buffers,
		 * so go ahead and do that here for type non-data CDBs. Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		    se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						      sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state.
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
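
/*
 * Example (sketch of a hypothetical fabric receive path): submitting a
 * newly arrived SCSI command. The embedded se_cmd must be zeroed and
 * se_cmd->tag set by the caller first; "io" is a fabric-private
 * structure assumed for illustration.
 *
 *	se_cmd->tag = io->tag;
 *	target_submit_cmd(se_cmd, sess, io->cdb, io->sense_buf,
 *			  scsilun_to_int(&io->lun), io->data_len,
 *			  TCM_SIMPLE_TAG, io->ddir, TARGET_SCF_ACK_KREF);
 */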
1750
1751static void target_complete_tmr_failure(struct work_struct *work)
1752{
1753 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1754
1755 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1756 se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1757
1758 transport_lun_remove_cmd(se_cmd);
1759 transport_cmd_check_stop_to_fabric(se_cmd);
1760}
1761
1762static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
1763 u64 *unpacked_lun)
1764{
1765 struct se_cmd *se_cmd;
1766 unsigned long flags;
1767 bool ret = false;
1768
1769 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1770 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1771 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1772 continue;
1773
1774 if (se_cmd->tag == tag) {
1775 *unpacked_lun = se_cmd->orig_fe_lun;
1776 ret = true;
1777 break;
1778 }
1779 }
1780 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1781
1782 return ret;
1783}
1784
1785/**
1786 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1787 * for TMR CDBs
1788 *
1789 * @se_cmd: command descriptor to submit
1790 * @se_sess: associated se_sess for endpoint
1791 * @sense: pointer to SCSI sense buffer
1792 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1793 * @fabric_tmr_ptr: fabric context for TMR req
1794 * @tm_type: Type of TM request
1795 * @gfp: gfp type for caller
1796 * @tag: referenced task tag for TMR_ABORT_TASK
1797 * @flags: submit cmd flags
1798 *
1799 * Callable from all contexts.
1800 **/
1801
1802int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1803 unsigned char *sense, u64 unpacked_lun,
1804 void *fabric_tmr_ptr, unsigned char tm_type,
1805 gfp_t gfp, u64 tag, int flags)
1806{
1807 struct se_portal_group *se_tpg;
1808 int ret;
1809
1810 se_tpg = se_sess->se_tpg;
1811 BUG_ON(!se_tpg);
1812
1813 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1814 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
1815 /*
1816 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1817 * allocation failure.
1818 */
1819 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1820 if (ret < 0)
1821 return -ENOMEM;
1822
1823 if (tm_type == TMR_ABORT_TASK)
1824 se_cmd->se_tmr_req->ref_task_tag = tag;
1825
1826 /* See target_submit_cmd for commentary */
1827 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1828 if (ret) {
1829 core_tmr_release_req(se_cmd->se_tmr_req);
1830 return ret;
1831 }
1832 /*
1833 * If this is ABORT_TASK with no explicit fabric provided LUN,
1834 * go ahead and search active session tags for a match to figure
1835 * out unpacked_lun for the original se_cmd.
1836 */
1837 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
1838 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
1839 goto failure;
1840 }
1841
1842 ret = transport_lookup_tmr_lun(se_cmd);
1843 if (ret)
1844 goto failure;
1845
1846 transport_generic_handle_tmr(se_cmd);
1847 return 0;
1848
1849 /*
1850 * For callback during failure handling, push this work off
1851 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1852 */
1853failure:
1854 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1855 schedule_work(&se_cmd->work);
1856 return 0;
1857}
1858EXPORT_SYMBOL(target_submit_tmr);
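
/*
 * Example (illustrative sketch, hypothetical demo_* name): submitting an
 * ABORT_TASK TMR when the fabric only knows the referenced task tag.
 * Passing TARGET_SCF_LOOKUP_LUN_FROM_TAG lets target-core resolve
 * unpacked_lun from the tag of the original command, as implemented in
 * target_lookup_lun_from_tag() above.
 *
 *	static int demo_abort_task(struct se_cmd *tmr_cmd,
 *				   struct se_session *sess,
 *				   unsigned char *sense, u64 ref_tag)
 *	{
 *		return target_submit_tmr(tmr_cmd, sess, sense, 0, NULL,
 *					 TMR_ABORT_TASK, GFP_KERNEL, ref_tag,
 *					 TARGET_SCF_ACK_KREF |
 *					 TARGET_SCF_LOOKUP_LUN_FROM_TAG);
 *	}
 */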
1859
1860/*
1861 * Handle SAM-esque emulation for generic transport request failures.
1862 */
1863void transport_generic_request_failure(struct se_cmd *cmd,
1864 sense_reason_t sense_reason)
1865{
1866 int ret = 0, post_ret;
1867
1868 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
1869 sense_reason);
1870 target_show_cmd("-----[ ", cmd);
1871
1872 /*
1873 * For SAM Task Attribute emulation for failed struct se_cmd
1874 */
1875 transport_complete_task_attr(cmd);
1876
1877 if (cmd->transport_complete_callback)
1878 cmd->transport_complete_callback(cmd, false, &post_ret);
1879
1880 if (cmd->transport_state & CMD_T_ABORTED) {
1881 INIT_WORK(&cmd->work, target_abort_work);
1882 queue_work(target_completion_wq, &cmd->work);
1883 return;
1884 }
1885
1886 switch (sense_reason) {
1887 case TCM_NON_EXISTENT_LUN:
1888 case TCM_UNSUPPORTED_SCSI_OPCODE:
1889 case TCM_INVALID_CDB_FIELD:
1890 case TCM_INVALID_PARAMETER_LIST:
1891 case TCM_PARAMETER_LIST_LENGTH_ERROR:
1892 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1893 case TCM_UNKNOWN_MODE_PAGE:
1894 case TCM_WRITE_PROTECTED:
1895 case TCM_ADDRESS_OUT_OF_RANGE:
1896 case TCM_CHECK_CONDITION_ABORT_CMD:
1897 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1898 case TCM_CHECK_CONDITION_NOT_READY:
1899 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1900 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1901 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1902 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1903 case TCM_TOO_MANY_TARGET_DESCS:
1904 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1905 case TCM_TOO_MANY_SEGMENT_DESCS:
1906 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1907 break;
1908 case TCM_OUT_OF_RESOURCES:
1909 cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
1910 goto queue_status;
1911 case TCM_LUN_BUSY:
1912 cmd->scsi_status = SAM_STAT_BUSY;
1913 goto queue_status;
1914 case TCM_RESERVATION_CONFLICT:
1915 /*
1916 * No SENSE Data payload for this case, set SCSI Status
1917 * and queue the response to $FABRIC_MOD.
1918 *
1919 * Uses linux/include/scsi/scsi.h SAM status codes defs
1920 */
1921 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1922 /*
1923 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1924 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1925 * CONFLICT STATUS.
1926 *
1927 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1928 */
1929 if (cmd->se_sess &&
1930 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
1931 == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
1932 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1933 cmd->orig_fe_lun, 0x2C,
1934 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1935 }
1936
1937 goto queue_status;
1938 default:
1939 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1940 cmd->t_task_cdb[0], sense_reason);
1941 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1942 break;
1943 }
1944
1945 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1946 if (ret)
1947 goto queue_full;
1948
1949check_stop:
1950 transport_lun_remove_cmd(cmd);
1951 transport_cmd_check_stop_to_fabric(cmd);
1952 return;
1953
1954queue_status:
1955 trace_target_cmd_complete(cmd);
1956 ret = cmd->se_tfo->queue_status(cmd);
1957 if (!ret)
1958 goto check_stop;
1959queue_full:
1960 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1961}
1962EXPORT_SYMBOL(transport_generic_request_failure);
1963
1964void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1965{
1966 sense_reason_t ret;
1967
1968 if (!cmd->execute_cmd) {
1969 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1970 goto err;
1971 }
1972 if (do_checks) {
1973 /*
1974 * Check for an existing UNIT ATTENTION condition after
1975 * target_handle_task_attr() has done SAM task attr
1976	 * checking, and may have already deferred execution
1977 * out to target_restart_delayed_cmds() context.
1978 */
1979 ret = target_scsi3_ua_check(cmd);
1980 if (ret)
1981 goto err;
1982
1983 ret = target_alua_state_check(cmd);
1984 if (ret)
1985 goto err;
1986
1987 ret = target_check_reservation(cmd);
1988 if (ret) {
1989 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1990 goto err;
1991 }
1992 }
1993
1994 ret = cmd->execute_cmd(cmd);
1995 if (!ret)
1996 return;
1997err:
1998 spin_lock_irq(&cmd->t_state_lock);
1999 cmd->transport_state &= ~CMD_T_SENT;
2000 spin_unlock_irq(&cmd->t_state_lock);
2001
2002 transport_generic_request_failure(cmd, ret);
2003}
2004
2005static int target_write_prot_action(struct se_cmd *cmd)
2006{
2007 u32 sectors;
2008 /*
2009 * Perform WRITE_INSERT of PI using software emulation when backend
2010 * device has PI enabled, if the transport has not already generated
2011 * PI using hardware WRITE_INSERT offload.
2012 */
2013 switch (cmd->prot_op) {
2014 case TARGET_PROT_DOUT_INSERT:
2015 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
2016 sbc_dif_generate(cmd);
2017 break;
2018 case TARGET_PROT_DOUT_STRIP:
2019 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
2020 break;
2021
2022 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
2023 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2024 sectors, 0, cmd->t_prot_sg, 0);
2025 if (unlikely(cmd->pi_err)) {
2026 spin_lock_irq(&cmd->t_state_lock);
2027 cmd->transport_state &= ~CMD_T_SENT;
2028 spin_unlock_irq(&cmd->t_state_lock);
2029 transport_generic_request_failure(cmd, cmd->pi_err);
2030 return -1;
2031 }
2032 break;
2033 default:
2034 break;
2035 }
2036
2037 return 0;
2038}
2039
2040static bool target_handle_task_attr(struct se_cmd *cmd)
2041{
2042 struct se_device *dev = cmd->se_dev;
2043
2044 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2045 return false;
2046
2047 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
2048
2049 /*
2050	 * For HEAD_OF_QUEUE, return false so the passed struct se_cmd is
2051	 * executed immediately, ahead of any delayed commands.
2052 */
2053 switch (cmd->sam_task_attr) {
2054 case TCM_HEAD_TAG:
2055 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
2056 cmd->t_task_cdb[0]);
2057 return false;
2058 case TCM_ORDERED_TAG:
2059 atomic_inc_mb(&dev->dev_ordered_sync);
2060
2061 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
2062 cmd->t_task_cdb[0]);
2063
2064 /*
2065 * Execute an ORDERED command if no other older commands
2066 * exist that need to be completed first.
2067 */
2068 if (!atomic_read(&dev->simple_cmds))
2069 return false;
2070 break;
2071 default:
2072 /*
2073 * For SIMPLE and UNTAGGED Task Attribute commands
2074 */
2075 atomic_inc_mb(&dev->simple_cmds);
2076 break;
2077 }
2078
2079 if (atomic_read(&dev->dev_ordered_sync) == 0)
2080 return false;
2081
2082 spin_lock(&dev->delayed_cmd_lock);
2083 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2084 spin_unlock(&dev->delayed_cmd_lock);
2085
2086 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
2087 cmd->t_task_cdb[0], cmd->sam_task_attr);
2088 return true;
2089}
2090
2091void target_execute_cmd(struct se_cmd *cmd)
2092{
2093 /*
2094 * Determine if frontend context caller is requesting the stopping of
2095 * this command for frontend exceptions.
2096 *
2097	 * If the received CDB has already been aborted, stop processing it here.
2098 */
2099 if (target_cmd_interrupted(cmd))
2100 return;
2101
2102 spin_lock_irq(&cmd->t_state_lock);
2103 cmd->t_state = TRANSPORT_PROCESSING;
2104 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2105 spin_unlock_irq(&cmd->t_state_lock);
2106
2107 if (target_write_prot_action(cmd))
2108 return;
2109
2110 if (target_handle_task_attr(cmd)) {
2111 spin_lock_irq(&cmd->t_state_lock);
2112 cmd->transport_state &= ~CMD_T_SENT;
2113 spin_unlock_irq(&cmd->t_state_lock);
2114 return;
2115 }
2116
2117 __target_execute_cmd(cmd, true);
2118}
2119EXPORT_SYMBOL(target_execute_cmd);
2120
2121/*
2122 * Process all commands up to the last received ORDERED task attribute which
2123 * requires another blocking boundary
2124 */
2125static void target_restart_delayed_cmds(struct se_device *dev)
2126{
2127 for (;;) {
2128 struct se_cmd *cmd;
2129
2130 spin_lock(&dev->delayed_cmd_lock);
2131 if (list_empty(&dev->delayed_cmd_list)) {
2132 spin_unlock(&dev->delayed_cmd_lock);
2133 break;
2134 }
2135
2136 cmd = list_entry(dev->delayed_cmd_list.next,
2137 struct se_cmd, se_delayed_node);
2138 list_del(&cmd->se_delayed_node);
2139 spin_unlock(&dev->delayed_cmd_lock);
2140
2141 cmd->transport_state |= CMD_T_SENT;
2142
2143 __target_execute_cmd(cmd, true);
2144
2145 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2146 break;
2147 }
2148}
2149
2150/*
2151 * Called from I/O completion to determine which dormant/delayed
2152 * and ordered cmds need to have their tasks added to the execution queue.
2153 */
2154static void transport_complete_task_attr(struct se_cmd *cmd)
2155{
2156 struct se_device *dev = cmd->se_dev;
2157
2158 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2159 return;
2160
2161 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2162 goto restart;
2163
2164 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2165 atomic_dec_mb(&dev->simple_cmds);
2166 dev->dev_cur_ordered_id++;
2167 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2168 dev->dev_cur_ordered_id++;
2169 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2170 dev->dev_cur_ordered_id);
2171 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2172 atomic_dec_mb(&dev->dev_ordered_sync);
2173
2174 dev->dev_cur_ordered_id++;
2175 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2176 dev->dev_cur_ordered_id);
2177 }
2178 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2179
2180restart:
2181 target_restart_delayed_cmds(dev);
2182}
2183
2184static void transport_complete_qf(struct se_cmd *cmd)
2185{
2186 int ret = 0;
2187
2188 transport_complete_task_attr(cmd);
2189 /*
2190 * If a fabric driver ->write_pending() or ->queue_data_in() callback
2191	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
2192 * the same callbacks should not be retried. Return CHECK_CONDITION
2193 * if a scsi_status is not already set.
2194 *
2195	 * If a fabric driver ->queue_status() has returned nonzero, always
2196	 * keep retrying no matter what.
2197 */
2198 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2199 if (cmd->scsi_status)
2200 goto queue_status;
2201
2202 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2203 goto queue_status;
2204 }
2205
2206 /*
2207 * Check if we need to send a sense buffer from
2208 * the struct se_cmd in question. We do NOT want
2209	 * to take this path if the IO has been marked as
2210 * needing to be treated like a "normal read". This
2211 * is the case if it's a tape read, and either the
2212 * FM, EOM, or ILI bits are set, but there is no
2213 * sense data.
2214 */
2215 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2216 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2217 goto queue_status;
2218
2219 switch (cmd->data_direction) {
2220 case DMA_FROM_DEVICE:
2221 /* queue status if not treating this as a normal read */
2222 if (cmd->scsi_status &&
2223 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2224 goto queue_status;
2225
2226 trace_target_cmd_complete(cmd);
2227 ret = cmd->se_tfo->queue_data_in(cmd);
2228 break;
2229 case DMA_TO_DEVICE:
2230 if (cmd->se_cmd_flags & SCF_BIDI) {
2231 ret = cmd->se_tfo->queue_data_in(cmd);
2232 break;
2233 }
2234 /* fall through */
2235 case DMA_NONE:
2236queue_status:
2237 trace_target_cmd_complete(cmd);
2238 ret = cmd->se_tfo->queue_status(cmd);
2239 break;
2240 default:
2241 break;
2242 }
2243
2244 if (ret < 0) {
2245 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2246 return;
2247 }
2248 transport_lun_remove_cmd(cmd);
2249 transport_cmd_check_stop_to_fabric(cmd);
2250}
2251
2252static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2253 int err, bool write_pending)
2254{
2255 /*
2256 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2257 * ->queue_data_in() callbacks from new process context.
2258 *
2259 * Otherwise for other errors, transport_complete_qf() will send
2260 * CHECK_CONDITION via ->queue_status() instead of attempting to
2261 * retry associated fabric driver data-transfer callbacks.
2262 */
2263 if (err == -EAGAIN || err == -ENOMEM) {
2264 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2265 TRANSPORT_COMPLETE_QF_OK;
2266 } else {
2267 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2268 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2269 }
2270
2271 spin_lock_irq(&dev->qf_cmd_lock);
2272 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2273 atomic_inc_mb(&dev->dev_qf_count);
2274 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2275
2276 schedule_work(&cmd->se_dev->qf_work_queue);
2277}
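
/*
 * Example (illustrative sketch, hypothetical demo_* names): a fabric
 * ->queue_data_in() that hits a transient transport queue limit returns
 * -EAGAIN (or -ENOMEM), which lands the command on the qf_cmd_list above
 * so transport_complete_qf() retries the callback later. Any other
 * nonzero error is treated as fatal, per the comment above.
 *
 *	static int demo_queue_data_in(struct se_cmd *cmd)
 *	{
 *		if (demo_hw_queue_full())
 *			return -EAGAIN;
 *		return demo_hw_send_data(cmd);
 *	}
 */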
2278
2279static bool target_read_prot_action(struct se_cmd *cmd)
2280{
2281 switch (cmd->prot_op) {
2282 case TARGET_PROT_DIN_STRIP:
2283 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2284 u32 sectors = cmd->data_length >>
2285 ilog2(cmd->se_dev->dev_attrib.block_size);
2286
2287 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2288 sectors, 0, cmd->t_prot_sg,
2289 0);
2290 if (cmd->pi_err)
2291 return true;
2292 }
2293 break;
2294 case TARGET_PROT_DIN_INSERT:
2295 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2296 break;
2297
2298 sbc_dif_generate(cmd);
2299 break;
2300 default:
2301 break;
2302 }
2303
2304 return false;
2305}
2306
2307static void target_complete_ok_work(struct work_struct *work)
2308{
2309 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2310 int ret;
2311
2312 /*
2313 * Check if we need to move delayed/dormant tasks from cmds on the
2314 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2315 * Attribute.
2316 */
2317 transport_complete_task_attr(cmd);
2318
2319 /*
2320 * Check to schedule QUEUE_FULL work, or execute an existing
2321 * cmd->transport_qf_callback()
2322 */
2323 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2324 schedule_work(&cmd->se_dev->qf_work_queue);
2325
2326 /*
2327 * Check if we need to send a sense buffer from
2328 * the struct se_cmd in question. We do NOT want
2329	 * to take this path if the IO has been marked as
2330 * needing to be treated like a "normal read". This
2331 * is the case if it's a tape read, and either the
2332 * FM, EOM, or ILI bits are set, but there is no
2333 * sense data.
2334 */
2335 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2336 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2337 WARN_ON(!cmd->scsi_status);
2338 ret = transport_send_check_condition_and_sense(
2339 cmd, 0, 1);
2340 if (ret)
2341 goto queue_full;
2342
2343 transport_lun_remove_cmd(cmd);
2344 transport_cmd_check_stop_to_fabric(cmd);
2345 return;
2346 }
2347 /*
2348	 * Check for a callback, used amongst other things by
2349 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2350 */
2351 if (cmd->transport_complete_callback) {
2352 sense_reason_t rc;
2353 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2354 bool zero_dl = !(cmd->data_length);
2355 int post_ret = 0;
2356
2357 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2358 if (!rc && !post_ret) {
2359 if (caw && zero_dl)
2360 goto queue_rsp;
2361
2362 return;
2363 } else if (rc) {
2364 ret = transport_send_check_condition_and_sense(cmd,
2365 rc, 0);
2366 if (ret)
2367 goto queue_full;
2368
2369 transport_lun_remove_cmd(cmd);
2370 transport_cmd_check_stop_to_fabric(cmd);
2371 return;
2372 }
2373 }
2374
2375queue_rsp:
2376 switch (cmd->data_direction) {
2377 case DMA_FROM_DEVICE:
2378 /*
2379 * if this is a READ-type IO, but SCSI status
2380 * is set, then skip returning data and just
2381 * return the status -- unless this IO is marked
2382 * as needing to be treated as a normal read,
2383 * in which case we want to go ahead and return
2384 * the data. This happens, for example, for tape
2385 * reads with the FM, EOM, or ILI bits set, with
2386 * no sense data.
2387 */
2388 if (cmd->scsi_status &&
2389 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2390 goto queue_status;
2391
2392 atomic_long_add(cmd->data_length,
2393 &cmd->se_lun->lun_stats.tx_data_octets);
2394 /*
2395 * Perform READ_STRIP of PI using software emulation when
2396		 * backend has PI enabled, if the transport will not be
2397 * performing hardware READ_STRIP offload.
2398 */
2399 if (target_read_prot_action(cmd)) {
2400 ret = transport_send_check_condition_and_sense(cmd,
2401 cmd->pi_err, 0);
2402 if (ret)
2403 goto queue_full;
2404
2405 transport_lun_remove_cmd(cmd);
2406 transport_cmd_check_stop_to_fabric(cmd);
2407 return;
2408 }
2409
2410 trace_target_cmd_complete(cmd);
2411 ret = cmd->se_tfo->queue_data_in(cmd);
2412 if (ret)
2413 goto queue_full;
2414 break;
2415 case DMA_TO_DEVICE:
2416 atomic_long_add(cmd->data_length,
2417 &cmd->se_lun->lun_stats.rx_data_octets);
2418 /*
2419 * Check if we need to send READ payload for BIDI-COMMAND
2420 */
2421 if (cmd->se_cmd_flags & SCF_BIDI) {
2422 atomic_long_add(cmd->data_length,
2423 &cmd->se_lun->lun_stats.tx_data_octets);
2424 ret = cmd->se_tfo->queue_data_in(cmd);
2425 if (ret)
2426 goto queue_full;
2427 break;
2428 }
2429 /* fall through */
2430 case DMA_NONE:
2431queue_status:
2432 trace_target_cmd_complete(cmd);
2433 ret = cmd->se_tfo->queue_status(cmd);
2434 if (ret)
2435 goto queue_full;
2436 break;
2437 default:
2438 break;
2439 }
2440
2441 transport_lun_remove_cmd(cmd);
2442 transport_cmd_check_stop_to_fabric(cmd);
2443 return;
2444
2445queue_full:
2446 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2447 " data_direction: %d\n", cmd, cmd->data_direction);
2448
2449 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2450}
2451
2452void target_free_sgl(struct scatterlist *sgl, int nents)
2453{
2454 sgl_free_n_order(sgl, nents, 0);
2455}
2456EXPORT_SYMBOL(target_free_sgl);
2457
2458static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2459{
2460 /*
2461 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2462	 * emulation, and free + reset pointers if necessary.
2463 */
2464 if (!cmd->t_data_sg_orig)
2465 return;
2466
2467 kfree(cmd->t_data_sg);
2468 cmd->t_data_sg = cmd->t_data_sg_orig;
2469 cmd->t_data_sg_orig = NULL;
2470 cmd->t_data_nents = cmd->t_data_nents_orig;
2471 cmd->t_data_nents_orig = 0;
2472}
2473
2474static inline void transport_free_pages(struct se_cmd *cmd)
2475{
2476 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2477 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2478 cmd->t_prot_sg = NULL;
2479 cmd->t_prot_nents = 0;
2480 }
2481
2482 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2483 /*
2484 * Release special case READ buffer payload required for
2485 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2486 */
2487 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2488 target_free_sgl(cmd->t_bidi_data_sg,
2489 cmd->t_bidi_data_nents);
2490 cmd->t_bidi_data_sg = NULL;
2491 cmd->t_bidi_data_nents = 0;
2492 }
2493 transport_reset_sgl_orig(cmd);
2494 return;
2495 }
2496 transport_reset_sgl_orig(cmd);
2497
2498 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2499 cmd->t_data_sg = NULL;
2500 cmd->t_data_nents = 0;
2501
2502 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2503 cmd->t_bidi_data_sg = NULL;
2504 cmd->t_bidi_data_nents = 0;
2505}
2506
2507void *transport_kmap_data_sg(struct se_cmd *cmd)
2508{
2509 struct scatterlist *sg = cmd->t_data_sg;
2510 struct page **pages;
2511 int i;
2512
2513 /*
2514 * We need to take into account a possible offset here for fabrics like
2515	 * tcm_loop that may be using a contig buffer from the SCSI midlayer for
2516 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2517 */
2518 if (!cmd->t_data_nents)
2519 return NULL;
2520
2521 BUG_ON(!sg);
2522 if (cmd->t_data_nents == 1)
2523 return kmap(sg_page(sg)) + sg->offset;
2524
2525 /* >1 page. use vmap */
2526 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
2527 if (!pages)
2528 return NULL;
2529
2530 /* convert sg[] to pages[] */
2531 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2532 pages[i] = sg_page(sg);
2533 }
2534
2535 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2536 kfree(pages);
2537 if (!cmd->t_data_vmap)
2538 return NULL;
2539
2540 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2541}
2542EXPORT_SYMBOL(transport_kmap_data_sg);
2543
2544void transport_kunmap_data_sg(struct se_cmd *cmd)
2545{
2546 if (!cmd->t_data_nents) {
2547 return;
2548 } else if (cmd->t_data_nents == 1) {
2549 kunmap(sg_page(cmd->t_data_sg));
2550 return;
2551 }
2552
2553 vunmap(cmd->t_data_vmap);
2554 cmd->t_data_vmap = NULL;
2555}
2556EXPORT_SYMBOL(transport_kunmap_data_sg);
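
/*
 * Example (illustrative sketch, hypothetical demo_* name): a CDB
 * emulation path that needs a linear view of the data buffer maps it,
 * inspects it, and unmaps it again via the pair of helpers above.
 *
 *	static int demo_peek_first_byte(struct se_cmd *cmd, u8 *out)
 *	{
 *		u8 *buf = transport_kmap_data_sg(cmd);
 *
 *		if (!buf)
 *			return -ENOMEM;
 *		*out = buf[0];
 *		transport_kunmap_data_sg(cmd);
 *		return 0;
 *	}
 */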
2557
2558int
2559target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2560 bool zero_page, bool chainable)
2561{
2562 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
2563
2564 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
2565 return *sgl ? 0 : -ENOMEM;
2566}
2567EXPORT_SYMBOL(target_alloc_sgl);
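
/*
 * Example (illustrative sketch): allocating a zeroed, non-chainable
 * scatterlist with target_alloc_sgl() and releasing it again with
 * target_free_sgl(). The demo_* wrapper is hypothetical.
 *
 *	static int demo_alloc_buf(struct scatterlist **sgl,
 *				  unsigned int *nents, u32 length)
 *	{
 *		return target_alloc_sgl(sgl, nents, length,
 *					true, false);
 *	}
 *
 * The boolean arguments request zeroed backing pages (zero_page = true)
 * and a flat, non-chainable table (chainable = false); the result is
 * freed with target_free_sgl(*sgl, *nents).
 */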
2568
2569/*
2570 * Allocate any required resources to execute the command. For writes we
2571 * might not have the payload yet, so notify the fabric via a call to
2572 * ->write_pending instead. Otherwise place it on the execution queue.
2573 */
2574sense_reason_t
2575transport_generic_new_cmd(struct se_cmd *cmd)
2576{
2577 unsigned long flags;
2578 int ret = 0;
2579 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2580
2581 if (cmd->prot_op != TARGET_PROT_NORMAL &&
2582 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2583 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2584 cmd->prot_length, true, false);
2585 if (ret < 0)
2586 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2587 }
2588
2589 /*
2590 * Determine if the TCM fabric module has already allocated physical
2591 * memory, and is directly calling transport_generic_map_mem_to_cmd()
2592 * beforehand.
2593 */
2594 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2595 cmd->data_length) {
2596
2597 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2598 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2599 u32 bidi_length;
2600
2601 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2602 bidi_length = cmd->t_task_nolb *
2603 cmd->se_dev->dev_attrib.block_size;
2604 else
2605 bidi_length = cmd->data_length;
2606
2607 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2608 &cmd->t_bidi_data_nents,
2609 bidi_length, zero_flag, false);
2610 if (ret < 0)
2611 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2612 }
2613
2614 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2615 cmd->data_length, zero_flag, false);
2616 if (ret < 0)
2617 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2618 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2619 cmd->data_length) {
2620 /*
2621 * Special case for COMPARE_AND_WRITE with fabrics
2622 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2623 */
2624 u32 caw_length = cmd->t_task_nolb *
2625 cmd->se_dev->dev_attrib.block_size;
2626
2627 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2628 &cmd->t_bidi_data_nents,
2629 caw_length, zero_flag, false);
2630 if (ret < 0)
2631 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2632 }
2633 /*
2634	 * If this command is not a write we can execute it right here;
2635 * for write buffers we need to notify the fabric driver first
2636 * and let it call back once the write buffers are ready.
2637 */
2638 target_add_to_state_list(cmd);
2639 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2640 target_execute_cmd(cmd);
2641 return 0;
2642 }
2643
2644 spin_lock_irqsave(&cmd->t_state_lock, flags);
2645 cmd->t_state = TRANSPORT_WRITE_PENDING;
2646 /*
2647 * Determine if frontend context caller is requesting the stopping of
2648 * this command for frontend exceptions.
2649 */
2650 if (cmd->transport_state & CMD_T_STOP &&
2651 !cmd->se_tfo->write_pending_must_be_called) {
2652 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2653 __func__, __LINE__, cmd->tag);
2654
2655 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2656
2657 complete_all(&cmd->t_transport_stop_comp);
2658 return 0;
2659 }
2660 cmd->transport_state &= ~CMD_T_ACTIVE;
2661 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2662
2663 ret = cmd->se_tfo->write_pending(cmd);
2664 if (ret)
2665 goto queue_full;
2666
2667 return 0;
2668
2669queue_full:
2670 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
2671 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2672 return 0;
2673}
2674EXPORT_SYMBOL(transport_generic_new_cmd);
2675
2676static void transport_write_pending_qf(struct se_cmd *cmd)
2677{
2678 unsigned long flags;
2679 int ret;
2680 bool stop;
2681
2682 spin_lock_irqsave(&cmd->t_state_lock, flags);
2683 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2684 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2685
2686 if (stop) {
2687 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2688 __func__, __LINE__, cmd->tag);
2689 complete_all(&cmd->t_transport_stop_comp);
2690 return;
2691 }
2692
2693 ret = cmd->se_tfo->write_pending(cmd);
2694 if (ret) {
2695 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
2696 cmd);
2697 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2698 }
2699}
2700
2701static bool
2702__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2703 unsigned long *flags);
2704
2705static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2706{
2707 unsigned long flags;
2708
2709 spin_lock_irqsave(&cmd->t_state_lock, flags);
2710 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2711 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2712}
2713
2714/*
2715 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2716 * finished.
2717 */
2718void target_put_cmd_and_wait(struct se_cmd *cmd)
2719{
2720 DECLARE_COMPLETION_ONSTACK(compl);
2721
2722 WARN_ON_ONCE(cmd->abrt_compl);
2723 cmd->abrt_compl = &compl;
2724 target_put_sess_cmd(cmd);
2725 wait_for_completion(&compl);
2726}
2727
2728/*
2729 * This function is called by frontend drivers after processing of a command
2730 * has finished.
2731 *
2732 * The protocol for ensuring that either the regular frontend command
2733 * processing flow or target_handle_abort() code drops one reference is as
2734 * follows:
2735 * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
2736 * the frontend driver to call this function synchronously or asynchronously.
2737 * That will cause one reference to be dropped.
2738 * - During regular command processing the target core sets CMD_T_COMPLETE
2739 * before invoking one of the .queue_*() functions.
2740 * - The code that aborts commands skips commands and TMFs for which
2741 * CMD_T_COMPLETE has been set.
2742 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
2743 * commands that will be aborted.
2744 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
2745 * transport_generic_free_cmd() skips its call to target_put_sess_cmd().
2746 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2747 * be called and will drop a reference.
2748 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2749 * will be called. target_handle_abort() will drop the final reference.
2750 */
2751int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2752{
2753 DECLARE_COMPLETION_ONSTACK(compl);
2754 int ret = 0;
2755 bool aborted = false, tas = false;
2756
2757 if (wait_for_tasks)
2758 target_wait_free_cmd(cmd, &aborted, &tas);
2759
2760 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
2761 /*
2762 * Handle WRITE failure case where transport_generic_new_cmd()
2763 * has already added se_cmd to state_list, but fabric has
2764 * failed command before I/O submission.
2765 */
2766 if (cmd->state_active)
2767 target_remove_from_state_list(cmd);
2768
2769 if (cmd->se_lun)
2770 transport_lun_remove_cmd(cmd);
2771 }
2772 if (aborted)
2773 cmd->free_compl = &compl;
2774 ret = target_put_sess_cmd(cmd);
2775 if (aborted) {
2776 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2777 wait_for_completion(&compl);
2778 ret = 1;
2779 }
2780 return ret;
2781}
2782EXPORT_SYMBOL(transport_generic_free_cmd);
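
/*
 * Example (illustrative sketch, hypothetical demo_* name): a fabric
 * response-completion path releasing its command descriptor. Passing
 * wait_for_tasks = false avoids blocking here; teardown paths that must
 * first drain outstanding I/O pass true instead.
 *
 *	static void demo_response_done(struct se_cmd *se_cmd)
 *	{
 *		transport_generic_free_cmd(se_cmd, false);
 *	}
 */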
2783
2784/**
2785 * target_get_sess_cmd - Add command to active ->sess_cmd_list
2786 * @se_cmd: command descriptor to add
2787 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
2788 */
2789int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2790{
2791 struct se_session *se_sess = se_cmd->se_sess;
2792 unsigned long flags;
2793 int ret = 0;
2794
2795 /*
2796 * Add a second kref if the fabric caller is expecting to handle
2797 * fabric acknowledgement that requires two target_put_sess_cmd()
2798 * invocations before se_cmd descriptor release.
2799 */
2800 if (ack_kref) {
2801 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2802 return -EINVAL;
2803
2804 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2805 }
2806
2807 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2808 if (se_sess->sess_tearing_down) {
2809 ret = -ESHUTDOWN;
2810 goto out;
2811 }
2812 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2813 percpu_ref_get(&se_sess->cmd_count);
2814out:
2815 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2816
2817 if (ret && ack_kref)
2818 target_put_sess_cmd(se_cmd);
2819
2820 return ret;
2821}
2822EXPORT_SYMBOL(target_get_sess_cmd);
2823
2824static void target_free_cmd_mem(struct se_cmd *cmd)
2825{
2826 transport_free_pages(cmd);
2827
2828 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2829 core_tmr_release_req(cmd->se_tmr_req);
2830 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2831 kfree(cmd->t_task_cdb);
2832}
2833
2834static void target_release_cmd_kref(struct kref *kref)
2835{
2836 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2837 struct se_session *se_sess = se_cmd->se_sess;
2838 struct completion *free_compl = se_cmd->free_compl;
2839 struct completion *abrt_compl = se_cmd->abrt_compl;
2840 unsigned long flags;
2841
2842 if (se_sess) {
2843 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2844 list_del_init(&se_cmd->se_cmd_list);
2845 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2846 }
2847
2848 target_free_cmd_mem(se_cmd);
2849 se_cmd->se_tfo->release_cmd(se_cmd);
2850 if (free_compl)
2851 complete(free_compl);
2852 if (abrt_compl)
2853 complete(abrt_compl);
2854
2855	if (se_sess)
		percpu_ref_put(&se_sess->cmd_count);
2856}
2857
2858/**
2859 * target_put_sess_cmd - decrease the command reference count
2860 * @se_cmd: command to drop a reference from
2861 *
2862 * Returns 1 if and only if this target_put_sess_cmd() call caused the
2863 * refcount to drop to zero. Returns zero otherwise.
2864 */
2865int target_put_sess_cmd(struct se_cmd *se_cmd)
2866{
2867 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2868}
2869EXPORT_SYMBOL(target_put_sess_cmd);
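
/*
 * Example (illustrative sketch, hypothetical demo_* name): the second
 * half of the TARGET_SCF_ACK_KREF pattern set up in target_get_sess_cmd().
 * Once the initiator has acknowledged the response, the fabric drops its
 * extra reference; the final put triggers target_release_cmd_kref() and
 * ->release_cmd().
 *
 *	static void demo_handle_ack(struct se_cmd *se_cmd)
 *	{
 *		target_put_sess_cmd(se_cmd);
 *	}
 */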
2870
2871static const char *data_dir_name(enum dma_data_direction d)
2872{
2873 switch (d) {
2874 case DMA_BIDIRECTIONAL: return "BIDI";
2875 case DMA_TO_DEVICE: return "WRITE";
2876 case DMA_FROM_DEVICE: return "READ";
2877 case DMA_NONE: return "NONE";
2878 }
2879
2880 return "(?)";
2881}
2882
2883static const char *cmd_state_name(enum transport_state_table t)
2884{
2885 switch (t) {
2886 case TRANSPORT_NO_STATE: return "NO_STATE";
2887 case TRANSPORT_NEW_CMD: return "NEW_CMD";
2888 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING";
2889 case TRANSPORT_PROCESSING: return "PROCESSING";
2890 case TRANSPORT_COMPLETE: return "COMPLETE";
2891 case TRANSPORT_ISTATE_PROCESSING:
2892 return "ISTATE_PROCESSING";
2893 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP";
2894 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK";
2895 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
2896 }
2897
2898 return "(?)";
2899}
2900
2901static void target_append_str(char **str, const char *txt)
2902{
2903 char *prev = *str;
2904
2905 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
2906 kstrdup(txt, GFP_ATOMIC);
2907 kfree(prev);
2908}
2909
2910/*
2911 * Convert a transport state bitmask into a string. The caller is
2912 * responsible for freeing the returned pointer.
2913 */
2914static char *target_ts_to_str(u32 ts)
2915{
2916 char *str = NULL;
2917
2918 if (ts & CMD_T_ABORTED)
2919 target_append_str(&str, "aborted");
2920 if (ts & CMD_T_ACTIVE)
2921 target_append_str(&str, "active");
2922 if (ts & CMD_T_COMPLETE)
2923 target_append_str(&str, "complete");
2924 if (ts & CMD_T_SENT)
2925 target_append_str(&str, "sent");
2926 if (ts & CMD_T_STOP)
2927 target_append_str(&str, "stop");
2928 if (ts & CMD_T_FABRIC_STOP)
2929 target_append_str(&str, "fabric_stop");
2930
2931 return str;
2932}
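
/*
 * For example, a transport_state of (CMD_T_ACTIVE | CMD_T_SENT) is
 * rendered as "active,sent"; callers kfree() the returned string.
 */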
2933
2934static const char *target_tmf_name(enum tcm_tmreq_table tmf)
2935{
2936 switch (tmf) {
2937 case TMR_ABORT_TASK: return "ABORT_TASK";
2938 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET";
2939 case TMR_CLEAR_ACA: return "CLEAR_ACA";
2940 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET";
2941 case TMR_LUN_RESET: return "LUN_RESET";
2942 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
2943 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
2944 case TMR_UNKNOWN: break;
2945 }
2946 return "(?)";
2947}
2948
2949void target_show_cmd(const char *pfx, struct se_cmd *cmd)
2950{
2951 char *ts_str = target_ts_to_str(cmd->transport_state);
2952 const u8 *cdb = cmd->t_task_cdb;
2953 struct se_tmr_req *tmf = cmd->se_tmr_req;
2954
2955 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2956 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
2957 pfx, cdb[0], cdb[1], cmd->tag,
2958 data_dir_name(cmd->data_direction),
2959 cmd->se_tfo->get_cmd_state(cmd),
2960 cmd_state_name(cmd->t_state), cmd->data_length,
2961 kref_read(&cmd->cmd_kref), ts_str);
2962 } else {
2963 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
2964 pfx, target_tmf_name(tmf->function), cmd->tag,
2965 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
2966 cmd_state_name(cmd->t_state),
2967 kref_read(&cmd->cmd_kref), ts_str);
2968 }
2969 kfree(ts_str);
2970}
2971EXPORT_SYMBOL(target_show_cmd);
2972
2973/**
2974 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
2975 * @se_sess: session to flag
2976 */
2977void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2978{
2979 unsigned long flags;
2980
2981 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2982 se_sess->sess_tearing_down = 1;
2983 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2984
2985 percpu_ref_kill(&se_sess->cmd_count);
2986}
2987EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2988
2989/**
2990 * target_wait_for_sess_cmds - Wait for outstanding commands
2991 * @se_sess: session to wait for active I/O
2992 */
2993void target_wait_for_sess_cmds(struct se_session *se_sess)
2994{
2995 struct se_cmd *cmd;
2996 int ret;
2997
2998 WARN_ON_ONCE(!se_sess->sess_tearing_down);
2999
3000 do {
3001 ret = wait_event_timeout(se_sess->cmd_list_wq,
3002 percpu_ref_is_zero(&se_sess->cmd_count),
3003 180 * HZ);
3004 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
3005 target_show_cmd("session shutdown: still waiting for ",
3006 cmd);
3007 } while (ret <= 0);
3008}
3009EXPORT_SYMBOL(target_wait_for_sess_cmds);
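
/*
 * Example (illustrative sketch, hypothetical demo_* name): the shutdown
 * ordering implied by the two helpers above. First flag the session so
 * no new commands are accepted, then block until all outstanding
 * commands have dropped their references.
 *
 *	static void demo_close_session(struct se_session *sess)
 *	{
 *		target_sess_cmd_list_set_waiting(sess);
 *		target_wait_for_sess_cmds(sess);
 *	}
 *
 * Fabric-specific teardown and session deregistration would follow.
 */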
3010
3011/*
3012 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
3013 * all references to the LUN have been released. Called during LUN shutdown.
3014 */
3015void transport_clear_lun_ref(struct se_lun *lun)
3016{
3017 percpu_ref_kill(&lun->lun_ref);
3018 wait_for_completion(&lun->lun_shutdown_comp);
3019}
3020
3021static bool
3022__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
3023 bool *aborted, bool *tas, unsigned long *flags)
3024 __releases(&cmd->t_state_lock)
3025 __acquires(&cmd->t_state_lock)
3026{
3027
3028 assert_spin_locked(&cmd->t_state_lock);
3029 WARN_ON_ONCE(!irqs_disabled());
3030
3031 if (fabric_stop)
3032 cmd->transport_state |= CMD_T_FABRIC_STOP;
3033
3034 if (cmd->transport_state & CMD_T_ABORTED)
3035 *aborted = true;
3036
3037 if (cmd->transport_state & CMD_T_TAS)
3038 *tas = true;
3039
3040 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
3041 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3042 return false;
3043
3044 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
3045 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3046 return false;
3047
3048 if (!(cmd->transport_state & CMD_T_ACTIVE))
3049 return false;
3050
3051 if (fabric_stop && *aborted)
3052 return false;
3053
3054 cmd->transport_state |= CMD_T_STOP;
3055
3056 target_show_cmd("wait_for_tasks: Stopping ", cmd);
3057
3058 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
3059
3060 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
3061 180 * HZ))
3062 target_show_cmd("wait for tasks: ", cmd);
3063
3064 spin_lock_irqsave(&cmd->t_state_lock, *flags);
3065 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
3066
3067 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
3068 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
3069
3070 return true;
3071}
3072
3073/**
3074 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
3075 * @cmd: command to wait on
3076 */
3077bool transport_wait_for_tasks(struct se_cmd *cmd)
3078{
3079 unsigned long flags;
3080 bool ret, aborted = false, tas = false;
3081
3082 spin_lock_irqsave(&cmd->t_state_lock, flags);
3083 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
3084 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3085
3086 return ret;
3087}
3088EXPORT_SYMBOL(transport_wait_for_tasks);
3089
3090struct sense_info {
3091 u8 key;
3092 u8 asc;
3093 u8 ascq;
3094 bool add_sector_info;
3095};
3096
3097static const struct sense_info sense_info_table[] = {
3098 [TCM_NO_SENSE] = {
3099 .key = NOT_READY
3100 },
3101 [TCM_NON_EXISTENT_LUN] = {
3102 .key = ILLEGAL_REQUEST,
3103 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
3104 },
3105 [TCM_UNSUPPORTED_SCSI_OPCODE] = {
3106 .key = ILLEGAL_REQUEST,
3107 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3108 },
3109 [TCM_SECTOR_COUNT_TOO_MANY] = {
3110 .key = ILLEGAL_REQUEST,
3111 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3112 },
3113 [TCM_UNKNOWN_MODE_PAGE] = {
3114 .key = ILLEGAL_REQUEST,
3115 .asc = 0x24, /* INVALID FIELD IN CDB */
3116 },
3117 [TCM_CHECK_CONDITION_ABORT_CMD] = {
3118 .key = ABORTED_COMMAND,
3119 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
3120 .ascq = 0x03,
3121 },
3122 [TCM_INCORRECT_AMOUNT_OF_DATA] = {
3123 .key = ABORTED_COMMAND,
3124 .asc = 0x0c, /* WRITE ERROR */
3125 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
3126 },
3127 [TCM_INVALID_CDB_FIELD] = {
3128 .key = ILLEGAL_REQUEST,
3129 .asc = 0x24, /* INVALID FIELD IN CDB */
3130 },
3131 [TCM_INVALID_PARAMETER_LIST] = {
3132 .key = ILLEGAL_REQUEST,
3133 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
3134 },
3135 [TCM_TOO_MANY_TARGET_DESCS] = {
3136 .key = ILLEGAL_REQUEST,
3137 .asc = 0x26,
3138 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
3139 },
3140 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
3141 .key = ILLEGAL_REQUEST,
3142 .asc = 0x26,
3143 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
3144 },
3145 [TCM_TOO_MANY_SEGMENT_DESCS] = {
3146 .key = ILLEGAL_REQUEST,
3147 .asc = 0x26,
3148 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
3149 },
3150 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
3151 .key = ILLEGAL_REQUEST,
3152 .asc = 0x26,
3153 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
3154 },
3155 [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
3156 .key = ILLEGAL_REQUEST,
3157 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
3158 },
3159 [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
3160 .key = ILLEGAL_REQUEST,
3161 .asc = 0x0c, /* WRITE ERROR */
3162 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
3163 },
3164 [TCM_SERVICE_CRC_ERROR] = {
3165 .key = ABORTED_COMMAND,
3166 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
3167 .ascq = 0x05, /* N/A */
3168 },
3169 [TCM_SNACK_REJECTED] = {
3170 .key = ABORTED_COMMAND,
3171 .asc = 0x11, /* READ ERROR */
3172 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
3173 },
3174 [TCM_WRITE_PROTECTED] = {
3175 .key = DATA_PROTECT,
3176 .asc = 0x27, /* WRITE PROTECTED */
3177 },
3178 [TCM_ADDRESS_OUT_OF_RANGE] = {
3179 .key = ILLEGAL_REQUEST,
3180 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
3181 },
3182 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
3183 .key = UNIT_ATTENTION,
3184 },
3185 [TCM_CHECK_CONDITION_NOT_READY] = {
3186 .key = NOT_READY,
3187 },
3188 [TCM_MISCOMPARE_VERIFY] = {
3189 .key = MISCOMPARE,
3190 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
3191 .ascq = 0x00,
3192 },
3193 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
3194 .key = ABORTED_COMMAND,
3195 .asc = 0x10,
3196 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
3197 .add_sector_info = true,
3198 },
3199 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
3200 .key = ABORTED_COMMAND,
3201 .asc = 0x10,
3202 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
3203 .add_sector_info = true,
3204 },
3205 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
3206 .key = ABORTED_COMMAND,
3207 .asc = 0x10,
3208 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
3209 .add_sector_info = true,
3210 },
3211 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3212 .key = COPY_ABORTED,
3213 .asc = 0x0d,
3214 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
3216 },
3217 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3218 /*
3219 * Returning ILLEGAL REQUEST would cause immediate IO errors on
3220 * Solaris initiators. Returning NOT READY instead means the
3221 * operations will be retried a finite number of times and we
3222 * can survive intermittent errors.
3223 */
3224 .key = NOT_READY,
3225 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3226 },
3227 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3228 /*
3229	 * From spc4r22 sections 5.7.7 and 5.7.8:
3230	 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
3231	 * or a REGISTER AND IGNORE EXISTING KEY service action or
3232	 * REGISTER AND MOVE service action is attempted,
3233	 * but there are insufficient device server resources to complete the
3234	 * operation, then the command shall be terminated with CHECK CONDITION
3235	 * status, with the sense key set to ILLEGAL REQUEST, and the additional
3236	 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3237 */
3238 .key = ILLEGAL_REQUEST,
3239 .asc = 0x55,
3240 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
3241 },
3242};
3243
3244/**
3245 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
3246 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
3247 * be stored.
3248 * @reason: LIO sense reason code. If this argument has the value
3249 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
3250 * dequeuing a unit attention fails due to multiple commands being processed
3251 * concurrently, set the command status to BUSY.
3254 */
3255static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
3256{
3257 const struct sense_info *si;
3258 u8 *buffer = cmd->sense_buffer;
3259 int r = (__force int)reason;
3260 u8 key, asc, ascq;
3261 bool desc_format = target_sense_desc_format(cmd->se_dev);
3262
3263 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
3264 si = &sense_info_table[r];
3265 else
3266 si = &sense_info_table[(__force int)
3267 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
3268
3269 key = si->key;
3270 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
3271 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
3272 &ascq)) {
3273 cmd->scsi_status = SAM_STAT_BUSY;
3274 return;
3275 }
3276 } else if (si->asc == 0) {
3277 WARN_ON_ONCE(cmd->scsi_asc == 0);
3278 asc = cmd->scsi_asc;
3279 ascq = cmd->scsi_ascq;
3280 } else {
3281 asc = si->asc;
3282 ascq = si->ascq;
3283 }
3284
3285 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
3286 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3287 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
3288 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
3289 if (si->add_sector_info)
3290 WARN_ON_ONCE(scsi_set_sense_information(buffer,
3291 cmd->scsi_sense_length,
3292 cmd->bad_sector) < 0);
3293}
3294
3295int
3296transport_send_check_condition_and_sense(struct se_cmd *cmd,
3297 sense_reason_t reason, int from_transport)
3298{
3299 unsigned long flags;
3300
3301 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3302
3303 spin_lock_irqsave(&cmd->t_state_lock, flags);
3304 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3305 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3306 return 0;
3307 }
3308 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
3309 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3310
3311 if (!from_transport)
3312 translate_sense_reason(cmd, reason);
3313
3314 trace_target_cmd_complete(cmd);
3315 return cmd->se_tfo->queue_status(cmd);
3316}
3317EXPORT_SYMBOL(transport_send_check_condition_and_sense);
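
/*
 * Example (illustrative sketch, hypothetical demo_* name): a backend that
 * detects an invalid CDB field fails the command with a CHECK CONDITION
 * built from sense_info_table[] above. from_transport = 0 routes the
 * reason through translate_sense_reason(), yielding ILLEGAL REQUEST with
 * ASC 0x24 (INVALID FIELD IN CDB).
 *
 *	static int demo_fail_invalid_field(struct se_cmd *cmd)
 *	{
 *		return transport_send_check_condition_and_sense(cmd,
 *					TCM_INVALID_CDB_FIELD, 0);
 *	}
 */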
3318
3319/**
3320 * target_send_busy - Send SCSI BUSY status back to the initiator
3321 * @cmd: SCSI command for which to send a BUSY reply.
3322 *
3323 * Note: Only call this function if target_submit_cmd*() failed.
3324 */
3325int target_send_busy(struct se_cmd *cmd)
3326{
3327 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3328
3329 cmd->scsi_status = SAM_STAT_BUSY;
3330 trace_target_cmd_complete(cmd);
3331 return cmd->se_tfo->queue_status(cmd);
3332}
3333EXPORT_SYMBOL(target_send_busy);
3334
3335static void target_tmr_work(struct work_struct *work)
3336{
3337 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3338 struct se_device *dev = cmd->se_dev;
3339 struct se_tmr_req *tmr = cmd->se_tmr_req;
3340 int ret;
3341
3342 if (cmd->transport_state & CMD_T_ABORTED)
3343 goto aborted;
3344
3345 switch (tmr->function) {
3346 case TMR_ABORT_TASK:
3347 core_tmr_abort_task(dev, tmr, cmd->se_sess);
3348 break;
3349 case TMR_ABORT_TASK_SET:
3350 case TMR_CLEAR_ACA:
3351 case TMR_CLEAR_TASK_SET:
3352 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3353 break;
3354 case TMR_LUN_RESET:
3355 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3356 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3357 TMR_FUNCTION_REJECTED;
3358 if (tmr->response == TMR_FUNCTION_COMPLETE) {
3359 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3360 cmd->orig_fe_lun, 0x29,
3361 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3362 }
3363 break;
3364 case TMR_TARGET_WARM_RESET:
3365 tmr->response = TMR_FUNCTION_REJECTED;
3366 break;
3367 case TMR_TARGET_COLD_RESET:
3368 tmr->response = TMR_FUNCTION_REJECTED;
3369 break;
3370 default:
3371 pr_err("Unknown TMR function: 0x%02x.\n",
3372 tmr->function);
3373 tmr->response = TMR_FUNCTION_REJECTED;
3374 break;
3375 }
3376
3377 if (cmd->transport_state & CMD_T_ABORTED)
3378 goto aborted;
3379
3380 cmd->se_tfo->queue_tm_rsp(cmd);
3381
3382 transport_lun_remove_cmd(cmd);
3383 transport_cmd_check_stop_to_fabric(cmd);
3384 return;
3385
3386aborted:
3387 target_handle_abort(cmd);
3388}
3389
3390int transport_generic_handle_tmr(
3391 struct se_cmd *cmd)
3392{
3393 unsigned long flags;
3394 bool aborted = false;
3395
3396 spin_lock_irqsave(&cmd->t_state_lock, flags);
3397 if (cmd->transport_state & CMD_T_ABORTED) {
3398 aborted = true;
3399 } else {
3400 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3401 cmd->transport_state |= CMD_T_ACTIVE;
3402 }
3403 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3404
3405 if (aborted) {
3406 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
3407 cmd->se_tmr_req->function,
3408 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3409 target_handle_abort(cmd);
3410 return 0;
3411 }
3412
3413 INIT_WORK(&cmd->work, target_tmr_work);
3414 schedule_work(&cmd->work);
3415 return 0;
3416}
3417EXPORT_SYMBOL(transport_generic_handle_tmr);
3418
3419bool
3420target_check_wce(struct se_device *dev)
3421{
3422 bool wce = false;
3423
3424 if (dev->transport->get_write_cache)
3425 wce = dev->transport->get_write_cache(dev);
3426 else if (dev->dev_attrib.emulate_write_cache > 0)
3427 wce = true;
3428
3429 return wce;
3430}
3431
3432bool
3433target_check_fua(struct se_device *dev)
3434{
3435 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3436}