
target: Eliminate usage of struct se_mem

Both backstores and fabrics use arrays of struct scatterlist to describe
data buffers. However, TCM used struct se_mem, essentially a linked list
of scatterlist entries. We can simplify the code by eliminating this
intermediate data structure and using struct scatterlist[] throughout.
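
For illustration, a minimal sketch of the new-style traversal; the
walk_data_buffer() helper is hypothetical, but the se_cmd fields and
scatterlist calls are the ones this patch uses:

    #include <linux/scatterlist.h>

    /* Walk a command's data buffer directly as struct scatterlist[],
     * with no intermediate struct se_mem list in between. */
    static void walk_data_buffer(struct se_cmd *cmd)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
                    /* each entry carries its page, offset and length */
                    void *vaddr = kmap(sg_page(sg)) + sg->offset;

                    /* ... operate on sg->length bytes at vaddr ... */
                    kunmap(sg_page(sg));
            }
    }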

Also, move the attachment of a task to its cmd out of
transport_generic_get_task and into allocate_control_task and
allocate_data_tasks. The reasoning is that it is unintuitive for
get_task to automatically add the task to the cmd's task list -- based
on the function's name, it should just return an allocated, initialized
task. Either the function shouldn't do the attachment, or its name
should change to reflect everything it does.
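
Sketched from the allocate_data_tasks() hunk below, the call site now
does the attachment itself (the wrapper name here is hypothetical):

    static struct se_task *get_and_attach_task(struct se_cmd *cmd,
                                               enum dma_data_direction dir)
    {
            struct se_task *task;
            unsigned long flags;

            /* get_task only allocates and initializes the task now */
            task = transport_generic_get_task(cmd, dir);
            if (!task)
                    return NULL;

            /* the caller attaches it to the cmd's task list explicitly */
            spin_lock_irqsave(&cmd->t_state_lock, flags);
            list_add_tail(&task->t_list, &cmd->t_task_list);
            spin_unlock_irqrestore(&cmd->t_state_lock, flags);
            return task;
    }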

(nab: Fix compile warnings in tcm_fc, make transport_kmap_first_data_page
honor sg->offset for SGLs from contiguous memory with TCM_Loop, and
fix a control se_cmd descriptor memory leak)
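
The sg->offset fix amounts to mapping the first SG entry with its
offset applied, per the target_core_transport.c hunk below:

    void *transport_kmap_first_data_page(struct se_cmd *cmd)
    {
            struct scatterlist *sg = cmd->t_data_sg;

            BUG_ON(!sg);
            /* honor sg->offset for SGLs built over contiguous memory,
             * e.g. a midlayer buffer passed through by TCM_Loop */
            return kmap(sg_page(sg)) + sg->offset;
    }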

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>

Authored by Andy Grover; committed by Nicholas Bellinger
ec98f782 3a867205

+322 -793
+1 -4
drivers/target/loopback/tcm_loop.c
··· 175 175 sgl_bidi_count = sdb->table.nents; 176 176 } 177 177 178 - /* 179 - * Map the SG memory into struct se_mem->page linked list using the same 180 - * physical memory at sg->page_link. 181 - */ 178 + /* Tell the core about our preallocated memory */ 182 179 ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), 183 180 scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); 184 181 if (ret < 0)
+1 -1
drivers/target/target_core_iblock.c
··· 634 634 hbio = tbio = bio; 635 635 /* 636 636 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist 637 - * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory. 637 + * from task->task_sg -> struct scatterlist memory. 638 638 */ 639 639 for_each_sg(task->task_sg, sg, task->task_sg_num, i) { 640 640 DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+1 -1
drivers/target/target_core_pscsi.c
··· 1097 1097 return 0; 1098 1098 /* 1099 1099 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup 1100 - * the bio_vec maplist from TC< struct se_mem -> task->task_sg -> 1100 + * the bio_vec maplist from task->task_sg -> 1101 1101 * struct scatterlist memory. The struct se_task->task_sg[] currently needs 1102 1102 * to be attached to struct bios for submission to Linux/SCSI using 1103 1103 * struct request to struct scsi_device->request_queue.
+274 -739
drivers/target/target_core_transport.c
··· 190 190 static struct kmem_cache *se_sess_cache; 191 191 struct kmem_cache *se_tmr_req_cache; 192 192 struct kmem_cache *se_ua_cache; 193 - struct kmem_cache *se_mem_cache; 194 193 struct kmem_cache *t10_pr_reg_cache; 195 194 struct kmem_cache *t10_alua_lu_gp_cache; 196 195 struct kmem_cache *t10_alua_lu_gp_mem_cache; ··· 209 210 static void transport_direct_request_timeout(struct se_cmd *cmd); 210 211 static void transport_free_dev_tasks(struct se_cmd *cmd); 211 212 static u32 transport_allocate_tasks(struct se_cmd *cmd, 212 - unsigned long long starting_lba, u32 sectors, 213 + unsigned long long starting_lba, 213 214 enum dma_data_direction data_direction, 214 - struct list_head *mem_list, int set_counts); 215 + struct scatterlist *sgl, unsigned int nents); 215 216 static int transport_generic_get_mem(struct se_cmd *cmd); 216 217 static int transport_generic_remove(struct se_cmd *cmd, 217 218 int session_reinstatement); 218 - static int transport_cmd_get_valid_sectors(struct se_cmd *cmd); 219 - static int transport_map_sg_to_mem(struct se_cmd *cmd, 220 - struct list_head *se_mem_list, struct scatterlist *sgl); 221 - static void transport_memcpy_se_mem_read_contig(unsigned char *dst, 222 - struct list_head *se_mem_list, u32 len); 223 219 static void transport_release_fe_cmd(struct se_cmd *cmd); 224 220 static void transport_remove_cmd_from_queue(struct se_cmd *cmd, 225 221 struct se_queue_obj *qobj); ··· 250 256 0, NULL); 251 257 if (!(se_ua_cache)) { 252 258 printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); 253 - goto out; 254 - } 255 - se_mem_cache = kmem_cache_create("se_mem_cache", 256 - sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); 257 - if (!(se_mem_cache)) { 258 - printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); 259 259 goto out; 260 260 } 261 261 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", ··· 305 317 kmem_cache_destroy(se_sess_cache); 306 318 if (se_ua_cache) 307 319 kmem_cache_destroy(se_ua_cache); 308 - if (se_mem_cache) 309 - kmem_cache_destroy(se_mem_cache); 310 320 if (t10_pr_reg_cache) 311 321 kmem_cache_destroy(t10_pr_reg_cache); 312 322 if (t10_alua_lu_gp_cache) ··· 324 338 kmem_cache_destroy(se_tmr_req_cache); 325 339 kmem_cache_destroy(se_sess_cache); 326 340 kmem_cache_destroy(se_ua_cache); 327 - kmem_cache_destroy(se_mem_cache); 328 341 kmem_cache_destroy(t10_pr_reg_cache); 329 342 kmem_cache_destroy(t10_alua_lu_gp_cache); 330 343 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); ··· 1687 1702 { 1688 1703 struct se_task *task; 1689 1704 struct se_device *dev = cmd->se_dev; 1690 - unsigned long flags; 1691 1705 1692 1706 task = dev->transport->alloc_task(cmd); 1693 1707 if (!task) { ··· 1701 1717 task->task_se_cmd = cmd; 1702 1718 task->se_dev = dev; 1703 1719 task->task_data_direction = data_direction; 1704 - 1705 - spin_lock_irqsave(&cmd->t_state_lock, flags); 1706 - list_add_tail(&task->t_list, &cmd->t_task_list); 1707 - spin_unlock_irqrestore(&cmd->t_state_lock, flags); 1708 1720 1709 1721 return task; 1710 1722 } ··· 1725 1745 INIT_LIST_HEAD(&cmd->se_ordered_node); 1726 1746 INIT_LIST_HEAD(&cmd->se_qf_node); 1727 1747 1728 - INIT_LIST_HEAD(&cmd->t_mem_list); 1729 - INIT_LIST_HEAD(&cmd->t_mem_bidi_list); 1730 1748 INIT_LIST_HEAD(&cmd->t_task_list); 1731 1749 init_completion(&cmd->transport_lun_fe_stop_comp); 1732 1750 init_completion(&cmd->transport_lun_stop_comp); ··· 2816 2838 static void transport_xor_callback(struct se_cmd *cmd) 2817 2839 { 2818 2840 unsigned char *buf, *addr; 2819 - struct se_mem 
*se_mem; 2841 + struct scatterlist *sg; 2820 2842 unsigned int offset; 2821 2843 int i; 2844 + int count; 2822 2845 /* 2823 2846 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 2824 2847 * ··· 2837 2858 return; 2838 2859 } 2839 2860 /* 2840 - * Copy the scatterlist WRITE buffer located at cmd->t_mem_list 2861 + * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 2841 2862 * into the locally allocated *buf 2842 2863 */ 2843 - transport_memcpy_se_mem_read_contig(buf, &cmd->t_mem_list, 2844 - cmd->data_length); 2864 + sg_copy_to_buffer(cmd->t_data_sg, 2865 + cmd->t_data_nents, 2866 + buf, 2867 + cmd->data_length); 2868 + 2845 2869 /* 2846 2870 * Now perform the XOR against the BIDI read memory located at 2847 2871 * cmd->t_mem_bidi_list 2848 2872 */ 2849 2873 2850 2874 offset = 0; 2851 - list_for_each_entry(se_mem, &cmd->t_mem_bidi_list, se_list) { 2852 - addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); 2853 - if (!(addr)) 2875 + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 2876 + addr = kmap_atomic(sg_page(sg), KM_USER0); 2877 + if (!addr) 2854 2878 goto out; 2855 2879 2856 - for (i = 0; i < se_mem->se_len; i++) 2857 - *(addr + se_mem->se_off + i) ^= *(buf + offset + i); 2880 + for (i = 0; i < sg->length; i++) 2881 + *(addr + sg->offset + i) ^= *(buf + offset + i); 2858 2882 2859 - offset += se_mem->se_len; 2883 + offset += sg->length; 2860 2884 kunmap_atomic(addr, KM_USER0); 2861 2885 } 2886 + 2862 2887 out: 2863 2888 kfree(buf); 2864 2889 } ··· 2952 2969 cmd->orig_fe_lun, 0x2C, 2953 2970 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 2954 2971 return -EINVAL; 2972 + } 2973 + 2974 + static inline long long transport_dev_end_lba(struct se_device *dev) 2975 + { 2976 + return dev->transport->get_blocks(dev) + 1; 2977 + } 2978 + 2979 + static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) 2980 + { 2981 + struct se_device *dev = cmd->se_dev; 2982 + u32 sectors; 2983 + 2984 + if (dev->transport->get_device_type(dev) != TYPE_DISK) 2985 + return 0; 2986 + 2987 + sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); 2988 + 2989 + if ((cmd->t_task_lba + sectors) > 2990 + transport_dev_end_lba(dev)) { 2991 + printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" 2992 + " transport_dev_end_lba(): %llu\n", 2993 + cmd->t_task_lba, sectors, 2994 + transport_dev_end_lba(dev)); 2995 + printk(KERN_ERR " We should return CHECK_CONDITION" 2996 + " but we don't yet\n"); 2997 + return 0; 2998 + } 2999 + 3000 + return sectors; 2955 3001 } 2956 3002 2957 3003 /* transport_generic_cmd_sequencer(): ··· 3592 3580 return -EINVAL; 3593 3581 } 3594 3582 3595 - static inline void transport_release_tasks(struct se_cmd *); 3596 - 3597 - static void transport_memcpy_se_mem_read_contig( 3598 - unsigned char *dst, 3599 - struct list_head *se_mem_list, 3600 - u32 tot_len) 3601 - { 3602 - struct se_mem *se_mem; 3603 - void *src; 3604 - u32 length; 3605 - 3606 - list_for_each_entry(se_mem, se_mem_list, se_list) { 3607 - length = min_t(u32, se_mem->se_len, tot_len); 3608 - src = page_address(se_mem->se_page) + se_mem->se_off; 3609 - memcpy(dst, src, length); 3610 - tot_len -= length; 3611 - if (!tot_len) 3612 - break; 3613 - dst += length; 3614 - } 3615 - } 3616 - 3617 3583 /* 3618 3584 * Called from transport_generic_complete_ok() and 3619 3585 * transport_generic_request_failure() to determine which dormant/delayed ··· 3674 3684 ret = cmd->se_tfo->queue_data_in(cmd); 3675 3685 break; 3676 3686 case DMA_TO_DEVICE: 3677 - if 
(!list_empty(&cmd->t_mem_bidi_list)) { 3687 + if (cmd->t_bidi_data_sg) { 3678 3688 ret = cmd->se_tfo->queue_data_in(cmd); 3679 3689 if (ret < 0) 3680 3690 return ret; ··· 3784 3794 /* 3785 3795 * Check if we need to send READ payload for BIDI-COMMAND 3786 3796 */ 3787 - if (!list_empty(&cmd->t_mem_bidi_list)) { 3797 + if (cmd->t_bidi_data_sg) { 3788 3798 spin_lock(&cmd->se_lun->lun_sep_lock); 3789 3799 if (cmd->se_lun->lun_sep) { 3790 3800 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += ··· 3846 3856 3847 3857 static inline void transport_free_pages(struct se_cmd *cmd) 3848 3858 { 3849 - struct se_mem *se_mem, *se_mem_tmp; 3859 + struct scatterlist *sg; 3850 3860 int free_page = 1; 3861 + int count; 3851 3862 3852 3863 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 3853 3864 free_page = 0; 3854 3865 if (cmd->se_dev->transport->do_se_mem_map) 3855 3866 free_page = 0; 3856 3867 3857 - list_for_each_entry_safe(se_mem, se_mem_tmp, 3858 - &cmd->t_mem_list, se_list) { 3868 + for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) { 3859 3869 /* 3860 - * We only release call __free_page(struct se_mem->se_page) when 3870 + * Only called if 3861 3871 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, 3862 3872 */ 3863 3873 if (free_page) 3864 - __free_page(se_mem->se_page); 3874 + __free_page(sg_page(sg)); 3865 3875 3866 - list_del(&se_mem->se_list); 3867 - kmem_cache_free(se_mem_cache, se_mem); 3868 3876 } 3869 - cmd->t_tasks_se_num = 0; 3877 + if (free_page) 3878 + kfree(cmd->t_data_sg); 3879 + cmd->t_data_sg = NULL; 3880 + cmd->t_data_nents = 0; 3870 3881 3871 - list_for_each_entry_safe(se_mem, se_mem_tmp, 3872 - &cmd->t_mem_bidi_list, se_list) { 3882 + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 3873 3883 /* 3874 - * We only release call __free_page(struct se_mem->se_page) when 3884 + * Only called if 3875 3885 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, 3876 3886 */ 3877 3887 if (free_page) 3878 - __free_page(se_mem->se_page); 3888 + __free_page(sg_page(sg)); 3879 3889 3880 - list_del(&se_mem->se_list); 3881 - kmem_cache_free(se_mem_cache, se_mem); 3882 3890 } 3883 - cmd->t_tasks_se_bidi_num = 0; 3891 + if (free_page) 3892 + kfree(cmd->t_bidi_data_sg); 3893 + cmd->t_bidi_data_sg = NULL; 3894 + cmd->t_bidi_data_nents = 0; 3884 3895 } 3885 3896 3886 3897 static inline void transport_release_tasks(struct se_cmd *cmd) ··· 3970 3979 } 3971 3980 3972 3981 /* 3973 - * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map 3982 + * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of 3983 + * allocating in the core. 3974 3984 * @cmd: Associated se_cmd descriptor 3975 3985 * @mem: SGL style memory for TCM WRITE / READ 3976 3986 * @sg_mem_num: Number of SGL elements ··· 3988 3996 struct scatterlist *sgl_bidi, 3989 3997 u32 sgl_bidi_count) 3990 3998 { 3991 - int ret; 3992 - 3993 3999 if (!sgl || !sgl_count) 3994 4000 return 0; 3995 4001 3996 - /* 3997 - * Convert sgls (sgl, sgl_bidi) to list of se_mems 3998 - */ 3999 4002 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || 4000 4003 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { 4001 - /* 4002 - * For CDB using TCM struct se_mem linked list scatterlist memory 4003 - * processed into a TCM struct se_subsystem_dev, we do the mapping 4004 - * from the passed physical memory to struct se_mem->se_page here. 
4005 - */ 4006 - ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl); 4007 - if (ret < 0) 4008 - return -ENOMEM; 4009 4004 4010 - cmd->t_tasks_se_num = ret; 4011 - /* 4012 - * Setup BIDI READ list of struct se_mem elements 4013 - */ 4005 + cmd->t_data_sg = sgl; 4006 + cmd->t_data_nents = sgl_count; 4007 + 4014 4008 if (sgl_bidi && sgl_bidi_count) { 4015 - ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_bidi_list, sgl_bidi); 4016 - if (ret < 0) 4017 - return -ENOMEM; 4018 - 4019 - cmd->t_tasks_se_bidi_num = ret; 4009 + cmd->t_bidi_data_sg = sgl_bidi; 4010 + cmd->t_bidi_data_nents = sgl_bidi_count; 4020 4011 } 4021 4012 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 4022 4013 } ··· 4008 4033 } 4009 4034 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); 4010 4035 4011 - 4012 - static inline long long transport_dev_end_lba(struct se_device *dev) 4013 - { 4014 - return dev->transport->get_blocks(dev) + 1; 4015 - } 4016 - 4017 - static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) 4018 - { 4019 - struct se_device *dev = cmd->se_dev; 4020 - u32 sectors; 4021 - 4022 - if (dev->transport->get_device_type(dev) != TYPE_DISK) 4023 - return 0; 4024 - 4025 - sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); 4026 - 4027 - if ((cmd->t_task_lba + sectors) > 4028 - transport_dev_end_lba(dev)) { 4029 - printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" 4030 - " transport_dev_end_lba(): %llu\n", 4031 - cmd->t_task_lba, sectors, 4032 - transport_dev_end_lba(dev)); 4033 - return 0; 4034 - } 4035 - 4036 - return sectors; 4037 - } 4038 - 4039 4036 static int transport_new_cmd_obj(struct se_cmd *cmd) 4040 4037 { 4041 4038 struct se_device *dev = cmd->se_dev; 4042 4039 u32 task_cdbs; 4043 4040 u32 rc; 4041 + int set_counts = 1; 4044 4042 4045 - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { 4046 - task_cdbs = 1; 4047 - cmd->t_task_list_num = 1; 4048 - } else { 4049 - int set_counts = 1; 4050 - 4051 - /* 4052 - * Setup any BIDI READ tasks and memory from 4053 - * cmd->t_mem_bidi_list so the READ struct se_tasks 4054 - * are queued first for the non pSCSI passthrough case. 4055 - */ 4056 - if (!list_empty(&cmd->t_mem_bidi_list) && 4057 - (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { 4058 - rc = transport_allocate_tasks(cmd, 4059 - cmd->t_task_lba, 4060 - transport_cmd_get_valid_sectors(cmd), 4061 - DMA_FROM_DEVICE, &cmd->t_mem_bidi_list, 4062 - set_counts); 4063 - if (!(rc)) { 4064 - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4065 - cmd->scsi_sense_reason = 4066 - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 4067 - return PYX_TRANSPORT_LU_COMM_FAILURE; 4068 - } 4069 - set_counts = 0; 4070 - } 4071 - /* 4072 - * Setup the tasks and memory from cmd->t_mem_list 4073 - * Note for BIDI transfers this will contain the WRITE payload 4074 - */ 4075 - task_cdbs = transport_allocate_tasks(cmd, 4076 - cmd->t_task_lba, 4077 - transport_cmd_get_valid_sectors(cmd), 4078 - cmd->data_direction, &cmd->t_mem_list, 4079 - set_counts); 4080 - if (!(task_cdbs)) { 4043 + /* 4044 + * Setup any BIDI READ tasks and memory from 4045 + * cmd->t_mem_bidi_list so the READ struct se_tasks 4046 + * are queued first for the non pSCSI passthrough case. 
4047 + */ 4048 + if (cmd->t_bidi_data_sg && 4049 + (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { 4050 + rc = transport_allocate_tasks(cmd, 4051 + cmd->t_task_lba, 4052 + DMA_FROM_DEVICE, 4053 + cmd->t_bidi_data_sg, 4054 + cmd->t_bidi_data_nents); 4055 + if (!rc) { 4081 4056 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4082 4057 cmd->scsi_sense_reason = 4083 - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 4058 + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 4084 4059 return PYX_TRANSPORT_LU_COMM_FAILURE; 4085 4060 } 4086 - cmd->t_task_list_num = task_cdbs; 4087 - 4088 - #if 0 4089 - printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" 4090 - " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, 4091 - cmd->t_task_lba, cmd->t_tasks_sectors, 4092 - cmd->t_task_cdbs); 4093 - #endif 4061 + atomic_inc(&cmd->t_fe_count); 4062 + atomic_inc(&cmd->t_se_count); 4063 + set_counts = 0; 4094 4064 } 4065 + /* 4066 + * Setup the tasks and memory from cmd->t_mem_list 4067 + * Note for BIDI transfers this will contain the WRITE payload 4068 + */ 4069 + task_cdbs = transport_allocate_tasks(cmd, 4070 + cmd->t_task_lba, 4071 + cmd->data_direction, 4072 + cmd->t_data_sg, 4073 + cmd->t_data_nents); 4074 + if (!task_cdbs) { 4075 + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4076 + cmd->scsi_sense_reason = 4077 + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 4078 + return PYX_TRANSPORT_LU_COMM_FAILURE; 4079 + } 4080 + 4081 + if (set_counts) { 4082 + atomic_inc(&cmd->t_fe_count); 4083 + atomic_inc(&cmd->t_se_count); 4084 + } 4085 + 4086 + cmd->t_task_list_num = task_cdbs; 4095 4087 4096 4088 atomic_set(&cmd->t_task_cdbs_left, task_cdbs); 4097 4089 atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); ··· 4068 4126 4069 4127 void *transport_kmap_first_data_page(struct se_cmd *cmd) 4070 4128 { 4071 - struct se_mem *se_mem; 4129 + struct scatterlist *sg = cmd->t_data_sg; 4072 4130 4073 - BUG_ON(list_empty(&cmd->t_mem_list)); 4074 - 4075 - se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list); 4076 - 4131 + BUG_ON(!sg); 4077 4132 /* 4078 - * 1st se_mem should point to a page, and we shouldn't need more than 4079 - * that for this cmd 4133 + * We need to take into account a possible offset here for fabrics like 4134 + * tcm_loop who may be using a contig buffer from the SCSI midlayer for 4135 + * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 4080 4136 */ 4081 - BUG_ON(cmd->data_length > PAGE_SIZE); 4082 - 4083 - return kmap(se_mem->se_page); 4137 + return kmap(sg_page(sg)) + sg->offset; 4084 4138 } 4085 4139 EXPORT_SYMBOL(transport_kmap_first_data_page); 4086 4140 4087 4141 void transport_kunmap_first_data_page(struct se_cmd *cmd) 4088 4142 { 4089 - struct se_mem *se_mem; 4090 - 4091 - BUG_ON(list_empty(&cmd->t_mem_list)); 4092 - 4093 - se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list); 4094 - 4095 - kunmap(se_mem->se_page); 4143 + kunmap(sg_page(cmd->t_data_sg)); 4096 4144 } 4097 4145 EXPORT_SYMBOL(transport_kunmap_first_data_page); 4098 4146 4099 4147 static int 4100 4148 transport_generic_get_mem(struct se_cmd *cmd) 4101 4149 { 4102 - struct se_mem *se_mem; 4103 - int length = cmd->data_length; 4150 + u32 length = cmd->data_length; 4151 + unsigned int nents; 4152 + struct page *page; 4153 + int i = 0; 4104 4154 4105 4155 /* 4106 4156 * If the device uses memory mapping this is enough. 
··· 4100 4166 if (cmd->se_dev->transport->do_se_mem_map) 4101 4167 return 0; 4102 4168 4103 - /* Even cmds with length 0 will get here, btw */ 4104 - while (length) { 4105 - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); 4106 - if (!(se_mem)) { 4107 - printk(KERN_ERR "Unable to allocate struct se_mem\n"); 4108 - goto out; 4109 - } 4110 - 4111 - /* #warning FIXME Allocate contigous pages for struct se_mem elements */ 4112 - se_mem->se_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); 4113 - if (!(se_mem->se_page)) { 4114 - printk(KERN_ERR "alloc_pages() failed\n"); 4115 - goto out; 4116 - } 4117 - 4118 - INIT_LIST_HEAD(&se_mem->se_list); 4119 - se_mem->se_len = min_t(u32, length, PAGE_SIZE); 4120 - list_add_tail(&se_mem->se_list, &cmd->t_mem_list); 4121 - cmd->t_tasks_se_num++; 4122 - 4123 - DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" 4124 - " Offset(%u)\n", se_mem->se_page, se_mem->se_len, 4125 - se_mem->se_off); 4126 - 4127 - length -= se_mem->se_len; 4128 - } 4129 - 4130 - DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", 4131 - cmd->t_tasks_se_num); 4132 - 4133 - return 0; 4134 - out: 4135 - if (se_mem) 4136 - __free_pages(se_mem->se_page, 0); 4137 - kmem_cache_free(se_mem_cache, se_mem); 4138 - return -ENOMEM; 4139 - } 4140 - 4141 - int transport_init_task_sg( 4142 - struct se_task *task, 4143 - struct se_mem *in_se_mem, 4144 - u32 task_offset) 4145 - { 4146 - struct se_cmd *se_cmd = task->task_se_cmd; 4147 - struct se_device *se_dev = se_cmd->se_dev; 4148 - struct se_mem *se_mem = in_se_mem; 4149 - struct target_core_fabric_ops *tfo = se_cmd->se_tfo; 4150 - u32 sg_length, task_size = task->task_size, task_sg_num_padded; 4151 - 4152 - while (task_size != 0) { 4153 - DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" 4154 - " se_mem->se_off(%u) task_offset(%u)\n", 4155 - se_mem->se_page, se_mem->se_len, 4156 - se_mem->se_off, task_offset); 4157 - 4158 - if (task_offset == 0) { 4159 - if (task_size >= se_mem->se_len) { 4160 - sg_length = se_mem->se_len; 4161 - 4162 - if (!(list_is_last(&se_mem->se_list, 4163 - &se_cmd->t_mem_list))) 4164 - se_mem = list_entry(se_mem->se_list.next, 4165 - struct se_mem, se_list); 4166 - } else { 4167 - sg_length = task_size; 4168 - task_size -= sg_length; 4169 - goto next; 4170 - } 4171 - 4172 - DEBUG_SC("sg_length(%u) task_size(%u)\n", 4173 - sg_length, task_size); 4174 - } else { 4175 - if ((se_mem->se_len - task_offset) > task_size) { 4176 - sg_length = task_size; 4177 - task_size -= sg_length; 4178 - goto next; 4179 - } else { 4180 - sg_length = (se_mem->se_len - task_offset); 4181 - 4182 - if (!(list_is_last(&se_mem->se_list, 4183 - &se_cmd->t_mem_list))) 4184 - se_mem = list_entry(se_mem->se_list.next, 4185 - struct se_mem, se_list); 4186 - } 4187 - 4188 - DEBUG_SC("sg_length(%u) task_size(%u)\n", 4189 - sg_length, task_size); 4190 - 4191 - task_offset = 0; 4192 - } 4193 - task_size -= sg_length; 4194 - next: 4195 - DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", 4196 - task->task_no, task_size); 4197 - 4198 - task->task_sg_num++; 4199 - } 4200 - /* 4201 - * Check if the fabric module driver is requesting that all 4202 - * struct se_task->task_sg[] be chained together.. If so, 4203 - * then allocate an extra padding SG entry for linking and 4204 - * marking the end of the chained SGL. 
4205 - */ 4206 - if (tfo->task_sg_chaining) { 4207 - task_sg_num_padded = (task->task_sg_num + 1); 4208 - task->task_padded_sg = 1; 4209 - } else 4210 - task_sg_num_padded = task->task_sg_num; 4211 - 4212 - task->task_sg = kzalloc(task_sg_num_padded * 4213 - sizeof(struct scatterlist), GFP_KERNEL); 4214 - if (!(task->task_sg)) { 4215 - printk(KERN_ERR "Unable to allocate memory for" 4216 - " task->task_sg\n"); 4169 + nents = DIV_ROUND_UP(length, PAGE_SIZE); 4170 + cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); 4171 + if (!cmd->t_data_sg) 4217 4172 return -ENOMEM; 4218 - } 4219 - sg_init_table(&task->task_sg[0], task_sg_num_padded); 4220 - /* 4221 - * Setup task->task_sg_bidi for SCSI READ payload for 4222 - * TCM/pSCSI passthrough if present for BIDI-COMMAND 4223 - */ 4224 - if (!list_empty(&se_cmd->t_mem_bidi_list) && 4225 - (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { 4226 - task->task_sg_bidi = kzalloc(task_sg_num_padded * 4227 - sizeof(struct scatterlist), GFP_KERNEL); 4228 - if (!(task->task_sg_bidi)) { 4229 - kfree(task->task_sg); 4230 - task->task_sg = NULL; 4231 - printk(KERN_ERR "Unable to allocate memory for" 4232 - " task->task_sg_bidi\n"); 4233 - return -ENOMEM; 4234 - } 4235 - sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); 4236 - } 4237 - /* 4238 - * For the chaining case, setup the proper end of SGL for the 4239 - * initial submission struct task into struct se_subsystem_api. 4240 - * This will be cleared later by transport_do_task_sg_chain() 4241 - */ 4242 - if (task->task_padded_sg) { 4243 - sg_mark_end(&task->task_sg[task->task_sg_num - 1]); 4244 - /* 4245 - * Added the 'if' check before marking end of bi-directional 4246 - * scatterlist (which gets created only in case of request 4247 - * (RD + WR). 4248 - */ 4249 - if (task->task_sg_bidi) 4250 - sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); 4251 - } 4252 4173 4253 - DEBUG_SC("Successfully allocated task->task_sg_num(%u)," 4254 - " task_sg_num_padded(%u)\n", task->task_sg_num, 4255 - task_sg_num_padded); 4174 + cmd->t_data_nents = nents; 4175 + sg_init_table(cmd->t_data_sg, nents); 4256 4176 4257 - return task->task_sg_num; 4177 + while (length) { 4178 + u32 page_len = min_t(u32, length, PAGE_SIZE); 4179 + page = alloc_page(GFP_KERNEL | __GFP_ZERO); 4180 + if (!page) 4181 + goto out; 4182 + 4183 + sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); 4184 + length -= page_len; 4185 + i++; 4186 + } 4187 + return 0; 4188 + 4189 + out: 4190 + while (i >= 0) { 4191 + __free_page(sg_page(&cmd->t_data_sg[i])); 4192 + i--; 4193 + } 4194 + kfree(cmd->t_data_sg); 4195 + cmd->t_data_sg = NULL; 4196 + return -ENOMEM; 4258 4197 } 4259 4198 4260 4199 /* Reduce sectors if they are too long for the device */ ··· 4145 4338 return sectors; 4146 4339 } 4147 4340 4148 - /* 4149 - * Convert a sgl into a linked list of se_mems. 4150 - */ 4151 - static int transport_map_sg_to_mem( 4152 - struct se_cmd *cmd, 4153 - struct list_head *se_mem_list, 4154 - struct scatterlist *sg) 4155 - { 4156 - struct se_mem *se_mem; 4157 - u32 cmd_size = cmd->data_length; 4158 - int sg_count = 0; 4159 - 4160 - WARN_ON(!sg); 4161 - 4162 - while (cmd_size) { 4163 - /* 4164 - * NOTE: it is safe to return -ENOMEM at any time in creating this 4165 - * list because transport_free_pages() will eventually be called, and is 4166 - * smart enough to deallocate all list items for sg and sg_bidi lists. 
4167 - */ 4168 - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); 4169 - if (!(se_mem)) { 4170 - printk(KERN_ERR "Unable to allocate struct se_mem\n"); 4171 - return -ENOMEM; 4172 - } 4173 - INIT_LIST_HEAD(&se_mem->se_list); 4174 - DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" 4175 - " sg_page: %p offset: %d length: %d\n", cmd_size, 4176 - sg_page(sg), sg->offset, sg->length); 4177 - 4178 - se_mem->se_page = sg_page(sg); 4179 - se_mem->se_off = sg->offset; 4180 - 4181 - if (cmd_size > sg->length) { 4182 - se_mem->se_len = sg->length; 4183 - sg = sg_next(sg); 4184 - } else 4185 - se_mem->se_len = cmd_size; 4186 - 4187 - cmd_size -= se_mem->se_len; 4188 - sg_count++; 4189 - 4190 - DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n", 4191 - sg_count, cmd_size); 4192 - DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", 4193 - se_mem->se_page, se_mem->se_off, se_mem->se_len); 4194 - 4195 - list_add_tail(&se_mem->se_list, se_mem_list); 4196 - } 4197 - 4198 - DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count); 4199 - 4200 - return sg_count; 4201 - } 4202 - 4203 - /* transport_map_mem_to_sg(): 4204 - * 4205 - * 4206 - */ 4207 - int transport_map_mem_to_sg( 4208 - struct se_task *task, 4209 - struct list_head *se_mem_list, 4210 - struct scatterlist *sg, 4211 - struct se_mem *in_se_mem, 4212 - struct se_mem **out_se_mem, 4213 - u32 *se_mem_cnt, 4214 - u32 *task_offset) 4215 - { 4216 - struct se_cmd *se_cmd = task->task_se_cmd; 4217 - struct se_mem *se_mem = in_se_mem; 4218 - u32 task_size = task->task_size, sg_no = 0; 4219 - 4220 - if (!sg) { 4221 - printk(KERN_ERR "Unable to locate valid struct" 4222 - " scatterlist pointer\n"); 4223 - return -EINVAL; 4224 - } 4225 - 4226 - while (task_size != 0) { 4227 - /* 4228 - * Setup the contiguous array of scatterlists for 4229 - * this struct se_task. 4230 - */ 4231 - sg_assign_page(sg, se_mem->se_page); 4232 - 4233 - if (*task_offset == 0) { 4234 - sg->offset = se_mem->se_off; 4235 - 4236 - if (task_size >= se_mem->se_len) { 4237 - sg->length = se_mem->se_len; 4238 - 4239 - if (!(list_is_last(&se_mem->se_list, 4240 - &se_cmd->t_mem_list))) { 4241 - se_mem = list_entry(se_mem->se_list.next, 4242 - struct se_mem, se_list); 4243 - (*se_mem_cnt)++; 4244 - } 4245 - } else { 4246 - sg->length = task_size; 4247 - /* 4248 - * Determine if we need to calculate an offset 4249 - * into the struct se_mem on the next go around.. 4250 - */ 4251 - task_size -= sg->length; 4252 - if (!(task_size)) 4253 - *task_offset = sg->length; 4254 - 4255 - goto next; 4256 - } 4257 - 4258 - } else { 4259 - sg->offset = (*task_offset + se_mem->se_off); 4260 - 4261 - if ((se_mem->se_len - *task_offset) > task_size) { 4262 - sg->length = task_size; 4263 - /* 4264 - * Determine if we need to calculate an offset 4265 - * into the struct se_mem on the next go around.. 
4266 - */ 4267 - task_size -= sg->length; 4268 - if (!(task_size)) 4269 - *task_offset += sg->length; 4270 - 4271 - goto next; 4272 - } else { 4273 - sg->length = (se_mem->se_len - *task_offset); 4274 - 4275 - if (!(list_is_last(&se_mem->se_list, 4276 - &se_cmd->t_mem_list))) { 4277 - se_mem = list_entry(se_mem->se_list.next, 4278 - struct se_mem, se_list); 4279 - (*se_mem_cnt)++; 4280 - } 4281 - } 4282 - 4283 - *task_offset = 0; 4284 - } 4285 - task_size -= sg->length; 4286 - next: 4287 - DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" 4288 - " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, 4289 - sg_page(sg), sg->length, sg->offset, task_size, *task_offset); 4290 - 4291 - sg_no++; 4292 - if (!(task_size)) 4293 - break; 4294 - 4295 - sg = sg_next(sg); 4296 - 4297 - if (task_size > se_cmd->data_length) 4298 - BUG(); 4299 - } 4300 - *out_se_mem = se_mem; 4301 - 4302 - DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" 4303 - " SGs\n", task->task_no, *se_mem_cnt, sg_no); 4304 - 4305 - return 0; 4306 - } 4307 4341 4308 4342 /* 4309 4343 * This function can be used by HW target mode drivers to create a linked ··· 4154 4506 */ 4155 4507 void transport_do_task_sg_chain(struct se_cmd *cmd) 4156 4508 { 4157 - struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; 4158 - struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; 4159 - struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; 4509 + struct scatterlist *sg_first = NULL; 4510 + struct scatterlist *sg_prev = NULL; 4511 + int sg_prev_nents = 0; 4512 + struct scatterlist *sg; 4160 4513 struct se_task *task; 4161 - struct target_core_fabric_ops *tfo = cmd->se_tfo; 4162 - u32 task_sg_num = 0, sg_count = 0; 4514 + u32 chained_nents = 0; 4163 4515 int i; 4164 4516 4165 - if (tfo->task_sg_chaining == 0) { 4166 - printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" 4167 - " %s\n", tfo->get_fabric_name()); 4168 - dump_stack(); 4169 - return; 4170 - } 4517 + BUG_ON(!cmd->se_tfo->task_sg_chaining); 4518 + 4171 4519 /* 4172 4520 * Walk the struct se_task list and setup scatterlist chains 4173 4521 * for each contiguously allocated struct se_task->task_sg[]. 4174 4522 */ 4175 4523 list_for_each_entry(task, &cmd->t_task_list, t_list) { 4176 - if (!(task->task_sg) || !(task->task_padded_sg)) 4524 + if (!task->task_sg) 4177 4525 continue; 4178 4526 4179 - if (sg_head && sg_link) { 4180 - sg_head_cur = &task->task_sg[0]; 4181 - sg_link_cur = &task->task_sg[task->task_sg_num]; 4182 - /* 4183 - * Either add chain or mark end of scatterlist 4184 - */ 4185 - if (!(list_is_last(&task->t_list, 4186 - &cmd->t_task_list))) { 4187 - /* 4188 - * Clear existing SGL termination bit set in 4189 - * transport_init_task_sg(), see sg_mark_end() 4190 - */ 4191 - sg_end_cur = &task->task_sg[task->task_sg_num - 1]; 4192 - sg_end_cur->page_link &= ~0x02; 4527 + BUG_ON(!task->task_padded_sg); 4193 4528 4194 - sg_chain(sg_head, task_sg_num, sg_head_cur); 4195 - sg_count += task->task_sg_num; 4196 - task_sg_num = (task->task_sg_num + 1); 4197 - } else { 4198 - sg_chain(sg_head, task_sg_num, sg_head_cur); 4199 - sg_count += task->task_sg_num; 4200 - task_sg_num = task->task_sg_num; 4201 - } 4202 - 4203 - sg_head = sg_head_cur; 4204 - sg_link = sg_link_cur; 4205 - continue; 4206 - } 4207 - sg_head = sg_first = &task->task_sg[0]; 4208 - sg_link = &task->task_sg[task->task_sg_num]; 4209 - /* 4210 - * Check for single task.. 
4211 - */ 4212 - if (!(list_is_last(&task->t_list, &cmd->t_task_list))) { 4213 - /* 4214 - * Clear existing SGL termination bit set in 4215 - * transport_init_task_sg(), see sg_mark_end() 4216 - */ 4217 - sg_end = &task->task_sg[task->task_sg_num - 1]; 4218 - sg_end->page_link &= ~0x02; 4219 - sg_count += task->task_sg_num; 4220 - task_sg_num = (task->task_sg_num + 1); 4529 + if (!sg_first) { 4530 + sg_first = task->task_sg; 4531 + chained_nents = task->task_sg_num; 4221 4532 } else { 4222 - sg_count += task->task_sg_num; 4223 - task_sg_num = task->task_sg_num; 4533 + sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4534 + chained_nents += task->task_sg_num; 4224 4535 } 4536 + 4537 + sg_prev = task->task_sg; 4538 + sg_prev_nents = task->task_sg_num; 4225 4539 } 4226 4540 /* 4227 4541 * Setup the starting pointer and total t_tasks_sg_linked_no including 4228 4542 * padding SGs for linking and to mark the end. 4229 4543 */ 4230 4544 cmd->t_tasks_sg_chained = sg_first; 4231 - cmd->t_tasks_sg_chained_no = sg_count; 4545 + cmd->t_tasks_sg_chained_no = chained_nents; 4232 4546 4233 4547 DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" 4234 4548 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, ··· 4209 4599 } 4210 4600 EXPORT_SYMBOL(transport_do_task_sg_chain); 4211 4601 4212 - static int transport_do_se_mem_map( 4213 - struct se_device *dev, 4214 - struct se_task *task, 4215 - struct list_head *se_mem_list, 4216 - void *in_mem, 4217 - struct se_mem *in_se_mem, 4218 - struct se_mem **out_se_mem, 4219 - u32 *se_mem_cnt, 4220 - u32 *task_offset_in) 4221 - { 4222 - u32 task_offset = *task_offset_in; 4223 - int ret = 0; 4224 - /* 4225 - * se_subsystem_api_t->do_se_mem_map is used when internal allocation 4226 - * has been done by the transport plugin. 4227 - */ 4228 - if (dev->transport->do_se_mem_map) { 4229 - ret = dev->transport->do_se_mem_map(task, se_mem_list, 4230 - in_mem, in_se_mem, out_se_mem, se_mem_cnt, 4231 - task_offset_in); 4232 - if (ret == 0) 4233 - task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; 4234 - 4235 - return ret; 4236 - } 4237 - 4238 - BUG_ON(list_empty(se_mem_list)); 4239 - /* 4240 - * This is the normal path for all normal non BIDI and BIDI-COMMAND 4241 - * WRITE payloads.. If we need to do BIDI READ passthrough for 4242 - * TCM/pSCSI the first call to transport_do_se_mem_map -> 4243 - * transport_init_task_sg() -> transport_map_mem_to_sg() will do the 4244 - * allocation for task->task_sg_bidi, and the subsequent call to 4245 - * transport_do_se_mem_map() from transport_generic_get_cdb_count() 4246 - */ 4247 - if (!(task->task_sg_bidi)) { 4248 - /* 4249 - * Assume default that transport plugin speaks preallocated 4250 - * scatterlists. 4251 - */ 4252 - ret = transport_init_task_sg(task, in_se_mem, task_offset); 4253 - if (ret <= 0) 4254 - return ret; 4255 - /* 4256 - * struct se_task->task_sg now contains the struct scatterlist array. 
4257 - */ 4258 - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, 4259 - in_se_mem, out_se_mem, se_mem_cnt, 4260 - task_offset_in); 4261 - } 4262 - /* 4263 - * Handle the se_mem_list -> struct task->task_sg_bidi 4264 - * memory map for the extra BIDI READ payload 4265 - */ 4266 - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, 4267 - in_se_mem, out_se_mem, se_mem_cnt, 4268 - task_offset_in); 4269 - } 4270 - 4271 4602 /* 4272 4603 * Break up cmd into chunks transport can handle 4273 4604 */ 4274 - static u32 transport_allocate_tasks( 4605 + static int transport_allocate_data_tasks( 4275 4606 struct se_cmd *cmd, 4276 4607 unsigned long long lba, 4277 - u32 sectors, 4278 4608 enum dma_data_direction data_direction, 4279 - struct list_head *mem_list, 4280 - int set_counts) 4609 + struct scatterlist *sgl, 4610 + unsigned int sgl_nents) 4281 4611 { 4282 4612 unsigned char *cdb = NULL; 4283 4613 struct se_task *task; 4284 - struct se_mem *se_mem = NULL; 4285 - struct se_mem *se_mem_lout = NULL; 4286 - struct se_mem *se_mem_bidi = NULL; 4287 - struct se_mem *se_mem_bidi_lout = NULL; 4288 4614 struct se_device *dev = cmd->se_dev; 4289 - int ret; 4290 - u32 task_offset_in = 0; 4291 - u32 se_mem_cnt = 0; 4292 - u32 se_mem_bidi_cnt = 0; 4293 - u32 task_cdbs = 0; 4615 + unsigned long flags; 4616 + sector_t sectors; 4617 + int task_count; 4618 + int i; 4619 + sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; 4620 + u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; 4621 + struct scatterlist *sg; 4622 + struct scatterlist *cmd_sg; 4294 4623 4295 - BUG_ON(!mem_list); 4296 - /* 4297 - * While using RAMDISK_DR backstores is the only case where 4298 - * mem_list will ever be empty at this point. 4299 - */ 4300 - if (!(list_empty(mem_list))) 4301 - se_mem = list_first_entry(mem_list, struct se_mem, se_list); 4302 - /* 4303 - * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to 4304 - * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation 4305 - */ 4306 - if (!list_empty(&cmd->t_mem_bidi_list) && 4307 - (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) 4308 - se_mem_bidi = list_first_entry(&cmd->t_mem_bidi_list, 4309 - struct se_mem, se_list); 4624 + WARN_ON(cmd->data_length % sector_size); 4625 + sectors = DIV_ROUND_UP(cmd->data_length, sector_size); 4626 + task_count = DIV_ROUND_UP(sectors, dev_max_sectors); 4310 4627 4311 - while (sectors) { 4312 - sector_t limited_sectors; 4313 - 4314 - DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", 4315 - cmd->se_tfo->get_task_tag(cmd), lba, sectors, 4316 - transport_dev_end_lba(dev)); 4317 - 4318 - limited_sectors = transport_limit_task_sectors(dev, lba, sectors); 4319 - if (!limited_sectors) 4320 - break; 4628 + cmd_sg = sgl; 4629 + for (i = 0; i < task_count; i++) { 4630 + unsigned int task_size; 4631 + int count; 4321 4632 4322 4633 task = transport_generic_get_task(cmd, data_direction); 4323 4634 if (!task) 4324 - goto out; 4635 + return -ENOMEM; 4325 4636 4326 4637 task->task_lba = lba; 4327 - task->task_sectors = limited_sectors; 4328 - lba += task->task_sectors; 4329 - sectors -= task->task_sectors; 4330 - task->task_size = (task->task_sectors * 4331 - dev->se_sub_dev->se_dev_attrib.block_size); 4638 + task->task_sectors = min(sectors, dev_max_sectors); 4639 + task->task_size = task->task_sectors * sector_size; 4332 4640 4333 4641 cdb = dev->transport->get_cdb(task); 4334 - /* Should be part of task, can't fail */ 4335 4642 BUG_ON(!cdb); 4336 4643 
4337 4644 memcpy(cdb, cmd->t_task_cdb, ··· 4258 4731 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); 4259 4732 4260 4733 /* 4261 - * Perform the SE OBJ plugin and/or Transport plugin specific 4262 - * mapping for cmd->t_mem_list. And setup the 4263 - * task->task_sg and if necessary task->task_sg_bidi 4734 + * Check if the fabric module driver is requesting that all 4735 + * struct se_task->task_sg[] be chained together.. If so, 4736 + * then allocate an extra padding SG entry for linking and 4737 + * marking the end of the chained SGL. 4738 + * Possibly over-allocate task sgl size by using cmd sgl size. 4739 + * It's so much easier and only a waste when task_count > 1. 4740 + * That is extremely rare. 4264 4741 */ 4265 - ret = transport_do_se_mem_map(dev, task, mem_list, 4266 - NULL, se_mem, &se_mem_lout, &se_mem_cnt, 4267 - &task_offset_in); 4268 - if (ret < 0) 4269 - goto out; 4270 - 4271 - se_mem = se_mem_lout; 4272 - /* 4273 - * Setup the cmd->t_mem_bidi_list -> task->task_sg_bidi 4274 - * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI 4275 - * 4276 - * Note that the first call to transport_do_se_mem_map() above will 4277 - * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() 4278 - * -> transport_init_task_sg(), and the second here will do the 4279 - * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. 4280 - */ 4281 - if (task->task_sg_bidi != NULL) { 4282 - ret = transport_do_se_mem_map(dev, task, 4283 - &cmd->t_mem_bidi_list, NULL, 4284 - se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, 4285 - &task_offset_in); 4286 - if (ret < 0) 4287 - goto out; 4288 - 4289 - se_mem_bidi = se_mem_bidi_lout; 4742 + task->task_sg_num = sgl_nents; 4743 + if (cmd->se_tfo->task_sg_chaining) { 4744 + task->task_sg_num++; 4745 + task->task_padded_sg = 1; 4290 4746 } 4291 - task_cdbs++; 4292 4747 4293 - DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", 4294 - task_cdbs, task->task_sg_num); 4748 + task->task_sg = kmalloc(sizeof(struct scatterlist) * \ 4749 + task->task_sg_num, GFP_KERNEL); 4750 + if (!task->task_sg) { 4751 + cmd->se_dev->transport->free_task(task); 4752 + return -ENOMEM; 4753 + } 4754 + 4755 + sg_init_table(task->task_sg, task->task_sg_num); 4756 + 4757 + task_size = task->task_size; 4758 + 4759 + /* Build new sgl, only up to task_size */ 4760 + for_each_sg(task->task_sg, sg, task->task_sg_num, count) { 4761 + if (cmd_sg->length > task_size) 4762 + break; 4763 + 4764 + *sg = *cmd_sg; 4765 + task_size -= cmd_sg->length; 4766 + cmd_sg = sg_next(cmd_sg); 4767 + } 4768 + 4769 + lba += task->task_sectors; 4770 + sectors -= task->task_sectors; 4771 + 4772 + spin_lock_irqsave(&cmd->t_state_lock, flags); 4773 + list_add_tail(&task->t_list, &cmd->t_task_list); 4774 + spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4295 4775 } 4296 4776 4297 - if (set_counts) { 4298 - atomic_inc(&cmd->t_fe_count); 4299 - atomic_inc(&cmd->t_se_count); 4300 - } 4301 - 4302 - DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", 4303 - cmd->se_tfo->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) 4304 - ? 
"DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); 4305 - 4306 - return task_cdbs; 4307 - out: 4308 - return 0; 4777 + return task_count; 4309 4778 } 4310 4779 4311 4780 static int 4312 - transport_map_control_cmd_to_task(struct se_cmd *cmd) 4781 + transport_allocate_control_task(struct se_cmd *cmd) 4313 4782 { 4314 4783 struct se_device *dev = cmd->se_dev; 4315 4784 unsigned char *cdb; 4316 4785 struct se_task *task; 4317 - int ret; 4786 + unsigned long flags; 4318 4787 4319 4788 task = transport_generic_get_task(cmd, cmd->data_direction); 4320 4789 if (!task) 4321 - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 4790 + return -ENOMEM; 4322 4791 4323 4792 cdb = dev->transport->get_cdb(task); 4324 4793 BUG_ON(!cdb); 4325 4794 memcpy(cdb, cmd->t_task_cdb, 4326 4795 scsi_command_size(cmd->t_task_cdb)); 4327 4796 4328 - task->task_size = cmd->data_length; 4329 - task->task_sg_num = 4330 - (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; 4797 + task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, 4798 + GFP_KERNEL); 4799 + if (!task->task_sg) { 4800 + cmd->se_dev->transport->free_task(task); 4801 + return -ENOMEM; 4802 + } 4331 4803 4332 - atomic_inc(&cmd->t_fe_count); 4333 - atomic_inc(&cmd->t_se_count); 4804 + memcpy(task->task_sg, cmd->t_data_sg, 4805 + sizeof(struct scatterlist) * cmd->t_data_nents); 4806 + task->task_size = cmd->data_length; 4807 + task->task_sg_num = cmd->t_data_nents; 4808 + 4809 + spin_lock_irqsave(&cmd->t_state_lock, flags); 4810 + list_add_tail(&task->t_list, &cmd->t_task_list); 4811 + spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4334 4812 4335 4813 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { 4336 - struct se_mem *se_mem = NULL, *se_mem_lout = NULL; 4337 - u32 se_mem_cnt = 0, task_offset = 0; 4338 - 4339 - if (!list_empty(&cmd->t_mem_list)) 4340 - se_mem = list_first_entry(&cmd->t_mem_list, 4341 - struct se_mem, se_list); 4342 - 4343 - ret = transport_do_se_mem_map(dev, task, 4344 - &cmd->t_mem_list, NULL, se_mem, 4345 - &se_mem_lout, &se_mem_cnt, &task_offset); 4346 - if (ret < 0) 4347 - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 4348 - 4349 4814 if (dev->transport->map_task_SG) 4350 4815 return dev->transport->map_task_SG(task); 4351 4816 return 0; ··· 4347 4828 return 0; 4348 4829 } else { 4349 4830 BUG(); 4350 - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 4831 + return -ENOMEM; 4351 4832 } 4352 4833 } 4834 + 4835 + static u32 transport_allocate_tasks( 4836 + struct se_cmd *cmd, 4837 + unsigned long long lba, 4838 + enum dma_data_direction data_direction, 4839 + struct scatterlist *sgl, 4840 + unsigned int sgl_nents) 4841 + { 4842 + int ret; 4843 + 4844 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 4845 + return transport_allocate_data_tasks(cmd, lba, data_direction, 4846 + sgl, sgl_nents); 4847 + } else { 4848 + ret = transport_allocate_control_task(cmd); 4849 + if (ret < 0) 4850 + return ret; 4851 + else 4852 + return 1; 4853 + } 4854 + } 4855 + 4353 4856 4354 4857 /* transport_generic_new_cmd(): Called from transport_processing_thread() 4355 4858 * ··· 4391 4850 /* 4392 4851 * Determine is the TCM fabric module has already allocated physical 4393 4852 * memory, and is directly calling transport_generic_map_mem_to_cmd() 4394 - * to setup beforehand the linked list of physical memory at 4395 - * cmd->t_mem_list of struct se_mem->se_page 4853 + * beforehand. 
4396 4854 */ 4397 - if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { 4855 + if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 4856 + cmd->data_length) { 4398 4857 ret = transport_generic_get_mem(cmd); 4399 4858 if (ret < 0) 4400 4859 return ret; ··· 4404 4863 if (ret < 0) 4405 4864 return ret; 4406 4865 4407 - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 4408 - list_for_each_entry(task, &cmd->t_task_list, t_list) { 4409 - if (atomic_read(&task->task_sent)) 4410 - continue; 4411 - if (!dev->transport->map_task_SG) 4412 - continue; 4866 + list_for_each_entry(task, &cmd->t_task_list, t_list) { 4867 + if (atomic_read(&task->task_sent)) 4868 + continue; 4869 + if (!dev->transport->map_task_SG) 4870 + continue; 4413 4871 4414 - ret = dev->transport->map_task_SG(task); 4415 - if (ret < 0) 4416 - return ret; 4417 - } 4418 - } else { 4419 - ret = transport_map_control_cmd_to_task(cmd); 4872 + ret = dev->transport->map_task_SG(task); 4420 4873 if (ret < 0) 4421 4874 return ret; 4422 4875 }
+13 -12
drivers/target/tcm_fc/tfc_cmd.c
··· 59 59 struct fc_exch *ep; 60 60 struct fc_seq *sp; 61 61 struct se_cmd *se_cmd; 62 - struct se_mem *mem; 62 + struct scatterlist *sg; 63 + int count; 63 64 64 65 if (!(ft_debug_logging & FT_DEBUG_IO)) 65 66 return; ··· 72 71 caller, cmd, cmd->cdb); 73 72 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 74 73 75 - printk(KERN_INFO "%s: cmd %p se_num %u len %u se_cmd_flags <0x%x>\n", 76 - caller, cmd, se_cmd->t_tasks_se_num, 74 + printk(KERN_INFO "%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", 75 + caller, cmd, se_cmd->t_data_nents, 77 76 se_cmd->data_length, se_cmd->se_cmd_flags); 78 77 79 - list_for_each_entry(mem, &se_cmd->t_mem_list, se_list) 80 - printk(KERN_INFO "%s: cmd %p mem %p page %p " 81 - "len 0x%x off 0x%x\n", 82 - caller, cmd, mem, 83 - mem->se_page, mem->se_len, mem->se_off); 78 + for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) 79 + printk(KERN_INFO "%s: cmd %p sg %p page %p " 80 + "len 0x%x off 0x%x\n", 81 + caller, cmd, sg, 82 + sg_page(sg), sg->length, sg->offset); 83 + 84 84 sp = cmd->seq; 85 85 if (sp) { 86 86 ep = fc_seq_exch(sp); ··· 258 256 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { 259 257 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 260 258 /* 261 - * Map se_mem list to scatterlist, so that 262 - * DDP can be setup. DDP setup function require 263 - * scatterlist. se_mem_list is internal to 264 - * TCM/LIO target 259 + * cmd may have been broken up into multiple 260 + * tasks. Link their sgs together so we can 261 + * operate on them all at once. 265 262 */ 266 263 transport_do_task_sg_chain(se_cmd); 267 264 cmd->sg = se_cmd->t_tasks_sg_chained;
+26 -32
drivers/target/tcm_fc/tfc_io.c
··· 68 68 struct fc_frame *fp = NULL; 69 69 struct fc_exch *ep; 70 70 struct fc_lport *lport; 71 - struct se_mem *mem; 71 + struct scatterlist *sg = NULL; 72 72 size_t remaining; 73 73 u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; 74 - u32 mem_off; 74 + u32 mem_off = 0; 75 75 u32 fh_off = 0; 76 76 u32 frame_off = 0; 77 77 size_t frame_len = 0; 78 - size_t mem_len; 78 + size_t mem_len = 0; 79 79 size_t tlen; 80 80 size_t off_in_page; 81 - struct page *page; 81 + struct page *page = NULL; 82 82 int use_sg; 83 83 int error; 84 84 void *page_addr; ··· 94 94 /* 95 95 * Setup to use first mem list entry, unless no data. 96 96 */ 97 - BUG_ON(remaining && list_empty(&se_cmd->t_mem_list)); 97 + BUG_ON(remaining && !se_cmd->t_data_sg); 98 98 if (remaining) { 99 - mem = list_first_entry(&se_cmd->t_mem_list, 100 - struct se_mem, se_list); 101 - mem_len = mem->se_len; 102 - mem_off = mem->se_off; 103 - page = mem->se_page; 99 + sg = se_cmd->t_data_sg; 100 + mem_len = sg->length; 101 + mem_off = sg->offset; 102 + page = sg_page(sg); 104 103 } 105 104 106 105 /* no scatter/gather in skb for odd word length due to fc_seq_send() */ ··· 107 108 108 109 while (remaining) { 109 110 if (!mem_len) { 110 - BUG_ON(!mem); 111 - mem = list_entry(mem->se_list.next, 112 - struct se_mem, se_list); 113 - mem_len = min((size_t)mem->se_len, remaining); 114 - mem_off = mem->se_off; 115 - page = mem->se_page; 111 + sg = sg_next(sg); 112 + mem_len = min((size_t)sg->length, remaining); 113 + mem_off = sg->offset; 114 + page = sg_page(sg); 116 115 } 117 116 if (!frame_len) { 118 117 /* ··· 197 200 struct fc_exch *ep; 198 201 struct fc_lport *lport; 199 202 struct fc_frame_header *fh; 200 - struct se_mem *mem; 201 - u32 mem_off; 203 + struct scatterlist *sg = NULL; 204 + u32 mem_off = 0; 202 205 u32 rel_off; 203 206 size_t frame_len; 204 - size_t mem_len; 207 + size_t mem_len = 0; 205 208 size_t tlen; 206 - struct page *page; 209 + struct page *page = NULL; 207 210 void *page_addr; 208 211 void *from; 209 212 void *to; ··· 285 288 /* 286 289 * Setup to use first mem list entry, unless no data. 287 290 */ 288 - BUG_ON(frame_len && list_empty(&se_cmd->t_mem_list)); 291 + BUG_ON(frame_len && !se_cmd->t_data_sg); 289 292 if (frame_len) { 290 - mem = list_first_entry(&se_cmd->t_mem_list, 291 - struct se_mem, se_list); 292 - mem_len = mem->se_len; 293 - mem_off = mem->se_off; 294 - page = mem->se_page; 293 + sg = se_cmd->t_data_sg; 294 + mem_len = sg->length; 295 + mem_off = sg->offset; 296 + page = sg_page(sg); 295 297 } 296 298 297 299 while (frame_len) { 298 300 if (!mem_len) { 299 - BUG_ON(!mem); 300 - mem = list_entry(mem->se_list.next, 301 - struct se_mem, se_list); 302 - mem_len = mem->se_len; 303 - mem_off = mem->se_off; 304 - page = mem->se_page; 301 + sg = sg_next(sg); 302 + mem_len = sg->length; 303 + mem_off = sg->offset; 304 + page = sg_page(sg); 305 305 } 306 306 if (rel_off >= mem_len) { 307 307 rel_off -= mem_len;
+6 -4
include/target/target_core_base.h
··· 490 490 int t_tasks_failed; 491 491 int t_tasks_fua; 492 492 bool t_tasks_bidi; 493 - u32 t_tasks_se_num; 494 - u32 t_tasks_se_bidi_num; 495 493 u32 t_tasks_sg_chained_no; 496 494 atomic_t t_fe_count; 497 495 atomic_t t_se_count; ··· 521 523 */ 522 524 struct scatterlist *t_task_pt_sgl; 523 525 u32 t_task_pt_sgl_num; 524 - struct list_head t_mem_list; 526 + 527 + struct scatterlist *t_data_sg; 528 + unsigned int t_data_nents; 529 + struct scatterlist *t_bidi_data_sg; 530 + unsigned int t_bidi_data_nents; 531 + 525 532 /* Used for BIDI READ */ 526 - struct list_head t_mem_bidi_list; 527 533 struct list_head t_task_list; 528 534 u32 t_task_list_num; 529 535