Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

target: enhance and export target_alloc_sgl/target_free_sgl

The SRP target driver will need to allocate and chain its own SGLs soon.
For this, export target_alloc_sgl, and add a new argument to it so that it
can allocate an additional chain entry that doesn't point to a page. Also
export transport_free_sgl after renaming it to target_free_sgl, so that it
can be used to free these SGLs again.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

authored by

Christoph Hellwig and committed by
Doug Ledford
e64aa657 a060b562

+23 -16
+18 -14
drivers/target/target_core_transport.c
··· 2195 2195 transport_handle_queue_full(cmd, cmd->se_dev); 2196 2196 } 2197 2197 2198 - static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 2198 + void target_free_sgl(struct scatterlist *sgl, int nents) 2199 2199 { 2200 2200 struct scatterlist *sg; 2201 2201 int count; ··· 2205 2205 2206 2206 kfree(sgl); 2207 2207 } 2208 + EXPORT_SYMBOL(target_free_sgl); 2208 2209 2209 2210 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2210 2211 { ··· 2226 2225 static inline void transport_free_pages(struct se_cmd *cmd) 2227 2226 { 2228 2227 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2229 - transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2228 + target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2230 2229 cmd->t_prot_sg = NULL; 2231 2230 cmd->t_prot_nents = 0; 2232 2231 } ··· 2237 2236 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2238 2237 */ 2239 2238 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2240 - transport_free_sgl(cmd->t_bidi_data_sg, 2239 + target_free_sgl(cmd->t_bidi_data_sg, 2241 2240 cmd->t_bidi_data_nents); 2242 2241 cmd->t_bidi_data_sg = NULL; 2243 2242 cmd->t_bidi_data_nents = 0; ··· 2247 2246 } 2248 2247 transport_reset_sgl_orig(cmd); 2249 2248 2250 - transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2249 + target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2251 2250 cmd->t_data_sg = NULL; 2252 2251 cmd->t_data_nents = 0; 2253 2252 2254 - transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2253 + target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2255 2254 cmd->t_bidi_data_sg = NULL; 2256 2255 cmd->t_bidi_data_nents = 0; 2257 2256 } ··· 2325 2324 2326 2325 int 2327 2326 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2328 - bool zero_page) 2327 + bool zero_page, bool chainable) 2329 2328 { 2330 2329 struct scatterlist *sg; 2331 2330 struct page *page; 2332 2331 gfp_t zero_flag = (zero_page) ? 
__GFP_ZERO : 0; 2333 - unsigned int nent; 2332 + unsigned int nalloc, nent; 2334 2333 int i = 0; 2335 2334 2336 - nent = DIV_ROUND_UP(length, PAGE_SIZE); 2337 - sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); 2335 + nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE); 2336 + if (chainable) 2337 + nalloc++; 2338 + sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL); 2338 2339 if (!sg) 2339 2340 return -ENOMEM; 2340 2341 2341 - sg_init_table(sg, nent); 2342 + sg_init_table(sg, nalloc); 2342 2343 2343 2344 while (length) { 2344 2345 u32 page_len = min_t(u32, length, PAGE_SIZE); ··· 2364 2361 kfree(sg); 2365 2362 return -ENOMEM; 2366 2363 } 2364 + EXPORT_SYMBOL(target_alloc_sgl); 2367 2365 2368 2366 /* 2369 2367 * Allocate any required resources to execute the command. For writes we ··· 2380 2376 if (cmd->prot_op != TARGET_PROT_NORMAL && 2381 2377 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2382 2378 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2383 - cmd->prot_length, true); 2379 + cmd->prot_length, true, false); 2384 2380 if (ret < 0) 2385 2381 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2386 2382 } ··· 2405 2401 2406 2402 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2407 2403 &cmd->t_bidi_data_nents, 2408 - bidi_length, zero_flag); 2404 + bidi_length, zero_flag, false); 2409 2405 if (ret < 0) 2410 2406 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2411 2407 } 2412 2408 2413 2409 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2414 - cmd->data_length, zero_flag); 2410 + cmd->data_length, zero_flag, false); 2415 2411 if (ret < 0) 2416 2412 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2417 2413 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && ··· 2425 2421 2426 2422 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2427 2423 &cmd->t_bidi_data_nents, 2428 - caw_length, zero_flag); 2424 + caw_length, zero_flag, false); 2429 2425 if (ret < 0) 2430 2426 return 
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2431 2427 }
+1 -1
drivers/target/target_core_xcopy.c
··· 563 563 564 564 if (alloc_mem) { 565 565 rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 566 - cmd->data_length, false); 566 + cmd->data_length, false, false); 567 567 if (rc < 0) { 568 568 ret = rc; 569 569 goto out;
-1
include/target/target_core_backend.h
··· 85 85 void *transport_kmap_data_sg(struct se_cmd *); 86 86 void transport_kunmap_data_sg(struct se_cmd *); 87 87 /* core helpers also used by xcopy during internal command setup */ 88 - int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool); 89 88 sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, 90 89 struct scatterlist *, u32, struct scatterlist *, u32); 91 90
+4
include/target/target_core_fabric.h
··· 185 185 int core_tpg_register(struct se_wwn *, struct se_portal_group *, int); 186 186 int core_tpg_deregister(struct se_portal_group *); 187 187 188 + int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, 189 + u32 length, bool zero_page, bool chainable); 190 + void target_free_sgl(struct scatterlist *sgl, int nents); 191 + 188 192 /* 189 193 * The LIO target core uses DMA_TO_DEVICE to mean that data is going 190 194 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean