/* Source: Linux kernel include/linux/scatterlist.h, tag v4.16 (raw view, ~14 kB) */
1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _LINUX_SCATTERLIST_H 3#define _LINUX_SCATTERLIST_H 4 5#include <linux/string.h> 6#include <linux/types.h> 7#include <linux/bug.h> 8#include <linux/mm.h> 9#include <asm/io.h> 10 11struct scatterlist { 12#ifdef CONFIG_DEBUG_SG 13 unsigned long sg_magic; 14#endif 15 unsigned long page_link; 16 unsigned int offset; 17 unsigned int length; 18 dma_addr_t dma_address; 19#ifdef CONFIG_NEED_SG_DMA_LENGTH 20 unsigned int dma_length; 21#endif 22}; 23 24/* 25 * Since the above length field is an unsigned int, below we define the maximum 26 * length in bytes that can be stored in one scatterlist entry. 27 */ 28#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK) 29 30/* 31 * These macros should be used after a dma_map_sg call has been done 32 * to get bus addresses of each of the SG entries and their lengths. 33 * You should only work with the number of sg entries dma_map_sg 34 * returns, or alternatively stop on the first sg_dma_len(sg) which 35 * is 0. 36 */ 37#define sg_dma_address(sg) ((sg)->dma_address) 38 39#ifdef CONFIG_NEED_SG_DMA_LENGTH 40#define sg_dma_len(sg) ((sg)->dma_length) 41#else 42#define sg_dma_len(sg) ((sg)->length) 43#endif 44 45struct sg_table { 46 struct scatterlist *sgl; /* the list */ 47 unsigned int nents; /* number of mapped entries */ 48 unsigned int orig_nents; /* original size of list */ 49}; 50 51/* 52 * Notes on SG table design. 53 * 54 * We use the unsigned long page_link field in the scatterlist struct to place 55 * the page pointer AND encode information about the sg table as well. The two 56 * lower bits are reserved for this information. 57 * 58 * If bit 0 is set, then the page_link contains a pointer to the next sg 59 * table list. Otherwise the next entry is at sg + 1. 60 * 61 * If bit 1 is set, then this sg entry is the last element in a list. 62 * 63 * See sg_next(). 
64 * 65 */ 66 67#define SG_MAGIC 0x87654321 68 69/* 70 * We overload the LSB of the page pointer to indicate whether it's 71 * a valid sg entry, or whether it points to the start of a new scatterlist. 72 * Those low bits are there for everyone! (thanks mason :-) 73 */ 74#define sg_is_chain(sg) ((sg)->page_link & 0x01) 75#define sg_is_last(sg) ((sg)->page_link & 0x02) 76#define sg_chain_ptr(sg) \ 77 ((struct scatterlist *) ((sg)->page_link & ~0x03)) 78 79/** 80 * sg_assign_page - Assign a given page to an SG entry 81 * @sg: SG entry 82 * @page: The page 83 * 84 * Description: 85 * Assign page to sg entry. Also see sg_set_page(), the most commonly used 86 * variant. 87 * 88 **/ 89static inline void sg_assign_page(struct scatterlist *sg, struct page *page) 90{ 91 unsigned long page_link = sg->page_link & 0x3; 92 93 /* 94 * In order for the low bit stealing approach to work, pages 95 * must be aligned at a 32-bit boundary as a minimum. 96 */ 97 BUG_ON((unsigned long) page & 0x03); 98#ifdef CONFIG_DEBUG_SG 99 BUG_ON(sg->sg_magic != SG_MAGIC); 100 BUG_ON(sg_is_chain(sg)); 101#endif 102 sg->page_link = page_link | (unsigned long) page; 103} 104 105/** 106 * sg_set_page - Set sg entry to point at given page 107 * @sg: SG entry 108 * @page: The page 109 * @len: Length of data 110 * @offset: Offset into page 111 * 112 * Description: 113 * Use this function to set an sg entry pointing at a page, never assign 114 * the page directly. We encode sg table information in the lower bits 115 * of the page pointer. See sg_page() for looking up the page belonging 116 * to an sg entry. 
117 * 118 **/ 119static inline void sg_set_page(struct scatterlist *sg, struct page *page, 120 unsigned int len, unsigned int offset) 121{ 122 sg_assign_page(sg, page); 123 sg->offset = offset; 124 sg->length = len; 125} 126 127static inline struct page *sg_page(struct scatterlist *sg) 128{ 129#ifdef CONFIG_DEBUG_SG 130 BUG_ON(sg->sg_magic != SG_MAGIC); 131 BUG_ON(sg_is_chain(sg)); 132#endif 133 return (struct page *)((sg)->page_link & ~0x3); 134} 135 136/** 137 * sg_set_buf - Set sg entry to point at given data 138 * @sg: SG entry 139 * @buf: Data 140 * @buflen: Data length 141 * 142 **/ 143static inline void sg_set_buf(struct scatterlist *sg, const void *buf, 144 unsigned int buflen) 145{ 146#ifdef CONFIG_DEBUG_SG 147 BUG_ON(!virt_addr_valid(buf)); 148#endif 149 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); 150} 151 152/* 153 * Loop over each sg element, following the pointer to a new list if necessary 154 */ 155#define for_each_sg(sglist, sg, nr, __i) \ 156 for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) 157 158/** 159 * sg_chain - Chain two sglists together 160 * @prv: First scatterlist 161 * @prv_nents: Number of entries in prv 162 * @sgl: Second scatterlist 163 * 164 * Description: 165 * Links @prv@ and @sgl@ together, to form a longer scatterlist. 166 * 167 **/ 168static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, 169 struct scatterlist *sgl) 170{ 171 /* 172 * offset and length are unused for chain entry. Clear them. 173 */ 174 prv[prv_nents - 1].offset = 0; 175 prv[prv_nents - 1].length = 0; 176 177 /* 178 * Set lowest bit to indicate a link pointer, and make sure to clear 179 * the termination bit if it happens to be set. 
180 */ 181 prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; 182} 183 184/** 185 * sg_mark_end - Mark the end of the scatterlist 186 * @sg: SG entryScatterlist 187 * 188 * Description: 189 * Marks the passed in sg entry as the termination point for the sg 190 * table. A call to sg_next() on this entry will return NULL. 191 * 192 **/ 193static inline void sg_mark_end(struct scatterlist *sg) 194{ 195#ifdef CONFIG_DEBUG_SG 196 BUG_ON(sg->sg_magic != SG_MAGIC); 197#endif 198 /* 199 * Set termination bit, clear potential chain bit 200 */ 201 sg->page_link |= 0x02; 202 sg->page_link &= ~0x01; 203} 204 205/** 206 * sg_unmark_end - Undo setting the end of the scatterlist 207 * @sg: SG entryScatterlist 208 * 209 * Description: 210 * Removes the termination marker from the given entry of the scatterlist. 211 * 212 **/ 213static inline void sg_unmark_end(struct scatterlist *sg) 214{ 215#ifdef CONFIG_DEBUG_SG 216 BUG_ON(sg->sg_magic != SG_MAGIC); 217#endif 218 sg->page_link &= ~0x02; 219} 220 221/** 222 * sg_phys - Return physical address of an sg entry 223 * @sg: SG entry 224 * 225 * Description: 226 * This calls page_to_phys() on the page in this sg entry, and adds the 227 * sg offset. The caller must know that it is legal to call page_to_phys() 228 * on the sg page. 229 * 230 **/ 231static inline dma_addr_t sg_phys(struct scatterlist *sg) 232{ 233 return page_to_phys(sg_page(sg)) + sg->offset; 234} 235 236/** 237 * sg_virt - Return virtual address of an sg entry 238 * @sg: SG entry 239 * 240 * Description: 241 * This calls page_address() on the page in this sg entry, and adds the 242 * sg offset. The caller must know that the sg page has a valid virtual 243 * mapping. 
244 * 245 **/ 246static inline void *sg_virt(struct scatterlist *sg) 247{ 248 return page_address(sg_page(sg)) + sg->offset; 249} 250 251int sg_nents(struct scatterlist *sg); 252int sg_nents_for_len(struct scatterlist *sg, u64 len); 253struct scatterlist *sg_next(struct scatterlist *); 254struct scatterlist *sg_last(struct scatterlist *s, unsigned int); 255void sg_init_table(struct scatterlist *, unsigned int); 256void sg_init_one(struct scatterlist *, const void *, unsigned int); 257int sg_split(struct scatterlist *in, const int in_mapped_nents, 258 const off_t skip, const int nb_splits, 259 const size_t *split_sizes, 260 struct scatterlist **out, int *out_mapped_nents, 261 gfp_t gfp_mask); 262 263typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); 264typedef void (sg_free_fn)(struct scatterlist *, unsigned int); 265 266void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *); 267void sg_free_table(struct sg_table *); 268int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, 269 struct scatterlist *, gfp_t, sg_alloc_fn *); 270int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); 271int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, 272 unsigned int n_pages, unsigned int offset, 273 unsigned long size, unsigned int max_segment, 274 gfp_t gfp_mask); 275int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, 276 unsigned int n_pages, unsigned int offset, 277 unsigned long size, gfp_t gfp_mask); 278 279#ifdef CONFIG_SGL_ALLOC 280struct scatterlist *sgl_alloc_order(unsigned long long length, 281 unsigned int order, bool chainable, 282 gfp_t gfp, unsigned int *nent_p); 283struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp, 284 unsigned int *nent_p); 285void sgl_free_n_order(struct scatterlist *sgl, int nents, int order); 286void sgl_free_order(struct scatterlist *sgl, int order); 287void sgl_free(struct scatterlist *sgl); 288#endif /* CONFIG_SGL_ALLOC */ 289 
290size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, 291 size_t buflen, off_t skip, bool to_buffer); 292 293size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 294 const void *buf, size_t buflen); 295size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, 296 void *buf, size_t buflen); 297 298size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, 299 const void *buf, size_t buflen, off_t skip); 300size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, 301 void *buf, size_t buflen, off_t skip); 302size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, 303 size_t buflen, off_t skip); 304 305/* 306 * Maximum number of entries that will be allocated in one piece, if 307 * a list larger than this is required then chaining will be utilized. 308 */ 309#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) 310 311/* 312 * The maximum number of SG segments that we will put inside a 313 * scatterlist (unless chaining is used). Should ideally fit inside a 314 * single page, to avoid a higher order allocation. We could define this 315 * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The 316 * minimum value is 32 317 */ 318#define SG_CHUNK_SIZE 128 319 320/* 321 * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit 322 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. 323 */ 324#ifdef CONFIG_ARCH_HAS_SG_CHAIN 325#define SG_MAX_SEGMENTS 2048 326#else 327#define SG_MAX_SEGMENTS SG_CHUNK_SIZE 328#endif 329 330#ifdef CONFIG_SG_POOL 331void sg_free_table_chained(struct sg_table *table, bool first_chunk); 332int sg_alloc_table_chained(struct sg_table *table, int nents, 333 struct scatterlist *first_chunk); 334#endif 335 336/* 337 * sg page iterator 338 * 339 * Iterates over sg entries page-by-page. 
On each successful iteration, 340 * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter) 341 * to get the current page and its dma address. @piter->sg will point to the 342 * sg holding this page and @piter->sg_pgoffset to the page's page offset 343 * within the sg. The iteration will stop either when a maximum number of sg 344 * entries was reached or a terminating sg (sg_last(sg) == true) was reached. 345 */ 346struct sg_page_iter { 347 struct scatterlist *sg; /* sg holding the page */ 348 unsigned int sg_pgoffset; /* page offset within the sg */ 349 350 /* these are internal states, keep away */ 351 unsigned int __nents; /* remaining sg entries */ 352 int __pg_advance; /* nr pages to advance at the 353 * next step */ 354}; 355 356bool __sg_page_iter_next(struct sg_page_iter *piter); 357void __sg_page_iter_start(struct sg_page_iter *piter, 358 struct scatterlist *sglist, unsigned int nents, 359 unsigned long pgoffset); 360/** 361 * sg_page_iter_page - get the current page held by the page iterator 362 * @piter: page iterator holding the page 363 */ 364static inline struct page *sg_page_iter_page(struct sg_page_iter *piter) 365{ 366 return nth_page(sg_page(piter->sg), piter->sg_pgoffset); 367} 368 369/** 370 * sg_page_iter_dma_address - get the dma address of the current page held by 371 * the page iterator. 
372 * @piter: page iterator holding the page 373 */ 374static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter) 375{ 376 return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT); 377} 378 379/** 380 * for_each_sg_page - iterate over the pages of the given sg list 381 * @sglist: sglist to iterate over 382 * @piter: page iterator to hold current page, sg, sg_pgoffset 383 * @nents: maximum number of sg entries to iterate over 384 * @pgoffset: starting page offset 385 */ 386#define for_each_sg_page(sglist, piter, nents, pgoffset) \ 387 for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ 388 __sg_page_iter_next(piter);) 389 390/* 391 * Mapping sg iterator 392 * 393 * Iterates over sg entries mapping page-by-page. On each successful 394 * iteration, @miter->page points to the mapped page and 395 * @miter->length bytes of data can be accessed at @miter->addr. As 396 * long as an interation is enclosed between start and stop, the user 397 * is free to choose control structure and when to stop. 398 * 399 * @miter->consumed is set to @miter->length on each iteration. It 400 * can be adjusted if the user can't consume all the bytes in one go. 401 * Also, a stopped iteration can be resumed by calling next on it. 402 * This is useful when iteration needs to release all resources and 403 * continue later (e.g. at the next interrupt). 
404 */ 405 406#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ 407#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */ 408#define SG_MITER_FROM_SG (1 << 2) /* nop */ 409 410struct sg_mapping_iter { 411 /* the following three fields can be accessed directly */ 412 struct page *page; /* currently mapped page */ 413 void *addr; /* pointer to the mapped area */ 414 size_t length; /* length of the mapped area */ 415 size_t consumed; /* number of consumed bytes */ 416 struct sg_page_iter piter; /* page iterator */ 417 418 /* these are internal states, keep away */ 419 unsigned int __offset; /* offset within page */ 420 unsigned int __remaining; /* remaining bytes on page */ 421 unsigned int __flags; 422}; 423 424void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, 425 unsigned int nents, unsigned int flags); 426bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset); 427bool sg_miter_next(struct sg_mapping_iter *miter); 428void sg_miter_stop(struct sg_mapping_iter *miter); 429 430#endif /* _LINUX_SCATTERLIST_H */