Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.10-rc1 343 lines 10 kB view raw
#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H

#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mm.h>

#include <asm/types.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

struct sg_table {
	struct scatterlist *sgl;	/* the list */
	unsigned int nents;		/* number of mapped entries */
	unsigned int orig_nents;	/* original size of list */
};

/*
 * Notes on SG table design.
 *
 * Architectures must provide an unsigned long page_link field in the
 * scatterlist struct. We use that to place the page pointer AND encode
 * information about the sg table as well. The two lower bits are reserved
 * for this information.
 *
 * If bit 0 is set, then the page_link contains a pointer to the next sg
 * table list. Otherwise the next entry is at sg + 1.
 *
 * If bit 1 is set, then this sg entry is the last element in a list.
 *
 * See sg_next().
 *
 */

#define SG_MAGIC	0x87654321

/*
 * We overload the LSB of the page pointer to indicate whether it's
 * a valid sg entry, or whether it points to the start of a new scatterlist.
 * Those low bits are there for everyone! (thanks mason :-)
 */
#define sg_is_chain(sg)		((sg)->page_link & 0x01)
#define sg_is_last(sg)		((sg)->page_link & 0x02)
#define sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~0x03))

/**
 * sg_assign_page - Assign a given page to an SG entry
 * @sg:		SG entry
 * @page:	The page
 *
 * Description:
 *   Assign page to sg entry. Also see sg_set_page(), the most commonly used
 *   variant.
 *
 **/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
	/* preserve the existing chain/end marker bits of this entry */
	unsigned long page_link = sg->page_link & 0x3;

	/*
	 * In order for the low bit stealing approach to work, pages
	 * must be aligned at a 32-bit boundary as a minimum.
	 */
	BUG_ON((unsigned long) page & 0x03);
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
	BUG_ON(sg_is_chain(sg));
#endif
	sg->page_link = page_link | (unsigned long) page;
}

/**
 * sg_set_page - Set sg entry to point at given page
 * @sg:		SG entry
 * @page:	The page
 * @len:	Length of data
 * @offset:	Offset into page
 *
 * Description:
 *   Use this function to set an sg entry pointing at a page, never assign
 *   the page directly. We encode sg table information in the lower bits
 *   of the page pointer. See sg_page() for looking up the page belonging
 *   to an sg entry.
 *
 **/
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
			       unsigned int len, unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}

/* Return the struct page backing @sg, with the marker bits masked off. */
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
	BUG_ON(sg_is_chain(sg));
#endif
	return (struct page *)((sg)->page_link & ~0x3);
}

/**
 * sg_set_buf - Set sg entry to point at given data
 * @sg:		SG entry
 * @buf:	Data
 * @buflen:	Data length
 *
 **/
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
			      unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}

/*
 * Loop over each sg element, following the pointer to a new list if necessary
 */
#define for_each_sg(sglist, sg, nr, __i)	\
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

/**
 * sg_chain - Chain two sglists together
 * @prv:	First scatterlist
 * @prv_nents:	Number of entries in prv
 * @sgl:	Second scatterlist
 *
 * Description:
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
 *
 **/
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
			    struct scatterlist *sgl)
{
	/* chaining is only legal on architectures that opt in */
#ifndef ARCH_HAS_SG_CHAIN
	BUG();
#endif

	/*
	 * offset and length are unused for chain entry. Clear them.
	 */
	prv[prv_nents - 1].offset = 0;
	prv[prv_nents - 1].length = 0;

	/*
	 * Set lowest bit to indicate a link pointer, and make sure to clear
	 * the termination bit if it happens to be set.
	 */
	prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}

/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:		SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table. A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void sg_mark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	/*
	 * Set termination bit, clear potential chain bit
	 */
	sg->page_link |= 0x02;
	sg->page_link &= ~0x01;
}

/**
 * sg_unmark_end - Undo setting the end of the scatterlist
 * @sg:		SG entry
 *
 * Description:
 *   Removes the termination marker from the given entry of the scatterlist.
 *
 **/
static inline void sg_unmark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	sg->page_link &= ~0x02;
}

/**
 * sg_phys - Return physical address of an sg entry
 * @sg:		SG entry
 *
 * Description:
 *   This calls page_to_phys() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that it is legal to call page_to_phys()
 *   on the sg page.
 *
 **/
static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}

/**
 * sg_virt - Return virtual address of an sg entry
 * @sg:		SG entry
 *
 * Description:
 *   This calls page_address() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that the sg page has a valid virtual
 *   mapping.
 *
 **/
static inline void *sg_virt(struct scatterlist *sg)
{
	return page_address(sg_page(sg)) + sg->offset;
}

int sg_nents(struct scatterlist *sg);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);

/* allocator/free callbacks used by the extended table-allocation API below */
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);

void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
		     sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
			      struct page **pages, unsigned int n_pages,
			      unsigned long offset, unsigned long size,
			      gfp_t gfp_mask);

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen);

/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC		(PAGE_SIZE / sizeof(struct scatterlist))

/*
 * sg page iterator
 *
 * Iterates over sg entries page-by-page. On each successful iteration,
 * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
 * to get the current page and its dma address. @piter->sg will point to the
 * sg holding this page and @piter->sg_pgoffset to the page's page offset
 * within the sg. The iteration will stop either when a maximum number of sg
 * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* sg holding the page */
	unsigned int		sg_pgoffset;	/* page offset within the sg */

	/* these are internal states, keep away */
	unsigned int		__nents;	/* remaining sg entries */
	int			__pg_advance;	/* nr pages to advance at the
						 * next step */
};

bool __sg_page_iter_next(struct sg_page_iter *piter);
void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset);
/**
 * sg_page_iter_page - get the current page held by the page iterator
 * @piter:	page iterator holding the page
 */
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}

/**
 * sg_page_iter_dma_address - get the dma address of the current page held by
 * the page iterator.
 * @piter:	page iterator holding the page
 */
static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
{
	return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
}

/**
 * for_each_sg_page - iterate over the pages of the given sg list
 * @sglist:	sglist to iterate over
 * @piter:	page iterator to hold current page, sg, sg_pgoffset
 * @nents:	maximum number of sg entries to iterate over
 * @pgoffset:	starting page offset
 */
#define for_each_sg_page(sglist, piter, nents, pgoffset)		   \
	for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
	     __sg_page_iter_next(piter);)

/*
 * Mapping sg iterator
 *
 * Iterates over sg entries mapping page-by-page. On each successful
 * iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr. As
 * long as an iteration is enclosed between start and stop, the user
 * is free to choose control structure and when to stop.
 *
 * @miter->consumed is set to @miter->length on each iteration. It
 * can be adjusted if the user can't consume all the bytes in one go.
 * Also, a stopped iteration can be resumed by calling next on it.
 * This is useful when iteration needs to release all resources and
 * continue later (e.g. at the next interrupt).
 */

#define SG_MITER_ATOMIC		(1 << 0)	/* use kmap_atomic */
#define SG_MITER_TO_SG		(1 << 1)	/* flush back to phys on unmap */
#define SG_MITER_FROM_SG	(1 << 2)	/* nop */

struct sg_mapping_iter {
	/* the following three fields can be accessed directly */
	struct page		*page;		/* currently mapped page */
	void			*addr;		/* pointer to the mapped area */
	size_t			length;		/* length of the mapped area */
	size_t			consumed;	/* number of consumed bytes */
	struct sg_page_iter	piter;		/* page iterator */

	/* these are internal states, keep away */
	unsigned int		__offset;	/* offset within page */
	unsigned int		__remaining;	/* remaining bytes on page */
	unsigned int		__flags;
};

void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);

#endif /* _LINUX_SCATTERLIST_H */