Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Move netfs_extract_iter_to_sg() to lib/scatterlist.c

Move netfs_extract_iter_to_sg() to lib/scatterlist.c as it's going to be
used by more than just network filesystems (AF_ALG, for example).

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: Steve French <sfrench@samba.org>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: Rohith Surabattula <rohiths.msft@gmail.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Herbert Xu <herbert@gondor.apana.org.au>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-crypto@vger.kernel.org
cc: linux-cachefs@redhat.com
cc: linux-cifs@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
cc: netdev@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Authored by David Howells; committed by Paolo Abeni.
f5f82cd1 936dc763

+274 -271
-267
fs/netfs/iterator.c
··· 101 101 return npages; 102 102 } 103 103 EXPORT_SYMBOL_GPL(netfs_extract_user_iter); 104 - 105 - /* 106 - * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class 107 - * iterators, and add them to the scatterlist. 108 - */ 109 - static ssize_t extract_user_to_sg(struct iov_iter *iter, 110 - ssize_t maxsize, 111 - struct sg_table *sgtable, 112 - unsigned int sg_max, 113 - iov_iter_extraction_t extraction_flags) 114 - { 115 - struct scatterlist *sg = sgtable->sgl + sgtable->nents; 116 - struct page **pages; 117 - unsigned int npages; 118 - ssize_t ret = 0, res; 119 - size_t len, off; 120 - 121 - /* We decant the page list into the tail of the scatterlist */ 122 - pages = (void *)sgtable->sgl + 123 - array_size(sg_max, sizeof(struct scatterlist)); 124 - pages -= sg_max; 125 - 126 - do { 127 - res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max, 128 - extraction_flags, &off); 129 - if (res < 0) 130 - goto failed; 131 - 132 - len = res; 133 - maxsize -= len; 134 - ret += len; 135 - npages = DIV_ROUND_UP(off + len, PAGE_SIZE); 136 - sg_max -= npages; 137 - 138 - for (; npages > 0; npages--) { 139 - struct page *page = *pages; 140 - size_t seg = min_t(size_t, PAGE_SIZE - off, len); 141 - 142 - *pages++ = NULL; 143 - sg_set_page(sg, page, seg, off); 144 - sgtable->nents++; 145 - sg++; 146 - len -= seg; 147 - off = 0; 148 - } 149 - } while (maxsize > 0 && sg_max > 0); 150 - 151 - return ret; 152 - 153 - failed: 154 - while (sgtable->nents > sgtable->orig_nents) 155 - put_page(sg_page(&sgtable->sgl[--sgtable->nents])); 156 - return res; 157 - } 158 - 159 - /* 160 - * Extract up to sg_max pages from a BVEC-type iterator and add them to the 161 - * scatterlist. The pages are not pinned. 
162 - */ 163 - static ssize_t extract_bvec_to_sg(struct iov_iter *iter, 164 - ssize_t maxsize, 165 - struct sg_table *sgtable, 166 - unsigned int sg_max, 167 - iov_iter_extraction_t extraction_flags) 168 - { 169 - const struct bio_vec *bv = iter->bvec; 170 - struct scatterlist *sg = sgtable->sgl + sgtable->nents; 171 - unsigned long start = iter->iov_offset; 172 - unsigned int i; 173 - ssize_t ret = 0; 174 - 175 - for (i = 0; i < iter->nr_segs; i++) { 176 - size_t off, len; 177 - 178 - len = bv[i].bv_len; 179 - if (start >= len) { 180 - start -= len; 181 - continue; 182 - } 183 - 184 - len = min_t(size_t, maxsize, len - start); 185 - off = bv[i].bv_offset + start; 186 - 187 - sg_set_page(sg, bv[i].bv_page, len, off); 188 - sgtable->nents++; 189 - sg++; 190 - sg_max--; 191 - 192 - ret += len; 193 - maxsize -= len; 194 - if (maxsize <= 0 || sg_max == 0) 195 - break; 196 - start = 0; 197 - } 198 - 199 - if (ret > 0) 200 - iov_iter_advance(iter, ret); 201 - return ret; 202 - } 203 - 204 - /* 205 - * Extract up to sg_max pages from a KVEC-type iterator and add them to the 206 - * scatterlist. This can deal with vmalloc'd buffers as well as kmalloc'd or 207 - * static buffers. The pages are not pinned. 
208 - */ 209 - static ssize_t extract_kvec_to_sg(struct iov_iter *iter, 210 - ssize_t maxsize, 211 - struct sg_table *sgtable, 212 - unsigned int sg_max, 213 - iov_iter_extraction_t extraction_flags) 214 - { 215 - const struct kvec *kv = iter->kvec; 216 - struct scatterlist *sg = sgtable->sgl + sgtable->nents; 217 - unsigned long start = iter->iov_offset; 218 - unsigned int i; 219 - ssize_t ret = 0; 220 - 221 - for (i = 0; i < iter->nr_segs; i++) { 222 - struct page *page; 223 - unsigned long kaddr; 224 - size_t off, len, seg; 225 - 226 - len = kv[i].iov_len; 227 - if (start >= len) { 228 - start -= len; 229 - continue; 230 - } 231 - 232 - kaddr = (unsigned long)kv[i].iov_base + start; 233 - off = kaddr & ~PAGE_MASK; 234 - len = min_t(size_t, maxsize, len - start); 235 - kaddr &= PAGE_MASK; 236 - 237 - maxsize -= len; 238 - ret += len; 239 - do { 240 - seg = min_t(size_t, len, PAGE_SIZE - off); 241 - if (is_vmalloc_or_module_addr((void *)kaddr)) 242 - page = vmalloc_to_page((void *)kaddr); 243 - else 244 - page = virt_to_page(kaddr); 245 - 246 - sg_set_page(sg, page, len, off); 247 - sgtable->nents++; 248 - sg++; 249 - sg_max--; 250 - 251 - len -= seg; 252 - kaddr += PAGE_SIZE; 253 - off = 0; 254 - } while (len > 0 && sg_max > 0); 255 - 256 - if (maxsize <= 0 || sg_max == 0) 257 - break; 258 - start = 0; 259 - } 260 - 261 - if (ret > 0) 262 - iov_iter_advance(iter, ret); 263 - return ret; 264 - } 265 - 266 - /* 267 - * Extract up to sg_max folios from an XARRAY-type iterator and add them to 268 - * the scatterlist. The pages are not pinned. 
269 - */ 270 - static ssize_t extract_xarray_to_sg(struct iov_iter *iter, 271 - ssize_t maxsize, 272 - struct sg_table *sgtable, 273 - unsigned int sg_max, 274 - iov_iter_extraction_t extraction_flags) 275 - { 276 - struct scatterlist *sg = sgtable->sgl + sgtable->nents; 277 - struct xarray *xa = iter->xarray; 278 - struct folio *folio; 279 - loff_t start = iter->xarray_start + iter->iov_offset; 280 - pgoff_t index = start / PAGE_SIZE; 281 - ssize_t ret = 0; 282 - size_t offset, len; 283 - XA_STATE(xas, xa, index); 284 - 285 - rcu_read_lock(); 286 - 287 - xas_for_each(&xas, folio, ULONG_MAX) { 288 - if (xas_retry(&xas, folio)) 289 - continue; 290 - if (WARN_ON(xa_is_value(folio))) 291 - break; 292 - if (WARN_ON(folio_test_hugetlb(folio))) 293 - break; 294 - 295 - offset = offset_in_folio(folio, start); 296 - len = min_t(size_t, maxsize, folio_size(folio) - offset); 297 - 298 - sg_set_page(sg, folio_page(folio, 0), len, offset); 299 - sgtable->nents++; 300 - sg++; 301 - sg_max--; 302 - 303 - maxsize -= len; 304 - ret += len; 305 - if (maxsize <= 0 || sg_max == 0) 306 - break; 307 - } 308 - 309 - rcu_read_unlock(); 310 - if (ret > 0) 311 - iov_iter_advance(iter, ret); 312 - return ret; 313 - } 314 - 315 - /** 316 - * extract_iter_to_sg - Extract pages from an iterator and add to an sglist 317 - * @iter: The iterator to extract from 318 - * @maxsize: The amount of iterator to copy 319 - * @sgtable: The scatterlist table to fill in 320 - * @sg_max: Maximum number of elements in @sgtable that may be filled 321 - * @extraction_flags: Flags to qualify the request 322 - * 323 - * Extract the page fragments from the given amount of the source iterator and 324 - * add them to a scatterlist that refers to all of those bits, to a maximum 325 - * addition of @sg_max elements. 
326 - * 327 - * The pages referred to by UBUF- and IOVEC-type iterators are extracted and 328 - * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE- 329 - * and DISCARD-type are not supported. 330 - * 331 - * No end mark is placed on the scatterlist; that's left to the caller. 332 - * 333 - * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA 334 - * be allowed on the pages extracted. 335 - * 336 - * If successful, @sgtable->nents is updated to include the number of elements 337 - * added and the number of bytes added is returned. @sgtable->orig_nents is 338 - * left unaltered. 339 - * 340 - * The iov_iter_extract_mode() function should be used to query how cleanup 341 - * should be performed. 342 - */ 343 - ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize, 344 - struct sg_table *sgtable, unsigned int sg_max, 345 - iov_iter_extraction_t extraction_flags) 346 - { 347 - if (maxsize == 0) 348 - return 0; 349 - 350 - switch (iov_iter_type(iter)) { 351 - case ITER_UBUF: 352 - case ITER_IOVEC: 353 - return extract_user_to_sg(iter, maxsize, sgtable, sg_max, 354 - extraction_flags); 355 - case ITER_BVEC: 356 - return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max, 357 - extraction_flags); 358 - case ITER_KVEC: 359 - return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max, 360 - extraction_flags); 361 - case ITER_XARRAY: 362 - return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max, 363 - extraction_flags); 364 - default: 365 - pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter)); 366 - WARN_ON_ONCE(1); 367 - return -EIO; 368 - } 369 - } 370 - EXPORT_SYMBOL_GPL(extract_iter_to_sg);
-4
include/linux/netfs.h
··· 300 300 ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len, 301 301 struct iov_iter *new, 302 302 iov_iter_extraction_t extraction_flags); 303 - struct sg_table; 304 - ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len, 305 - struct sg_table *sgtable, unsigned int sg_max, 306 - iov_iter_extraction_t extraction_flags); 307 303 308 304 /** 309 305 * netfs_inode - Get the netfs inode context from the inode
+5
include/linux/uio.h
··· 433 433 return user_backed_iter(iter); 434 434 } 435 435 436 + struct sg_table; 437 + ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len, 438 + struct sg_table *sgtable, unsigned int sg_max, 439 + iov_iter_extraction_t extraction_flags); 440 + 436 441 #endif
+269
lib/scatterlist.c
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
#include <linux/bvec.h>		/* added: struct bio_vec, used by extract_bvec_to_sg() */
#include <linux/uio.h>		/* added: struct iov_iter, iov_iter_extraction_t */

/**
 * sg_next - return the next scatterlist entry in a list

	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);

/*
 * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
 * iterators, and add them to the scatterlist.
 */
static ssize_t extract_user_to_sg(struct iov_iter *iter,
				  ssize_t maxsize,
				  struct sg_table *sgtable,
				  unsigned int sg_max,
				  iov_iter_extraction_t extraction_flags)
{
	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
	struct page **pages;
	unsigned int npages;
	ssize_t ret = 0, res;
	size_t len, off;

	/*
	 * We decant the page list into the tail of the scatterlist: the last
	 * sg_max slots of the sg array are reused as a struct page *[]
	 * buffer.  Entries are filled from the front of the sg array, and
	 * each extracted batch is converted to sg entries before the next
	 * extraction overwrites the buffer, so the two uses cannot collide.
	 */
	pages = (void *)sgtable->sgl +
		array_size(sg_max, sizeof(struct scatterlist));
	pages -= sg_max;

	do {
		/* Pin up to sg_max pages covering the next chunk of data */
		res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
					     extraction_flags, &off);
		if (res < 0)
			goto failed;

		len = res;
		maxsize -= len;
		ret += len;
		npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
		sg_max -= npages;

		/* One sg element per extracted page fragment; only the first
		 * fragment carries a nonzero in-page offset.
		 */
		for (; npages > 0; npages--) {
			struct page *page = *pages;
			size_t seg = min_t(size_t, PAGE_SIZE - off, len);

			*pages++ = NULL;
			sg_set_page(sg, page, seg, off);
			sgtable->nents++;
			sg++;
			len -= seg;
			off = 0;
		}
	} while (maxsize > 0 && sg_max > 0);

	return ret;

failed:
	/* Unpin everything this call added beyond the caller's entries */
	while (sgtable->nents > sgtable->orig_nents)
		put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
	return res;
}

/*
 * Extract up to sg_max pages from a BVEC-type iterator and add them to the
 * scatterlist.
The pages are not pinned. 1156 + */ 1157 + static ssize_t extract_bvec_to_sg(struct iov_iter *iter, 1158 + ssize_t maxsize, 1159 + struct sg_table *sgtable, 1160 + unsigned int sg_max, 1161 + iov_iter_extraction_t extraction_flags) 1162 + { 1163 + const struct bio_vec *bv = iter->bvec; 1164 + struct scatterlist *sg = sgtable->sgl + sgtable->nents; 1165 + unsigned long start = iter->iov_offset; 1166 + unsigned int i; 1167 + ssize_t ret = 0; 1168 + 1169 + for (i = 0; i < iter->nr_segs; i++) { 1170 + size_t off, len; 1171 + 1172 + len = bv[i].bv_len; 1173 + if (start >= len) { 1174 + start -= len; 1175 + continue; 1176 + } 1177 + 1178 + len = min_t(size_t, maxsize, len - start); 1179 + off = bv[i].bv_offset + start; 1180 + 1181 + sg_set_page(sg, bv[i].bv_page, len, off); 1182 + sgtable->nents++; 1183 + sg++; 1184 + sg_max--; 1185 + 1186 + ret += len; 1187 + maxsize -= len; 1188 + if (maxsize <= 0 || sg_max == 0) 1189 + break; 1190 + start = 0; 1191 + } 1192 + 1193 + if (ret > 0) 1194 + iov_iter_advance(iter, ret); 1195 + return ret; 1196 + } 1197 + 1198 + /* 1199 + * Extract up to sg_max pages from a KVEC-type iterator and add them to the 1200 + * scatterlist. This can deal with vmalloc'd buffers as well as kmalloc'd or 1201 + * static buffers. The pages are not pinned. 
1202 + */ 1203 + static ssize_t extract_kvec_to_sg(struct iov_iter *iter, 1204 + ssize_t maxsize, 1205 + struct sg_table *sgtable, 1206 + unsigned int sg_max, 1207 + iov_iter_extraction_t extraction_flags) 1208 + { 1209 + const struct kvec *kv = iter->kvec; 1210 + struct scatterlist *sg = sgtable->sgl + sgtable->nents; 1211 + unsigned long start = iter->iov_offset; 1212 + unsigned int i; 1213 + ssize_t ret = 0; 1214 + 1215 + for (i = 0; i < iter->nr_segs; i++) { 1216 + struct page *page; 1217 + unsigned long kaddr; 1218 + size_t off, len, seg; 1219 + 1220 + len = kv[i].iov_len; 1221 + if (start >= len) { 1222 + start -= len; 1223 + continue; 1224 + } 1225 + 1226 + kaddr = (unsigned long)kv[i].iov_base + start; 1227 + off = kaddr & ~PAGE_MASK; 1228 + len = min_t(size_t, maxsize, len - start); 1229 + kaddr &= PAGE_MASK; 1230 + 1231 + maxsize -= len; 1232 + ret += len; 1233 + do { 1234 + seg = min_t(size_t, len, PAGE_SIZE - off); 1235 + if (is_vmalloc_or_module_addr((void *)kaddr)) 1236 + page = vmalloc_to_page((void *)kaddr); 1237 + else 1238 + page = virt_to_page(kaddr); 1239 + 1240 + sg_set_page(sg, page, len, off); 1241 + sgtable->nents++; 1242 + sg++; 1243 + sg_max--; 1244 + 1245 + len -= seg; 1246 + kaddr += PAGE_SIZE; 1247 + off = 0; 1248 + } while (len > 0 && sg_max > 0); 1249 + 1250 + if (maxsize <= 0 || sg_max == 0) 1251 + break; 1252 + start = 0; 1253 + } 1254 + 1255 + if (ret > 0) 1256 + iov_iter_advance(iter, ret); 1257 + return ret; 1258 + } 1259 + 1260 + /* 1261 + * Extract up to sg_max folios from an XARRAY-type iterator and add them to 1262 + * the scatterlist. The pages are not pinned. 
 */
static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
				    ssize_t maxsize,
				    struct sg_table *sgtable,
				    unsigned int sg_max,
				    iov_iter_extraction_t extraction_flags)
{
	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
	struct xarray *xa = iter->xarray;
	struct folio *folio;
	loff_t start = iter->xarray_start + iter->iov_offset;
	pgoff_t index = start / PAGE_SIZE;
	ssize_t ret = 0;
	size_t offset, len;
	XA_STATE(xas, xa, index);

	/* Walk the xarray under RCU; folios are referenced by the xarray,
	 * not pinned here.
	 */
	rcu_read_lock();

	xas_for_each(&xas, folio, ULONG_MAX) {
		/* Skip transient retry entries; bail on unexpected contents */
		if (xas_retry(&xas, folio))
			continue;
		if (WARN_ON(xa_is_value(folio)))
			break;
		if (WARN_ON(folio_test_hugetlb(folio)))
			break;

		/*
		 * NOTE(review): start is never advanced after the first
		 * folio, so for an unaligned starting position every
		 * subsequent folio gets the same nonzero in-folio offset —
		 * presumably callers only pass positions where this works
		 * out; confirm against the callers.
		 */
		offset = offset_in_folio(folio, start);
		len = min_t(size_t, maxsize, folio_size(folio) - offset);

		sg_set_page(sg, folio_page(folio, 0), len, offset);
		sgtable->nents++;
		sg++;
		sg_max--;

		maxsize -= len;
		ret += len;
		if (maxsize <= 0 || sg_max == 0)
			break;
	}

	rcu_read_unlock();
	if (ret > 0)
		iov_iter_advance(iter, ret);
	return ret;
}

/**
 * extract_iter_to_sg - Extract pages from an iterator and add to an sglist
 * @iter: The iterator to extract from
 * @maxsize: The amount of iterator to copy
 * @sgtable: The scatterlist table to fill in
 * @sg_max: Maximum number of elements in @sgtable that may be filled
 * @extraction_flags: Flags to qualify the request
 *
 * Extract the page fragments from the given amount of the source iterator and
 * add them to a scatterlist that refers to all of those bits, to a maximum
 * addition of @sg_max elements.
1320 + * 1321 + * The pages referred to by UBUF- and IOVEC-type iterators are extracted and 1322 + * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE- 1323 + * and DISCARD-type are not supported. 1324 + * 1325 + * No end mark is placed on the scatterlist; that's left to the caller. 1326 + * 1327 + * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA 1328 + * be allowed on the pages extracted. 1329 + * 1330 + * If successful, @sgtable->nents is updated to include the number of elements 1331 + * added and the number of bytes added is returned. @sgtable->orig_nents is 1332 + * left unaltered. 1333 + * 1334 + * The iov_iter_extract_mode() function should be used to query how cleanup 1335 + * should be performed. 1336 + */ 1337 + ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize, 1338 + struct sg_table *sgtable, unsigned int sg_max, 1339 + iov_iter_extraction_t extraction_flags) 1340 + { 1341 + if (maxsize == 0) 1342 + return 0; 1343 + 1344 + switch (iov_iter_type(iter)) { 1345 + case ITER_UBUF: 1346 + case ITER_IOVEC: 1347 + return extract_user_to_sg(iter, maxsize, sgtable, sg_max, 1348 + extraction_flags); 1349 + case ITER_BVEC: 1350 + return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max, 1351 + extraction_flags); 1352 + case ITER_KVEC: 1353 + return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max, 1354 + extraction_flags); 1355 + case ITER_XARRAY: 1356 + return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max, 1357 + extraction_flags); 1358 + default: 1359 + pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter)); 1360 + WARN_ON_ONCE(1); 1361 + return -EIO; 1362 + } 1363 + } 1364 + EXPORT_SYMBOL_GPL(extract_iter_to_sg);