Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs/aio: Stop allocating aio rings from HIGHMEM

There is no need to allocate aio rings from HIGHMEM because only very
little memory is needed here.

Therefore, use GFP_USER flag in find_or_create_page() and get rid of
kmap*() mappings.

Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Ira Weiny <ira.weiny@intel.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Message-Id: <20230609145937.17610-1-fmdefrancesco@gmail.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>

authored by Fabio M. De Francesco and committed by Christian Brauner
5c075c5b b6334e2c

+9 -17
+9 -17
fs/aio.c
··· 530 530 for (i = 0; i < nr_pages; i++) { 531 531 struct page *page; 532 532 page = find_or_create_page(file->f_mapping, 533 - i, GFP_HIGHUSER | __GFP_ZERO); 533 + i, GFP_USER | __GFP_ZERO); 534 534 if (!page) 535 535 break; 536 536 pr_debug("pid(%d) page[%d]->count=%d\n", ··· 571 571 ctx->user_id = ctx->mmap_base; 572 572 ctx->nr_events = nr_events; /* trusted copy */ 573 573 574 - ring = kmap_atomic(ctx->ring_pages[0]); 574 + ring = page_address(ctx->ring_pages[0]); 575 575 ring->nr = nr_events; /* user copy */ 576 576 ring->id = ~0U; 577 577 ring->head = ring->tail = 0; ··· 579 579 ring->compat_features = AIO_RING_COMPAT_FEATURES; 580 580 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; 581 581 ring->header_length = sizeof(struct aio_ring); 582 - kunmap_atomic(ring); 583 582 flush_dcache_page(ctx->ring_pages[0]); 584 583 585 584 return 0; ··· 681 682 * we are protected from page migration 682 683 * changes ring_pages by ->ring_lock. 683 684 */ 684 - ring = kmap_atomic(ctx->ring_pages[0]); 685 + ring = page_address(ctx->ring_pages[0]); 685 686 ring->id = ctx->id; 686 - kunmap_atomic(ring); 687 687 return 0; 688 688 } 689 689 ··· 1023 1025 * against ctx->completed_events below will make sure we do the 1024 1026 * safe/right thing. 
1025 1027 */ 1026 - ring = kmap_atomic(ctx->ring_pages[0]); 1028 + ring = page_address(ctx->ring_pages[0]); 1027 1029 head = ring->head; 1028 - kunmap_atomic(ring); 1029 1030 1030 1031 refill_reqs_available(ctx, head, ctx->tail); 1031 1032 } ··· 1130 1133 if (++tail >= ctx->nr_events) 1131 1134 tail = 0; 1132 1135 1133 - ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1136 + ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1134 1137 event = ev_page + pos % AIO_EVENTS_PER_PAGE; 1135 1138 1136 1139 *event = iocb->ki_res; 1137 1140 1138 - kunmap_atomic(ev_page); 1139 1141 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1140 1142 1141 1143 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, ··· 1148 1152 1149 1153 ctx->tail = tail; 1150 1154 1151 - ring = kmap_atomic(ctx->ring_pages[0]); 1155 + ring = page_address(ctx->ring_pages[0]); 1152 1156 head = ring->head; 1153 1157 ring->tail = tail; 1154 - kunmap_atomic(ring); 1155 1158 flush_dcache_page(ctx->ring_pages[0]); 1156 1159 1157 1160 ctx->completed_events++; ··· 1210 1215 mutex_lock(&ctx->ring_lock); 1211 1216 1212 1217 /* Access to ->ring_pages here is protected by ctx->ring_lock. 
*/ 1213 - ring = kmap_atomic(ctx->ring_pages[0]); 1218 + ring = page_address(ctx->ring_pages[0]); 1214 1219 head = ring->head; 1215 1220 tail = ring->tail; 1216 - kunmap_atomic(ring); 1217 1221 1218 1222 /* 1219 1223 * Ensure that once we've read the current tail pointer, that ··· 1244 1250 avail = min(avail, nr - ret); 1245 1251 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos); 1246 1252 1247 - ev = kmap(page); 1253 + ev = page_address(page); 1248 1254 copy_ret = copy_to_user(event + ret, ev + pos, 1249 1255 sizeof(*ev) * avail); 1250 - kunmap(page); 1251 1256 1252 1257 if (unlikely(copy_ret)) { 1253 1258 ret = -EFAULT; ··· 1258 1265 head %= ctx->nr_events; 1259 1266 } 1260 1267 1261 - ring = kmap_atomic(ctx->ring_pages[0]); 1268 + ring = page_address(ctx->ring_pages[0]); 1262 1269 ring->head = head; 1263 - kunmap_atomic(ring); 1264 1270 flush_dcache_page(ctx->ring_pages[0]); 1265 1271 1266 1272 pr_debug("%li h%u t%u\n", ret, head, tail);