Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: introduce page_frag_cache_drain()

When draining a page_frag_cache, most users are doing
similar steps, so introduce an API to avoid code
duplication.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

authored by

Yunsheng Lin and committed by
Paolo Abeni
a0727489 4bc0d63a

+17 -33
+2 -9
drivers/net/ethernet/google/gve/gve_main.c
··· 1276 1276 1277 1277 static void gve_drain_page_cache(struct gve_priv *priv) 1278 1278 { 1279 - struct page_frag_cache *nc; 1280 1279 int i; 1281 1280 1282 - for (i = 0; i < priv->rx_cfg.num_queues; i++) { 1283 - nc = &priv->rx[i].page_cache; 1284 - if (nc->va) { 1285 - __page_frag_cache_drain(virt_to_page(nc->va), 1286 - nc->pagecnt_bias); 1287 - nc->va = NULL; 1288 - } 1289 - } 1281 + for (i = 0; i < priv->rx_cfg.num_queues; i++) 1282 + page_frag_cache_drain(&priv->rx[i].page_cache); 1290 1283 } 1291 1284 1292 1285 static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
+2 -15
drivers/net/ethernet/mediatek/mtk_wed_wo.c
··· 286 286 static void 287 287 mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) 288 288 { 289 - struct page *page; 290 289 int i; 291 290 292 291 for (i = 0; i < q->n_desc; i++) { ··· 300 301 entry->buf = NULL; 301 302 } 302 303 303 - if (!q->cache.va) 304 - return; 305 - 306 - page = virt_to_page(q->cache.va); 307 - __page_frag_cache_drain(page, q->cache.pagecnt_bias); 308 - memset(&q->cache, 0, sizeof(q->cache)); 304 + page_frag_cache_drain(&q->cache); 309 305 } 310 306 311 307 static void 312 308 mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) 313 309 { 314 - struct page *page; 315 - 316 310 for (;;) { 317 311 void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true); 318 312 ··· 315 323 skb_free_frag(buf); 316 324 } 317 325 318 - if (!q->cache.va) 319 - return; 320 - 321 - page = virt_to_page(q->cache.va); 322 - __page_frag_cache_drain(page, q->cache.pagecnt_bias); 323 - memset(&q->cache, 0, sizeof(q->cache)); 326 + page_frag_cache_drain(&q->cache); 324 327 } 325 328 326 329 static void
+1 -6
drivers/nvme/host/tcp.c
··· 1344 1344 1345 1345 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) 1346 1346 { 1347 - struct page *page; 1348 1347 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1349 1348 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 1350 1349 unsigned int noreclaim_flag; ··· 1354 1355 if (queue->hdr_digest || queue->data_digest) 1355 1356 nvme_tcp_free_crypto(queue); 1356 1357 1357 - if (queue->pf_cache.va) { 1358 - page = virt_to_head_page(queue->pf_cache.va); 1359 - __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); 1360 - queue->pf_cache.va = NULL; 1361 - } 1358 + page_frag_cache_drain(&queue->pf_cache); 1362 1359 1363 1360 noreclaim_flag = memalloc_noreclaim_save(); 1364 1361 /* ->sock will be released by fput() */
+1 -3
drivers/nvme/target/tcp.c
··· 1591 1591 1592 1592 static void nvmet_tcp_release_queue_work(struct work_struct *w) 1593 1593 { 1594 - struct page *page; 1595 1594 struct nvmet_tcp_queue *queue = 1596 1595 container_of(w, struct nvmet_tcp_queue, release_work); 1597 1596 ··· 1614 1615 if (queue->hdr_digest || queue->data_digest) 1615 1616 nvmet_tcp_free_crypto(queue); 1616 1617 ida_free(&nvmet_tcp_queue_ida, queue->idx); 1617 - page = virt_to_head_page(queue->pf_cache.va); 1618 - __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); 1618 + page_frag_cache_drain(&queue->pf_cache); 1619 1619 kfree(queue); 1620 1620 } 1621 1621
+1
include/linux/gfp.h
··· 311 311 extern void free_pages(unsigned long addr, unsigned int order); 312 312 313 313 struct page_frag_cache; 314 + void page_frag_cache_drain(struct page_frag_cache *nc); 314 315 extern void __page_frag_cache_drain(struct page *page, unsigned int count); 315 316 void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, 316 317 gfp_t gfp_mask, unsigned int align_mask);
+10
mm/page_alloc.c
··· 4699 4699 return page; 4700 4700 } 4701 4701 4702 + void page_frag_cache_drain(struct page_frag_cache *nc) 4703 + { 4704 + if (!nc->va) 4705 + return; 4706 + 4707 + __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias); 4708 + nc->va = NULL; 4709 + } 4710 + EXPORT_SYMBOL(page_frag_cache_drain); 4711 + 4702 4712 void __page_frag_cache_drain(struct page *page, unsigned int count) 4703 4713 { 4704 4714 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);