Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */
#include <linux/poison.h>

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX	LONG_MAX

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL also allows the page to be used for DMA
	 * sending, which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying RX data
		 */
	}

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
	    pool->p.flags & PP_FLAG_PAGE_FRAG)
		return -EINVAL;

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);
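
/* Example: a minimal sketch of how an Ethernet driver might set up one
 * page_pool per RX ring. The struct my_rx_ring, its fields and the device
 * pointer are hypothetical placeholders, not part of this file; only the
 * page_pool_params fields and flags validated above are the real API.
 *
 *	static int my_rx_ring_create_pool(struct my_rx_ring *ring,
 *					  struct device *dev)
 *	{
 *		struct page_pool_params pp_params = {
 *			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *			.order		= 0,
 *			.pool_size	= ring->size,
 *			.nid		= NUMA_NO_NODE,
 *			.dev		= dev,
 *			.dma_dir	= DMA_FROM_DEVICE,
 *			.max_len	= PAGE_SIZE,
 *			.offset		= 0,
 *		};
 *		struct page_pool *pool;
 *
 *		pool = page_pool_create(&pp_params);
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *
 *		ring->page_pool = pool;
 *		return 0;
 *	}
 */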

static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable.
	 * This assumes the CPU refilling the driver RX-ring will also run
	 * the RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to the page-allocator and
			 * (2) break out to fall through to alloc_pages_node().
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	/* Setup DMA mapping: use the 'struct page' area for storing the
	 * DMA address, since dma_addr_t can be either 32 or 64 bits and
	 * does not always fit into the page private data (e.g. a 32-bit
	 * CPU with 64-bit DMA capabilities).
	 * This mapping is kept for the lifetime of the page, until it
	 * leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	page_pool_set_dma_addr(page, dma);

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

	return true;
}

static void page_pool_set_pp_info(struct page_pool *pool,
				  struct page *page)
{
	page->pp = pool;
	page->pp_magic |= PP_SIGNATURE;
	if (pool->p.init_callback)
		pool->p.init_callback(page, pool->p.init_arg);
}

static void page_pool_clear_pp_info(struct page *page)
{
	page->pp_magic = 0;
	page->pp = NULL;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
	    unlikely(!page_pool_dma_map(pool, page))) {
		put_page(page);
		return NULL;
	}

	page_pool_set_pp_info(pool, page);

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_flags = pool->p.flags;
	unsigned int pp_order = pool->p.order;
	struct page *page;
	int i, nr_pages;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return __page_pool_alloc_page_order(pool, gfp);

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
	if (unlikely(!nr_pages))
		return NULL;

	/* Pages have been filled into the alloc.cache array, but the count
	 * is zero and the pages have not (yet) been DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pool->alloc.cache[i];
		if ((pp_flags & PP_FLAG_DMA_MAP) &&
		    unlikely(!page_pool_dma_map(pool, page))) {
			put_page(page);
			continue;
		}

		page_pool_set_pp_info(pool, page);
		pool->alloc.cache[pool->alloc.count++] = page;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, page,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];
	else
		page = NULL;

	/* A page that was just allocated should/must have refcnt 1. */
	return page;
}

/* page_pool is meant to replace alloc_pages() API calls, but it provides
 * a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
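
/* Example: a minimal sketch of an RX-ring refill loop that allocates from
 * the pool and programs the DMA address into a descriptor. The ring and
 * descriptor layout (my_rx_ring, my_rx_desc, headroom) are hypothetical;
 * page_pool_alloc_pages(), page_pool_get_dma_addr() and the GFP flags are
 * the real APIs used here.
 *
 *	static int my_rx_ring_refill(struct my_rx_ring *ring)
 *	{
 *		while (ring->free_descs) {
 *			struct my_rx_desc *desc = my_rx_next_desc(ring);
 *			struct page *page;
 *
 *			page = page_pool_alloc_pages(ring->page_pool,
 *						     GFP_ATOMIC | __GFP_NOWARN);
 *			if (!page)
 *				return -ENOMEM;
 *
 *			desc->addr = page_pool_get_dma_addr(page) + ring->headroom;
 *			desc->page = page;
 *			ring->free_descs--;
 *		}
 *		return 0;
 *	}
 */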

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))
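
/* A worked example of the serial-number arithmetic above (the values are
 * purely illustrative): if hold_cnt has wrapped around to 5 while
 * release_cnt is still at 0xfffffffe, the unsigned subtraction 5 - 0xfffffffe
 * wraps to 7, and the cast to s32 keeps it at +7 in-flight pages. Conversely,
 * _distance(0xfffffffe, 5) casts to -7, which page_pool_inflight() below
 * reports via WARN() as a negative in-flight count.
 */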

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

/* Disconnect a page from a page_pool. API users may need to disconnect
 * a page from its page_pool, to allow it to be used as a regular page
 * (that will eventually be returned to the normal page-allocator via
 * put_page()).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page_pool_get_dma_addr(page);

	/* When the page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
	page_pool_clear_pp_info(page);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference the pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;
	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->p.max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	/* It is not the last user for the page frag case */
	if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
	    page_pool_atomic_sub_frag_count_return(page, 1))
		return NULL;

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 *
	 * A page is NOT reusable when it was allocated while the system
	 * was under memory pressure (page_is_pfmemalloc()).
	 */
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}
	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page().
	 */
	/* Do not replace this with page_pool_return_page() */
	page_pool_release_page(pool, page);
	put_page(page);

	return NULL;
}

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_page);
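
/* Example: a minimal sketch of a driver's RX completion path returning a
 * page. The descriptor plumbing (my_rx_ring, my_rx_desc) is hypothetical;
 * page_pool_put_page() and page_pool_put_full_page() are the real entry
 * points, and allow_direct should only be true from the pool's own
 * RX-NAPI (softirq) context.
 *
 *	static void my_rx_drop(struct my_rx_ring *ring, struct my_rx_desc *desc,
 *			       unsigned int pkt_len)
 *	{
 *		// Sync only the bytes the device may have written; recycle
 *		// directly into the alloc cache since we run in RX-NAPI.
 *		page_pool_put_page(ring->page_pool, desc->page, pkt_len, true);
 *	}
 *
 *	static void my_rx_free_buf(struct my_rx_ring *ring, struct my_rx_desc *desc)
 *	{
 *		// Outside NAPI (e.g. ring teardown): full sync, no direct recycle.
 *		page_pool_put_full_page(ring->page_pool, desc->page, false);
 *	}
 */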

/* Caller must not use the data area after this call, as this function
 * overwrites it.
 */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	page_pool_ring_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i]))
			break; /* ring full */
	}
	page_pool_ring_unlock(pool);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
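
/* Example: a minimal sketch of batching returns through the bulk API
 * instead of calling page_pool_put_page() once per page. The fixed-size
 * queue and flush threshold (my_bulk, MY_BULK_SIZE) are illustrative only;
 * data[] holds virtual addresses of the buffers, matching the
 * virt_to_head_page() lookup done above.
 *
 *	#define MY_BULK_SIZE	16
 *
 *	struct my_bulk {
 *		int count;
 *		void *q[MY_BULK_SIZE];
 *	};
 *
 *	static void my_bulk_add(struct page_pool *pool, struct my_bulk *b,
 *				void *data)
 *	{
 *		b->q[b->count++] = data;
 *		if (b->count == MY_BULK_SIZE) {
 *			page_pool_put_page_bulk(pool, b->q, b->count);
 *			b->count = 0;
 *		}
 *	}
 */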

static struct page *page_pool_drain_frag(struct page_pool *pool,
					 struct page *page)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_atomic_sub_frag_count_return(page,
							  drain_count)))
		return NULL;

	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page, -1);

		return page;
	}

	page_pool_return_page(pool, page);
	return NULL;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	struct page *page = pool->frag_page;

	pool->frag_page = NULL;

	if (!page ||
	    page_pool_atomic_sub_frag_count_return(page, drain_count))
		return;

	page_pool_return_page(pool, page);
}

struct page *page_pool_alloc_frag(struct page_pool *pool,
				  unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page = pool->frag_page;

	if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
		    size > max_size))
		return NULL;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (page && *offset + size > max_size) {
		page = page_pool_drain_frag(pool, page);
		if (page)
			goto frag_reset;
	}

	if (!page) {
		page = page_pool_alloc_pages(pool, gfp);
		if (unlikely(!page)) {
			pool->frag_page = NULL;
			return NULL;
		}

		pool->frag_page = page;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_set_frag_count(page, BIAS_MAX);
		return page;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_frag);
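
/* Example: a minimal sketch of the frag API for drivers that pack several
 * RX buffers into one pool page (PP_FLAG_PAGE_FRAG must be set at pool
 * creation). The buffer size and descriptor fields are hypothetical; the
 * call returns the page plus an offset into it, and the frag is later
 * released through the normal page_pool_put_page()/page_pool_put_full_page()
 * path shown above.
 *
 *	static int my_rx_alloc_buf(struct my_rx_ring *ring, struct my_rx_desc *desc)
 *	{
 *		unsigned int offset;
 *		struct page *page;
 *
 *		page = page_pool_alloc_frag(ring->page_pool, &offset,
 *					    2048, GFP_ATOMIC | __GFP_NOWARN);
 *		if (!page)
 *			return -ENOMEM;
 *
 *		desc->page = page;
 *		desc->page_offset = offset;
 *		desc->addr = page_pool_get_dma_addr(page) + offset;
 *		return 0;
 *	}
 */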

static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty the alloc cache. Assume the caller made sure it is
	 * no longer in use, and that page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
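
/* Example: a minimal sketch of driver teardown ordering. The ring walk is
 * hypothetical; the points it illustrates follow from the code above:
 * outstanding pages must be returned (or released via
 * page_pool_release_page()) before page_pool_destroy() can actually free
 * the pool, and page_pool_destroy() is safe to call with a NULL pool.
 *
 *	static void my_rx_ring_free(struct my_rx_ring *ring)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ring->size; i++) {
 *			struct my_rx_desc *desc = &ring->descs[i];
 *
 *			if (desc->page)
 *				page_pool_put_full_page(ring->page_pool,
 *							desc->page, false);
 *		}
 *
 *		// If pages are still in-flight elsewhere, destroy defers the
 *		// actual free and retries via delayed work (see above).
 *		page_pool_destroy(ring->page_pool);
 *		ring->page_pool = NULL;
 *	}
 */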

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);

bool page_pool_return_skb_page(struct page *page)
{
	struct page_pool *pp;

	page = compound_head(page);

	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
	 * in order to preserve any existing bits, such as bit 0 for the
	 * head page of a compound page and bit 1 for a pfmemalloc page, so
	 * mask those bits on the freeing side when doing the check below.
	 * page_is_pfmemalloc() is checked in __page_pool_put_page()
	 * to avoid recycling the pfmemalloc page.
	 */
	if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
		return false;

	pp = page->pp;

	/* The driver sets this to its memory recycling info. Reset it on
	 * recycle. This will *not* work for a NIC using a split-page memory
	 * model.
	 * The page will be returned to the pool here regardless of whether
	 * the 'flipped' fragment is in use or not.
	 */
	page_pool_put_full_page(pp, page, false);

	return true;
}
EXPORT_SYMBOL(page_pool_return_skb_page);