Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netmem: replace __netmem_clear_lsb() with netmem_to_nmdesc()

Now that we have struct netmem_desc, it is better to access the pp fields
via struct netmem_desc rather than struct net_iov.

Introduce netmem_to_nmdesc() for safely converting a netmem_ref to a
netmem_desc regardless of the underlying type, i.e. netmem_desc or net_iov.

While at it, remove __netmem_clear_lsb() and use netmem_to_nmdesc()
instead.

Suggested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Byungchul Park <byungchul@sk.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Link: https://patch.msgid.link/20251013044133.69472-1-byungchul@sk.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

authored by

Byungchul Park and committed by
Paolo Abeni
53615ad2 3dacc900

+42 -42
+34 -34
include/net/netmem.h
··· 247 247 return page_to_pfn(netmem_to_page(netmem)); 248 248 } 249 249 250 + /* XXX: How to extract netmem_desc from page must be changed, once 251 + * netmem_desc no longer overlays on page and will be allocated through 252 + * slab. 253 + */ 254 + #define __pp_page_to_nmdesc(p) (_Generic((p), \ 255 + const struct page * : (const struct netmem_desc *)(p), \ 256 + struct page * : (struct netmem_desc *)(p))) 257 + 258 + /* CAUTION: Check if the page is a pp page before calling this helper or 259 + * know it's a pp page. 260 + */ 261 + #define pp_page_to_nmdesc(p) \ 262 + ({ \ 263 + DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p)); \ 264 + __pp_page_to_nmdesc(p); \ 265 + }) 266 + 250 267 /** 251 268 * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing 252 269 * @netmem ··· 282 265 return (__force struct netmem_desc *)netmem; 283 266 } 284 267 285 - /* __netmem_clear_lsb - convert netmem_ref to struct net_iov * for access to 286 - * common fields. 287 - * @netmem: netmem reference to extract as net_iov. 268 + /* netmem_to_nmdesc - convert netmem_ref to struct netmem_desc * for 269 + * access to common fields. 270 + * @netmem: netmem reference to get netmem_desc. 288 271 * 289 - * All the sub types of netmem_ref (page, net_iov) have the same pp, pp_magic, 290 - * dma_addr, and pp_ref_count fields at the same offsets. Thus, we can access 291 - * these fields without a type check to make sure that the underlying mem is 292 - * net_iov or page. 272 + * All the sub types of netmem_ref (netmem_desc, net_iov) have the same 273 + * pp, pp_magic, dma_addr, and pp_ref_count fields via netmem_desc. 293 274 * 294 - * The resulting value of this function can only be used to access the fields 295 - * that are NET_IOV_ASSERT_OFFSET'd. Accessing any other fields will result in 296 - * undefined behavior. 297 - * 298 - * Return: the netmem_ref cast to net_iov* regardless of its underlying type. 
275 + * Return: the pointer to struct netmem_desc * regardless of its 276 + * underlying type. 299 277 */ 300 - static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem) 278 + static inline struct netmem_desc *netmem_to_nmdesc(netmem_ref netmem) 301 279 { 302 - return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV); 280 + void *p = (void *)((__force unsigned long)netmem & ~NET_IOV); 281 + 282 + if (netmem_is_net_iov(netmem)) 283 + return &((struct net_iov *)p)->desc; 284 + 285 + return __pp_page_to_nmdesc((struct page *)p); 303 286 } 304 - 305 - /* XXX: How to extract netmem_desc from page must be changed, once 306 - * netmem_desc no longer overlays on page and will be allocated through 307 - * slab. 308 - */ 309 - #define __pp_page_to_nmdesc(p) (_Generic((p), \ 310 - const struct page * : (const struct netmem_desc *)(p), \ 311 - struct page * : (struct netmem_desc *)(p))) 312 - 313 - /* CAUTION: Check if the page is a pp page before calling this helper or 314 - * know it's a pp page. 
315 - */ 316 - #define pp_page_to_nmdesc(p) \ 317 - ({ \ 318 - DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p)); \ 319 - __pp_page_to_nmdesc(p); \ 320 - }) 321 287 322 288 /** 323 289 * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem ··· 320 320 321 321 static inline struct page_pool *netmem_get_pp(netmem_ref netmem) 322 322 { 323 - return __netmem_clear_lsb(netmem)->pp; 323 + return netmem_to_nmdesc(netmem)->pp; 324 324 } 325 325 326 326 static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem) 327 327 { 328 - return &__netmem_clear_lsb(netmem)->pp_ref_count; 328 + return &netmem_to_nmdesc(netmem)->pp_ref_count; 329 329 } 330 330 331 331 static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid) ··· 390 390 391 391 static inline unsigned long netmem_get_dma_addr(netmem_ref netmem) 392 392 { 393 - return __netmem_clear_lsb(netmem)->dma_addr; 393 + return netmem_to_nmdesc(netmem)->dma_addr; 394 394 } 395 395 396 396 void get_netmem(netmem_ref netmem);
+8 -8
net/core/netmem_priv.h
··· 5 5 6 6 static inline unsigned long netmem_get_pp_magic(netmem_ref netmem) 7 7 { 8 - return __netmem_clear_lsb(netmem)->pp_magic & ~PP_DMA_INDEX_MASK; 8 + return netmem_to_nmdesc(netmem)->pp_magic & ~PP_DMA_INDEX_MASK; 9 9 } 10 10 11 11 static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic) 12 12 { 13 - __netmem_clear_lsb(netmem)->pp_magic |= pp_magic; 13 + netmem_to_nmdesc(netmem)->pp_magic |= pp_magic; 14 14 } 15 15 16 16 static inline void netmem_clear_pp_magic(netmem_ref netmem) 17 17 { 18 - WARN_ON_ONCE(__netmem_clear_lsb(netmem)->pp_magic & PP_DMA_INDEX_MASK); 18 + WARN_ON_ONCE(netmem_to_nmdesc(netmem)->pp_magic & PP_DMA_INDEX_MASK); 19 19 20 - __netmem_clear_lsb(netmem)->pp_magic = 0; 20 + netmem_to_nmdesc(netmem)->pp_magic = 0; 21 21 } 22 22 23 23 static inline bool netmem_is_pp(netmem_ref netmem) ··· 27 27 28 28 static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool) 29 29 { 30 - __netmem_clear_lsb(netmem)->pp = pool; 30 + netmem_to_nmdesc(netmem)->pp = pool; 31 31 } 32 32 33 33 static inline void netmem_set_dma_addr(netmem_ref netmem, 34 34 unsigned long dma_addr) 35 35 { 36 - __netmem_clear_lsb(netmem)->dma_addr = dma_addr; 36 + netmem_to_nmdesc(netmem)->dma_addr = dma_addr; 37 37 } 38 38 39 39 static inline unsigned long netmem_get_dma_index(netmem_ref netmem) ··· 43 43 if (WARN_ON_ONCE(netmem_is_net_iov(netmem))) 44 44 return 0; 45 45 46 - magic = __netmem_clear_lsb(netmem)->pp_magic; 46 + magic = netmem_to_nmdesc(netmem)->pp_magic; 47 47 48 48 return (magic & PP_DMA_INDEX_MASK) >> PP_DMA_INDEX_SHIFT; 49 49 } ··· 57 57 return; 58 58 59 59 magic = netmem_get_pp_magic(netmem) | (id << PP_DMA_INDEX_SHIFT); 60 - __netmem_clear_lsb(netmem)->pp_magic = magic; 60 + netmem_to_nmdesc(netmem)->pp_magic = magic; 61 61 } 62 62 #endif