Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Add dmabuf iterator

The dmabuf iterator traverses the list of all DMA buffers.

DMA buffers are refcounted through their associated struct file. A
reference is taken on each buffer as the list is iterated to ensure each
buffer persists for the duration of the bpf program execution without
holding the list mutex.

Signed-off-by: T.J. Mercier <tjmercier@google.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20250522230429.941193-3-tjmercier@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by T.J. Mercier; committed by Alexei Starovoitov.
76ea9553 89f9dba3

+175
+68
drivers/dma-buf/dma-buf.c
··· 19 19 #include <linux/anon_inodes.h> 20 20 #include <linux/export.h> 21 21 #include <linux/debugfs.h> 22 + #include <linux/list.h> 22 23 #include <linux/module.h> 24 + #include <linux/mutex.h> 23 25 #include <linux/seq_file.h> 24 26 #include <linux/sync_file.h> 25 27 #include <linux/poll.h> ··· 55 53 mutex_lock(&dmabuf_list_mutex); 56 54 list_del(&dmabuf->list_node); 57 55 mutex_unlock(&dmabuf_list_mutex); 56 + } 57 + 58 + /** 59 + * dma_buf_iter_begin - begin iteration through global list of all DMA buffers 60 + * 61 + * Returns the first buffer in the global list of DMA-bufs that's not in the 62 + * process of being destroyed. Increments that buffer's reference count to 63 + * prevent buffer destruction. Callers must release the reference, either by 64 + * continuing iteration with dma_buf_iter_next(), or with dma_buf_put(). 65 + * 66 + * Return: 67 + * * First buffer from global list, with refcount elevated 68 + * * NULL if no active buffers are present 69 + */ 70 + struct dma_buf *dma_buf_iter_begin(void) 71 + { 72 + struct dma_buf *ret = NULL, *dmabuf; 73 + 74 + /* 75 + * The list mutex does not protect a dmabuf's refcount, so it can be 76 + * zeroed while we are iterating. We cannot call get_dma_buf() since the 77 + * caller may not already own a reference to the buffer. 78 + */ 79 + mutex_lock(&dmabuf_list_mutex); 80 + list_for_each_entry(dmabuf, &dmabuf_list, list_node) { 81 + if (file_ref_get(&dmabuf->file->f_ref)) { 82 + ret = dmabuf; 83 + break; 84 + } 85 + } 86 + mutex_unlock(&dmabuf_list_mutex); 87 + return ret; 88 + } 89 + 90 + /** 91 + * dma_buf_iter_next - continue iteration through global list of all DMA buffers 92 + * @dmabuf: [in] pointer to dma_buf 93 + * 94 + * Decrements the reference count on the provided buffer. Returns the next 95 + * buffer from the remainder of the global list of DMA-bufs with its reference
96 + * count incremented. Callers must release the reference, either by continuing 97 + * iteration with dma_buf_iter_next(), or with dma_buf_put(). 98 + * 99 + * Return: 100 + * * Next buffer from global list, with refcount elevated 101 + * * NULL if no additional active buffers are present 102 + */ 103 + struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf) 104 + { 105 + struct dma_buf *ret = NULL; 106 + 107 + /* 108 + * The list mutex does not protect a dmabuf's refcount, so it can be 109 + * zeroed while we are iterating. We cannot call get_dma_buf() since the 110 + * caller may not already own a reference to the buffer. 111 + */ 112 + mutex_lock(&dmabuf_list_mutex); 113 + dma_buf_put(dmabuf); 114 + list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) { 115 + if (file_ref_get(&dmabuf->file->f_ref)) { 116 + ret = dmabuf; 117 + break; 118 + } 119 + } 120 + mutex_unlock(&dmabuf_list_mutex); 121 + return ret; 58 122 } 59 123 60 124 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
+2
include/linux/dma-buf.h
··· 634 634 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map); 635 635 int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map); 636 636 void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map); 637 + struct dma_buf *dma_buf_iter_begin(void); 638 + struct dma_buf *dma_buf_iter_next(struct dma_buf *dmbuf); 637 639 #endif /* __DMA_BUF_H__ */
+3
kernel/bpf/Makefile
··· 53 53 obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o 54 54 obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o 55 55 obj-$(CONFIG_BPF_SYSCALL) += kmem_cache_iter.o 56 + ifeq ($(CONFIG_DMA_SHARED_BUFFER),y) 57 + obj-$(CONFIG_BPF_SYSCALL) += dmabuf_iter.o 58 + endif 56 59 57 60 CFLAGS_REMOVE_percpu_freelist.o = $(CC_FLAGS_FTRACE) 58 61 CFLAGS_REMOVE_bpf_lru_list.o = $(CC_FLAGS_FTRACE)
+102
kernel/bpf/dmabuf_iter.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (c) 2025 Google LLC */ 3 + #include <linux/bpf.h> 4 + #include <linux/btf_ids.h> 5 + #include <linux/dma-buf.h> 6 + #include <linux/kernel.h> 7 + #include <linux/seq_file.h> 8 + 9 + static void *dmabuf_iter_seq_start(struct seq_file *seq, loff_t *pos) 10 + { 11 + if (*pos) 12 + return NULL; 13 + 14 + return dma_buf_iter_begin(); 15 + } 16 + 17 + static void *dmabuf_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos) 18 + { 19 + struct dma_buf *dmabuf = v; 20 + 21 + ++*pos; 22 + 23 + return dma_buf_iter_next(dmabuf); 24 + } 25 + 26 + struct bpf_iter__dmabuf { 27 + __bpf_md_ptr(struct bpf_iter_meta *, meta); 28 + __bpf_md_ptr(struct dma_buf *, dmabuf); 29 + }; 30 + 31 + static int __dmabuf_seq_show(struct seq_file *seq, void *v, bool in_stop) 32 + { 33 + struct bpf_iter_meta meta = { 34 + .seq = seq, 35 + }; 36 + struct bpf_iter__dmabuf ctx = { 37 + .meta = &meta, 38 + .dmabuf = v, 39 + }; 40 + struct bpf_prog *prog = bpf_iter_get_info(&meta, in_stop); 41 + 42 + if (prog) 43 + return bpf_iter_run_prog(prog, &ctx); 44 + 45 + return 0; 46 + } 47 + 48 + static int dmabuf_iter_seq_show(struct seq_file *seq, void *v) 49 + { 50 + return __dmabuf_seq_show(seq, v, false); 51 + } 52 + 53 + static void dmabuf_iter_seq_stop(struct seq_file *seq, void *v) 54 + { 55 + struct dma_buf *dmabuf = v; 56 + 57 + if (dmabuf) 58 + dma_buf_put(dmabuf); 59 + } 60 + 61 + static const struct seq_operations dmabuf_iter_seq_ops = { 62 + .start = dmabuf_iter_seq_start, 63 + .next = dmabuf_iter_seq_next, 64 + .stop = dmabuf_iter_seq_stop, 65 + .show = dmabuf_iter_seq_show, 66 + }; 67 + 68 + static void bpf_iter_dmabuf_show_fdinfo(const struct bpf_iter_aux_info *aux, 69 + struct seq_file *seq) 70 + { 71 + seq_puts(seq, "dmabuf iter\n"); 72 + } 73 + 74 + static const struct bpf_iter_seq_info dmabuf_iter_seq_info = { 75 + .seq_ops = &dmabuf_iter_seq_ops, 76 + .init_seq_private = NULL, 77 + .fini_seq_private = NULL,
78 + .seq_priv_size = 0, 79 + }; 80 + 81 + static struct bpf_iter_reg bpf_dmabuf_reg_info = { 82 + .target = "dmabuf", 83 + .feature = BPF_ITER_RESCHED, 84 + .show_fdinfo = bpf_iter_dmabuf_show_fdinfo, 85 + .ctx_arg_info_size = 1, 86 + .ctx_arg_info = { 87 + { offsetof(struct bpf_iter__dmabuf, dmabuf), 88 + PTR_TO_BTF_ID_OR_NULL }, 89 + }, 90 + .seq_info = &dmabuf_iter_seq_info, 91 + }; 92 + 93 + DEFINE_BPF_ITER_FUNC(dmabuf, struct bpf_iter_meta *meta, struct dma_buf *dmabuf) 94 + BTF_ID_LIST_SINGLE(bpf_dmabuf_btf_id, struct, dma_buf) 95 + 96 + static int __init dmabuf_iter_init(void) 97 + { 98 + bpf_dmabuf_reg_info.ctx_arg_info[0].btf_id = bpf_dmabuf_btf_id[0]; 99 + return bpf_iter_reg_target(&bpf_dmabuf_reg_info); 100 + } 101 + 102 + late_initcall(dmabuf_iter_init);