Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 Facebook
4 * Copyright 2020 Google LLC.
5 */
6
7#ifndef _BPF_LOCAL_STORAGE_H
8#define _BPF_LOCAL_STORAGE_H
9
10#include <linux/bpf.h>
11#include <linux/filter.h>
12#include <linux/rculist.h>
13#include <linux/list.h>
14#include <linux/hash.h>
15#include <linux/types.h>
16#include <linux/bpf_mem_alloc.h>
17#include <uapi/linux/btf.h>
18
19#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
20
/* One hash bucket of a bpf_local_storage_map. Update/delete paths take
 * @lock to link/unlink an elem's map_node into @list.
 */
struct bpf_local_storage_map_bucket {
	struct hlist_head list;	/* elems hashed into this bucket */
	raw_spinlock_t lock;	/* protects link/unlink on @list */
};
25
/* The map is not the primary owner of a bpf_local_storage_elem.
 * Instead, the container object (e.g. sk->sk_bpf_storage) is.
28 *
29 * The map (bpf_local_storage_map) is for two purposes
30 * 1. Define the size of the "local storage". It is
31 * the map's value_size.
32 *
33 * 2. Maintain a list to keep track of all elems such
34 * that they can be cleaned up during the map destruction.
35 *
36 * When a bpf local storage is being looked up for a
37 * particular object, the "bpf_map" pointer is actually used
38 * as the "key" to search in the list of elem in
39 * the respective bpf_local_storage owned by the object.
40 *
41 * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
42 * as the searching key.
43 */
struct bpf_local_storage_map {
	struct bpf_map map;
	/* Lookup elem does not require accessing the map.
	 *
	 * Updating/Deleting requires a bucket lock to
	 * link/unlink the elem from the map. Having
	 * multiple buckets to improve contention.
	 */
	struct bpf_local_storage_map_bucket *buckets;
	u32 bucket_log;		/* log2 of the number of buckets */
	u16 elem_size;		/* per-elem allocation size; presumably
				 * sizeof(bpf_local_storage_elem) + value_size
				 * — confirm in bpf_local_storage.c
				 */
	u16 cache_idx;		/* slot in bpf_local_storage->cache[] reserved
				 * for this map (see bpf_local_storage_lookup())
				 */
	bool use_kmalloc_nolock; /* NOTE(review): presumably selects a
				  * non-sleeping allocation path — confirm in .c
				  */
};
58
struct bpf_local_storage_data {
	/* smap is used as the searching key when looking up
	 * from the object's bpf_local_storage.
	 *
	 * Put it in the same cacheline as the data to minimize
	 * the number of cachelines accessed during the cache hit case.
	 */
	struct bpf_local_storage_map __rcu *smap;
	/* Flexible array holding the map's value; 8-byte aligned so
	 * 64-bit fields in the value start on a natural boundary.
	 */
	u8 data[] __aligned(8);
};
69
70/* Linked to bpf_local_storage and bpf_local_storage_map */
/* Linked to bpf_local_storage and bpf_local_storage_map */
struct bpf_local_storage_elem {
	struct hlist_node map_node;	/* Linked to bpf_local_storage_map */
	struct hlist_node snode;	/* Linked to bpf_local_storage */
	struct bpf_local_storage __rcu *local_storage;
	/* The two members below overlap: an elem is presumably either
	 * pending RCU reclamation (rcu) or queued for deferred freeing
	 * (free_node), never both — confirm in bpf_local_storage.c.
	 */
	union {
		struct rcu_head rcu;
		struct hlist_node free_node;	/* used to postpone
						 * bpf_selem_free
						 * after raw_spin_unlock
						 */
	};
	/* 8 bytes hole */
	/* The data is stored in another cacheline to minimize
	 * the number of cachelines access during a cache hit.
	 */
	struct bpf_local_storage_data sdata ____cacheline_aligned;
};
88
struct bpf_local_storage {
	/* Per-map fast-path slots, indexed by smap->cache_idx in
	 * bpf_local_storage_lookup().
	 */
	struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
	/* NOTE(review): presumably the map whose elem created this
	 * storage — confirm against bpf_local_storage_alloc().
	 */
	struct bpf_local_storage_map __rcu *smap;
	struct hlist_head list; /* List of bpf_local_storage_elem */
	void *owner;		/* The object that owns the above "list" of
				 * bpf_local_storage_elem.
				 */
	struct rcu_head rcu;
	raw_spinlock_t lock;	/* Protect adding/removing from the "list" */
	bool use_kmalloc_nolock;
};
100
/* U16_MAX is much more than enough for sk local storage
 * considering a tcp_sock is ~2k.
 *
 * Bounded both by what kmalloc can hand out (minus the elem header and
 * the BPF stack budget) and by the u16 elem_size field in
 * bpf_local_storage_map.
 */
#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE				\
	min_t(u32,							\
	      (KMALLOC_MAX_SIZE - MAX_BPF_STACK -			\
	       sizeof(struct bpf_local_storage_elem)),			\
	      (U16_MAX - sizeof(struct bpf_local_storage_elem)))

/* Convert between a bpf_local_storage_data and its containing elem. */
#define SELEM(_SDATA)							\
	container_of((_SDATA), struct bpf_local_storage_elem, sdata)
#define SDATA(_SELEM) (&(_SELEM)->sdata)

/* BPF_LOCAL_STORAGE_CACHE_SIZE was redundantly redefined here; the
 * single definition near the top of this header (which the cache[]
 * arrays rely on) is the authoritative one.
 */

/* Shared accounting used to hand out cache_idx slots to maps of one
 * storage type (see bpf_local_storage_map_alloc()'s cache parameter).
 */
struct bpf_local_storage_cache {
	spinlock_t idx_lock;	/* protects idx_usage_counts[] */
	/* Per cache-slot usage counters; presumably used to pick the
	 * least-contended cache_idx for a new map — confirm in
	 * bpf_local_storage.c.
	 */
	u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
};
120
/* Define a file-local bpf_local_storage_cache with its lock statically
 * initialized (usage counters are zero-initialized by static storage).
 */
#define DEFINE_BPF_STORAGE_CACHE(name)				\
static struct bpf_local_storage_cache name = {			\
	.idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock),	\
}
125
/* Helper functions for bpf_local_storage */

/* Sanity-check the map attributes before allocation. */
int bpf_local_storage_map_alloc_check(union bpf_attr *attr);

/* Allocate a local-storage map; @cache supplies the shared cache_idx
 * slot accounting for this storage type.
 */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool use_kmalloc_nolock);

/* Install @selem's sdata into local_storage->cache[smap->cache_idx];
 * called from the cacheit_lockit path of bpf_local_storage_lookup().
 */
void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
				      struct bpf_local_storage_map *smap,
				      struct bpf_local_storage_elem *selem);
/* Find the storage data belonging to @smap inside @local_storage.
 *
 * If cacheit_lockit is false, this lookup function is lockless; when
 * true, a cache miss also promotes the found elem into the per-map
 * cache slot for faster future lookups.
 *
 * Returns NULL if @local_storage holds no elem for @smap.
 */
static inline struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit): the cache slot reserved for this map may
	 * already point at its sdata; verify via the smap back-pointer.
	 */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss): linear RCU walk of the storage's elem
	 * list, using the map pointer itself as the search key.
	 */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;
	if (cacheit_lockit)
		__bpf_local_storage_insert_cache(local_storage, smap, selem);
	return SDATA(selem);
}
164
/* Unlink and free @local_storage together with all of its elems. */
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);

/* Free the map. @cache releases this map's cache_idx slot accounting;
 * @busy_counter, if non-NULL, presumably fences concurrent users —
 * confirm in bpf_local_storage.c.
 */
void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter);

/* Verify the map's BTF key/value types are acceptable. */
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type);

/* Link @selem onto local_storage->list. _nolock: the caller presumably
 * already holds local_storage->lock — confirm at call sites.
 */
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem);

/* Unlink @selem from its storage and map. @reuse_now presumably decides
 * between immediate and RCU-deferred reclamation — confirm in .c.
 */
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);

/* Hash @selem into @smap's buckets via its map_node. */
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem);

/* Allocate a new elem for @owner, copying in @value when non-NULL. */
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
		bool swap_uptrs, gfp_t gfp_flags);

/* Free @selem; @reuse_now mirrors bpf_selem_unlink()'s flag. */
void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    bool reuse_now);

/* Create @owner's bpf_local_storage with @first_selem pre-linked. */
int
bpf_local_storage_alloc(void *owner,
			struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *first_selem,
			gfp_t gfp_flags);

/* Create or update @owner's value for @smap according to @map_flags. */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);

/* Report the memory usage attributed to this map. */
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
202
203#endif /* _BPF_LOCAL_STORAGE_H */