Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: hwbm: Make the hwbm_pool lock a mutex

Based on review, `lock' is only acquired in hwbm_pool_add() which is
invoked via ->probe(), ->resume() and ->ndo_change_mtu(). Based on this
the lock can become a mutex and there is no need to disable interrupts
during the procedure.
Now that the lock is a mutex, hwbm_pool_add() no longer invokes
hwbm_pool_refill() in an atomic context so we can pass GFP_KERNEL to
hwbm_pool_refill() and remove the `gfp' argument from hwbm_pool_add().

Cc: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by: Sebastian Andrzej Siewior
Committed by: David S. Miller
Commit hashes: 6dcdd884 49eef82d

+13 -14
+1 -1
drivers/net/ethernet/marvell/mvneta.c
@@ -1119,7 +1119,7 @@
 		SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
 
 	/* Fill entire long pool */
-	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
 	if (num != hwbm_pool->size) {
 		WARN(1, "pool %d: %d of %d allocated\n",
 		     bm_pool->id, num, hwbm_pool->size);
+2 -2
drivers/net/ethernet/marvell/mvneta_bm.c
@@ -190,7 +190,7 @@
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	hwbm_pool->construct = mvneta_bm_construct;
 	hwbm_pool->priv = new_pool;
-	spin_lock_init(&hwbm_pool->lock);
+	mutex_init(&hwbm_pool->buf_lock);
 
 	/* Create new pool */
 	err = mvneta_bm_pool_create(priv, new_pool);
@@ -201,7 +201,7 @@
 	}
 
 	/* Allocate buffers for this pool */
-	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
 	if (num != hwbm_pool->size) {
 		WARN(1, "pool %d: %d of %d allocated\n",
 		     new_pool->id, num, hwbm_pool->size);
+3 -3
include/net/hwbm.h
@@ -12,18 +12,18 @@
 	/* constructor called during alocation */
 	int (*construct)(struct hwbm_pool *bm_pool, void *buf);
 	/* protect acces to the buffer counter*/
-	spinlock_t lock;
+	struct mutex buf_lock;
 	/* private data */
 	void *priv;
 };
 #ifdef CONFIG_HWBM
 void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp);
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
 #else
 void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
 { return 0; }
 #endif /* CONFIG_HWBM */
 #endif /* _HWBM_H */
+7 -8
net/core/hwbm.c
@@ -43,34 +43,33 @@
 }
 EXPORT_SYMBOL_GPL(hwbm_pool_refill);
 
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
 {
 	int err, i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&bm_pool->lock, flags);
+	mutex_lock(&bm_pool->buf_lock);
 	if (bm_pool->buf_num == bm_pool->size) {
 		pr_warn("pool already filled\n");
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return bm_pool->buf_num;
 	}
 
 	if (buf_num + bm_pool->buf_num > bm_pool->size) {
 		pr_warn("cannot allocate %d buffers for pool\n",
 			buf_num);
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return 0;
 	}
 
 	if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
 		pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
 			buf_num, bm_pool->buf_num);
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return 0;
 	}
 
 	for (i = 0; i < buf_num; i++) {
-		err = hwbm_pool_refill(bm_pool, gfp);
+		err = hwbm_pool_refill(bm_pool, GFP_KERNEL);
 		if (err < 0)
 			break;
 	}
@@ -78,7 +79,7 @@
 	bm_pool->buf_num += i;
 
 	pr_debug("hwpm pool: %d of %d buffers added\n", i, buf_num);
-	spin_unlock_irqrestore(&bm_pool->lock, flags);
+	mutex_unlock(&bm_pool->buf_lock);
 
 	return i;
 }