mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
net: hwbm: Make the hwbm_pool lock a mutex
Review shows that `lock' is only acquired in hwbm_pool_add(), which is invoked via ->probe(), ->resume() and ->ndo_change_mtu(). All of these are sleepable contexts, so the lock can become a mutex and there is no need to disable interrupts while it is held. With the lock a mutex, hwbm_pool_add() no longer invokes hwbm_pool_refill() in an atomic context, so GFP_KERNEL can be passed to hwbm_pool_refill() and the `gfp' argument can be dropped from hwbm_pool_add().

Cc: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 49eef82dcd
commit 6dcdd884e2
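The caller-visible effect, condensed from the diff below into a before/after sketch (illustrative only, not the full call sites):

	/* Before: hwbm_pool_add() refilled the pool under a spinlock with
	 * interrupts disabled, so callers had to pass an allocation mode
	 * and settle for GFP_ATOMIC. */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);

	/* After: the pool is refilled under a mutex in sleepable context,
	 * hwbm_pool_refill() always uses GFP_KERNEL, and the gfp_t
	 * argument disappears from the API. */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);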
drivers/net/ethernet/marvell/mvneta.c
@@ -1119,7 +1119,7 @@ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 			       SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
 
 	/* Fill entire long pool */
-	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
 	if (num != hwbm_pool->size) {
 		WARN(1, "pool %d: %d of %d allocated\n",
 		     bm_pool->id, num, hwbm_pool->size);
drivers/net/ethernet/marvell/mvneta_bm.c
@@ -190,7 +190,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		hwbm_pool->construct = mvneta_bm_construct;
 		hwbm_pool->priv = new_pool;
-		spin_lock_init(&hwbm_pool->lock);
+		mutex_init(&hwbm_pool->buf_lock);
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
@@ -201,7 +201,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 		}
 
 		/* Allocate buffers for this pool */
-		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
 		if (num != hwbm_pool->size) {
 			WARN(1, "pool %d: %d of %d allocated\n",
 			     new_pool->id, num, hwbm_pool->size);
include/net/hwbm.h
@@ -12,18 +12,18 @@ struct hwbm_pool {
 	/* constructor called during alocation */
 	int (*construct)(struct hwbm_pool *bm_pool, void *buf);
 	/* protect acces to the buffer counter*/
-	spinlock_t lock;
+	struct mutex buf_lock;
 	/* private data */
 	void *priv;
 };
 #ifdef CONFIG_HWBM
 void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp);
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
 #else
 void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
 { return 0; }
 #endif /* CONFIG_HWBM */
 #endif /* _HWBM_H */
net/core/hwbm.c
@@ -43,34 +43,33 @@ int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(hwbm_pool_refill);
 
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
 {
 	int err, i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&bm_pool->lock, flags);
+	mutex_lock(&bm_pool->buf_lock);
 	if (bm_pool->buf_num == bm_pool->size) {
 		pr_warn("pool already filled\n");
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return bm_pool->buf_num;
 	}
 
 	if (buf_num + bm_pool->buf_num > bm_pool->size) {
 		pr_warn("cannot allocate %d buffers for pool\n",
 			buf_num);
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return 0;
 	}
 
 	if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
 		pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
 			buf_num, bm_pool->buf_num);
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return 0;
 	}
 
 	for (i = 0; i < buf_num; i++) {
-		err = hwbm_pool_refill(bm_pool, gfp);
+		err = hwbm_pool_refill(bm_pool, GFP_KERNEL);
 		if (err < 0)
 			break;
 	}
@@ -79,7 +78,7 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
 	bm_pool->buf_num += i;
 
 	pr_debug("hwpm pool: %d of %d buffers added\n", i, buf_num);
-	spin_unlock_irqrestore(&bm_pool->lock, flags);
+	mutex_unlock(&bm_pool->buf_lock);
 
 	return i;
 }