Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
net/ibmvnic: prevent more than one thread from running in reset
The current code allows more than one thread to run in reset, which can corrupt struct adapter data. Check adapter->resetting before performing a reset; if another reset is already running, delay (100 msec) before trying again.

Signed-off-by: Juliet Kim <julietk@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7ed5b31f4a
parent b27507bb59
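For readers new to this locking idiom, here is a minimal, self-contained sketch of the pattern the patch introduces (an illustration, not the driver's code): bit 0 of an unsigned long serves as a "reset in progress" lock taken with test_and_set_bit_lock() and released with clear_bit_unlock(), and a worker that loses the race re-queues itself through delayed work instead of running a second reset concurrently. The identifiers my_adapter, my_reset_worker, my_delayed_reset_worker and the msecs_to_jiffies(100) delay are illustrative choices for this sketch only.

/*
 * Minimal sketch of the serialization pattern introduced by this patch,
 * not the driver code itself. Bit 0 of an unsigned long acts as a
 * "reset in progress" lock: the first worker to take it runs the reset;
 * a worker that loses the race re-queues itself via delayed work.
 * All names below (my_adapter, my_reset_worker, ...) are illustrative.
 */
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_adapter {
        unsigned long resetting;        /* bit 0: reset in progress */
        struct work_struct reset_work;
        struct delayed_work delayed_reset_work;
};

static void my_reset_worker(struct work_struct *work)
{
        struct my_adapter *adapter =
                container_of(work, struct my_adapter, reset_work);

        /* Claim the reset path; if another reset holds it, retry later. */
        if (test_and_set_bit_lock(0, &adapter->resetting)) {
                schedule_delayed_work(&adapter->delayed_reset_work,
                                      msecs_to_jiffies(100));
                return;
        }

        /* ... the actual reset work would run here ... */

        clear_bit_unlock(0, &adapter->resetting);
}

static void my_delayed_reset_worker(struct work_struct *work)
{
        struct my_adapter *adapter =
                container_of(work, struct my_adapter,
                             delayed_reset_work.work);

        /* Re-enter the normal reset path after the back-off. */
        my_reset_worker(&adapter->reset_work);
}

static void my_adapter_init_reset(struct my_adapter *adapter)
{
        adapter->resetting = 0;
        INIT_WORK(&adapter->reset_work, my_reset_worker);
        INIT_DELAYED_WORK(&adapter->delayed_reset_work,
                          my_delayed_reset_worker);
}

The patch below follows the same shape: __ibmvnic_reset() takes the bit and defers to __ibmvnic_delayed_reset() (re-queued after IBMVNIC_RESET_DELAY) when it is already held, releases it with clear_bit_unlock() when done, and ibmvnic_probe() gains the matching INIT_DELAYED_WORK() call; adapter->resetting changes from bool to unsigned long so the bit helpers can operate on it.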
drivers/net/ethernet/ibm/ibmvnic.c
@@ -1207,7 +1207,7 @@ static void ibmvnic_cleanup(struct net_device *netdev)
         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
         /* ensure that transmissions are stopped if called by do_reset */
-        if (adapter->resetting)
+        if (test_bit(0, &adapter->resetting))
                 netif_tx_disable(netdev);
         else
                 netif_tx_stop_all_queues(netdev);
@@ -1428,7 +1428,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
         u8 proto = 0;
         netdev_tx_t ret = NETDEV_TX_OK;
 
-        if (adapter->resetting) {
+        if (test_bit(0, &adapter->resetting)) {
                 if (!netif_subqueue_stopped(netdev, skb))
                         netif_stop_subqueue(netdev, queue_num);
                 dev_kfree_skb_any(skb);
@@ -2054,6 +2054,12 @@ static void __ibmvnic_reset(struct work_struct *work)
 
         adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
 
+        if (test_and_set_bit_lock(0, &adapter->resetting)) {
+                schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
+                                      IBMVNIC_RESET_DELAY);
+                return;
+        }
+
         reset_state = adapter->state;
 
         rwi = get_next_rwi(adapter);
@@ -2095,6 +2101,10 @@ static void __ibmvnic_reset(struct work_struct *work)
                         break;
 
                 rwi = get_next_rwi(adapter);
+
+                if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
+                            rwi->reset_reason == VNIC_RESET_MOBILITY))
+                        adapter->force_reset_recovery = true;
         }
 
         if (adapter->wait_for_reset) {
@@ -2107,7 +2117,16 @@ static void __ibmvnic_reset(struct work_struct *work)
                 free_all_rwi(adapter);
         }
 
-        adapter->resetting = false;
+        clear_bit_unlock(0, &adapter->resetting);
+}
+
+static void __ibmvnic_delayed_reset(struct work_struct *work)
+{
+        struct ibmvnic_adapter *adapter;
+
+        adapter = container_of(work, struct ibmvnic_adapter,
+                               ibmvnic_delayed_reset.work);
+        __ibmvnic_reset(&adapter->ibmvnic_reset);
 }
 
 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -2162,7 +2181,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
         rwi->reset_reason = reason;
         list_add_tail(&rwi->list, &adapter->rwi_list);
         spin_unlock_irqrestore(&adapter->rwi_lock, flags);
-        adapter->resetting = true;
         netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
         schedule_work(&adapter->ibmvnic_reset);
 
@@ -2207,7 +2225,7 @@ restart_poll:
                 u16 offset;
                 u8 flags = 0;
 
-                if (unlikely(adapter->resetting &&
+                if (unlikely(test_bit(0, &adapter->resetting) &&
                              adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
                         enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                         napi_complete_done(napi, frames_processed);
@@ -2858,7 +2876,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
                 return 1;
         }
 
-        if (adapter->resetting &&
+        if (test_bit(0, &adapter->resetting) &&
             adapter->reset_reason == VNIC_RESET_MOBILITY) {
                 u64 val = (0xff000000) | scrq->hw_irq;
 
@@ -3408,7 +3426,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
         if (rc) {
                 if (rc == H_CLOSED) {
                         dev_warn(dev, "CRQ Queue closed\n");
-                        if (adapter->resetting)
+                        if (test_bit(0, &adapter->resetting))
                                 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
                 }
 
@@ -4484,7 +4502,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
         case IBMVNIC_CRQ_XPORT_EVENT:
                 netif_carrier_off(netdev);
                 adapter->crq.active = false;
-                if (adapter->resetting)
+                if (test_bit(0, &adapter->resetting))
                         adapter->force_reset_recovery = true;
                 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
                         dev_info(dev, "Migrated, re-enabling adapter\n");
@@ -4822,7 +4840,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
                 return -1;
         }
 
-        if (adapter->resetting && !adapter->wait_for_reset &&
+        if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
             adapter->reset_reason != VNIC_RESET_MOBILITY) {
                 if (adapter->req_rx_queues != old_num_rx_queues ||
                     adapter->req_tx_queues != old_num_tx_queues) {
@@ -4934,10 +4952,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
         spin_lock_init(&adapter->stats_lock);
 
         INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+        INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
+                          __ibmvnic_delayed_reset);
         INIT_LIST_HEAD(&adapter->rwi_list);
         spin_lock_init(&adapter->rwi_lock);
         init_completion(&adapter->init_done);
-        adapter->resetting = false;
+        clear_bit(0, &adapter->resetting);
 
         do {
                 rc = init_crq_queue(adapter);
drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,6 +39,8 @@
 #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
 #define IBMVNIC_BUFFER_HLEN 500
 
+#define IBMVNIC_RESET_DELAY 100
+
 static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
 #define IBMVNIC_USE_SERVER_MAXES 0x1
         "use-server-maxes"
@@ -1077,7 +1079,8 @@ struct ibmvnic_adapter {
         spinlock_t rwi_lock;
         struct list_head rwi_list;
         struct work_struct ibmvnic_reset;
-        bool resetting;
+        struct delayed_work ibmvnic_delayed_reset;
+        unsigned long resetting;
         bool napi_enabled, from_passive_init;
 
         bool failover_pending;