mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-09 23:39:18 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  net: Fix percpu counters deadlock
  cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits: net
  drivers/net/usb: use USB API functions rather than constants
  cls_cgroup: clean up Kconfig
  cls_cgroup: clean up for cgroup part
  cls_cgroup: fix an oops when removing a cgroup
  EtherExpress16: fix printing timed out status
  mlx4_en: Added "set_ringparam" Ethtool interface implementation
  mlx4_en: Always allocate RX ring for each interrupt vector
  mlx4_en: Verify number of RX rings doesn't exceed MAX_RX_RINGS
  IPVS: Make "no destination available" message more consistent between schedulers
  net: KS8695: removed duplicated #include
  tun: Fix SIOCSIFHWADDR error.
  smsc911x: compile fix re netif_rx signature changes
  netns: foreach_netdev_safe is insufficient in default_device_exit
  net: make xfrm_statistics_seq_show use generic snmp_fold_field
  net: Fix more NAPI interface netdev argument drop fallout.
  net: Fix unused variable warnings in pasemi_mac.c and spider_net.c
This commit is contained in: commit 5ed1836814
@@ -307,7 +307,7 @@ poll_some_more:
 		}
 		spin_unlock_irq(&ep->rx_lock);
 
-		if (more && netif_rx_reschedule(dev, napi))
+		if (more && netif_rx_reschedule(napi))
 			goto poll_some_more;
 	}
 
@@ -504,7 +504,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 		netif_rx_complete(napi);
 		qmgr_enable_irq(rxq);
 		if (!qmgr_stat_empty(rxq) &&
-		    netif_rx_reschedule(dev, napi)) {
+		    netif_rx_reschedule(napi)) {
 #if DEBUG_RX
 			printk(KERN_DEBUG "%s: eth_poll"
 			       " netif_rx_reschedule successed\n",
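This one-line fix, like the ixp4xx_hss and smsc911x hunks further down, is fallout from the NAPI interface dropping its unused struct net_device argument. A sketch of the prototype change (assumed shape of the era's include/linux/netdevice.h helpers):

	/* old: the netdev argument was carried but never used */
	int netif_rx_reschedule(struct net_device *dev, struct napi_struct *napi);
	/* new: the napi_struct alone identifies the polling context */
	int netif_rx_reschedule(struct napi_struct *napi);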
@@ -29,7 +29,6 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/irq.h>
-#include <linux/delay.h>
 #include <linux/io.h>
 
 #include <asm/irq.h>
@@ -68,17 +68,17 @@
  */
 
 /* these functions take the SCB status word and test the relevant status bit */
-#define SCB_complete(s)	((s&0x8000)!=0)
-#define SCB_rxdframe(s)	((s&0x4000)!=0)
-#define SCB_CUdead(s)	((s&0x2000)!=0)
-#define SCB_RUdead(s)	((s&0x1000)!=0)
-#define SCB_ack(s)	(s & 0xf000)
+#define SCB_complete(s)	(((s) & 0x8000) != 0)
+#define SCB_rxdframe(s)	(((s) & 0x4000) != 0)
+#define SCB_CUdead(s)	(((s) & 0x2000) != 0)
+#define SCB_RUdead(s)	(((s) & 0x1000) != 0)
+#define SCB_ack(s)	((s) & 0xf000)
 
 /* Command unit status: 0=idle, 1=suspended, 2=active */
-#define SCB_CUstat(s)	((s&0x0300)>>8)
+#define SCB_CUstat(s)	(((s)&0x0300)>>8)
 
 /* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
-#define SCB_RUstat(s)	((s&0x0070)>>4)
+#define SCB_RUstat(s)	(((s)&0x0070)>>4)
 
 /* SCB commands */
 #define SCB_CUnop	0x0000
@@ -98,18 +98,18 @@
  * Command block defines
  */
 
-#define Stat_Done(s)	((s&0x8000)!=0)
-#define Stat_Busy(s)	((s&0x4000)!=0)
-#define Stat_OK(s)	((s&0x2000)!=0)
-#define Stat_Abort(s)	((s&0x1000)!=0)
-#define Stat_STFail	((s&0x0800)!=0)
-#define Stat_TNoCar(s)	((s&0x0400)!=0)
-#define Stat_TNoCTS(s)	((s&0x0200)!=0)
-#define Stat_TNoDMA(s)	((s&0x0100)!=0)
-#define Stat_TDefer(s)	((s&0x0080)!=0)
-#define Stat_TColl(s)	((s&0x0040)!=0)
-#define Stat_TXColl(s)	((s&0x0020)!=0)
-#define Stat_NoColl(s)	(s&0x000f)
+#define Stat_Done(s)	(((s) & 0x8000) != 0)
+#define Stat_Busy(s)	(((s) & 0x4000) != 0)
+#define Stat_OK(s)	(((s) & 0x2000) != 0)
+#define Stat_Abort(s)	(((s) & 0x1000) != 0)
+#define Stat_STFail	(((s) & 0x0800) != 0)
+#define Stat_TNoCar(s)	(((s) & 0x0400) != 0)
+#define Stat_TNoCTS(s)	(((s) & 0x0200) != 0)
+#define Stat_TNoDMA(s)	(((s) & 0x0100) != 0)
+#define Stat_TDefer(s)	(((s) & 0x0080) != 0)
+#define Stat_TColl(s)	(((s) & 0x0040) != 0)
+#define Stat_TXColl(s)	(((s) & 0x0020) != 0)
+#define Stat_NoColl(s)	((s) & 0x000f)
 
 /* Cmd_END will end AFTER the command if this is the first
  * command block after an SCB_CUstart, but BEFORE the command
@@ -136,16 +136,16 @@
  * Frame Descriptor (Receive block) defines
  */
 
-#define FD_Done(s)	((s&0x8000)!=0)
-#define FD_Busy(s)	((s&0x4000)!=0)
-#define FD_OK(s)	((s&0x2000)!=0)
+#define FD_Done(s)	(((s) & 0x8000) != 0)
+#define FD_Busy(s)	(((s) & 0x4000) != 0)
+#define FD_OK(s)	(((s) & 0x2000) != 0)
 
-#define FD_CRC(s)	((s&0x0800)!=0)
-#define FD_Align(s)	((s&0x0400)!=0)
-#define FD_Resrc(s)	((s&0x0200)!=0)
-#define FD_DMA(s)	((s&0x0100)!=0)
-#define FD_Short(s)	((s&0x0080)!=0)
-#define FD_NoEOF(s)	((s&0x0040)!=0)
+#define FD_CRC(s)	(((s) & 0x0800) != 0)
+#define FD_Align(s)	(((s) & 0x0400) != 0)
+#define FD_Resrc(s)	(((s) & 0x0200) != 0)
+#define FD_DMA(s)	(((s) & 0x0100) != 0)
+#define FD_Short(s)	(((s) & 0x0080) != 0)
+#define FD_NoEOF(s)	(((s) & 0x0040) != 0)
 
 struct rfd_header {
 	volatile unsigned long flags;
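The whole EtherExpress16 header change is macro hygiene: each use of the status argument gains its own parentheses so that callers may safely pass compound expressions. A minimal userspace illustration of what goes wrong without them (macro names hypothetical):

	#include <assert.h>

	#define COMPLETE_OLD(s)	((s&0x8000)!=0)
	#define COMPLETE_NEW(s)	(((s) & 0x8000) != 0)

	int main(void)
	{
		/* & binds tighter than |, so COMPLETE_OLD(1 | 0) expands to
		 * ((1 | (0 & 0x8000)) != 0) and wrongly reports "complete". */
		assert(COMPLETE_OLD(1 | 0) == 1);	/* precedence bug */
		assert(COMPLETE_NEW(1 | 0) == 0);	/* correct grouping */
		return 0;
	}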
@@ -169,13 +169,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
 		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
 			  mdev->profile.prof[i].tx_ring_num, i);
-		if (!mdev->profile.prof[i].rx_ring_num) {
-			mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors;
+		mdev->profile.prof[i].rx_ring_num =
+			min_t(int, dev->caps.num_comp_vectors, MAX_RX_RINGS);
 		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
 			  mdev->profile.prof[i].rx_ring_num, i);
-		} else
-			mlx4_info(mdev, "Using %d rx rings for port:%d\n",
-				  mdev->profile.prof[i].rx_ring_num, i);
 	}
 
 	/* Create our own workqueue for reset/multicast tasks
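The hunk above and the en_params.c changes below implement one policy: give every completion (interrupt) vector its own RX ring, clamped by the driver limit. Worked through with assumed numbers: with 32 completion vectors and a MAX_RX_RINGS of 16, min_t(int, 32, 16) yields 16 rings; with 4 vectors it yields 4.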
@@ -552,7 +552,7 @@ static void mlx4_en_linkstate(struct work_struct *work)
 }
 
 
-static int mlx4_en_start_port(struct net_device *dev)
+int mlx4_en_start_port(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -707,7 +707,7 @@ cq_err:
 }
 
 
-static void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -826,7 +826,7 @@ static int mlx4_en_close(struct net_device *dev)
 	return 0;
 }
 
-static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
 	int i;
 
@@ -845,7 +845,7 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 	}
 }
 
-static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile *prof = priv->prof;
@@ -65,15 +65,6 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
 MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
 			   " Per priority bit mask");
 
-MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
-MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
-
-MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
-MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
-MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
-MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
-
-
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 {
 	struct mlx4_en_profile *params = &mdev->profile;
@@ -87,6 +78,8 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 		params->prof[i].rx_ppp = pfcrx;
 		params->prof[i].tx_pause = 1;
 		params->prof[i].tx_ppp = pfctx;
+		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
 	}
 	if (pfcrx || pfctx) {
 		params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
@@ -95,32 +88,7 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 		params->prof[1].tx_ring_num = 1;
 		params->prof[2].tx_ring_num = 1;
 	}
-	params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
-	params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
 
-	if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
-		tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
-	params->prof[1].tx_ring_size =
-		(tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
-		MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
-
-	if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
-		tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
-	params->prof[2].tx_ring_size =
-		(tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
-		MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
-
-	if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
-		rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
-	params->prof[1].rx_ring_size =
-		(rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
-		MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
-
-	if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
-		rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
-	params->prof[2].rx_ring_size =
-		(rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
-		MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
 	return 0;
 }
 
@@ -417,6 +385,54 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
 	pause->rx_pause = priv->prof->rx_pause;
 }
 
+static int mlx4_en_set_ringparam(struct net_device *dev,
+				 struct ethtool_ringparam *param)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	u32 rx_size, tx_size;
+	int port_up = 0;
+	int err = 0;
+
+	if (param->rx_jumbo_pending || param->rx_mini_pending)
+		return -EINVAL;
+
+	rx_size = roundup_pow_of_two(param->rx_pending);
+	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+	tx_size = roundup_pow_of_two(param->tx_pending);
+	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+
+	if (rx_size == priv->prof->rx_ring_size &&
+	    tx_size == priv->prof->tx_ring_size)
+		return 0;
+
+	mutex_lock(&mdev->state_lock);
+	if (priv->port_up) {
+		port_up = 1;
+		mlx4_en_stop_port(dev);
+	}
+
+	mlx4_en_free_resources(priv);
+
+	priv->prof->tx_ring_size = tx_size;
+	priv->prof->rx_ring_size = rx_size;
+
+	err = mlx4_en_alloc_resources(priv);
+	if (err) {
+		mlx4_err(mdev, "Failed reallocating port resources\n");
+		goto out;
+	}
+	if (port_up) {
+		err = mlx4_en_start_port(dev);
+		if (err)
+			mlx4_err(mdev, "Failed starting port\n");
+	}
+
+out:
+	mutex_unlock(&mdev->state_lock);
+	return err;
+}
+
 static void mlx4_en_get_ringparam(struct net_device *dev,
 				  struct ethtool_ringparam *param)
 {
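Once .set_ringparam is wired into mlx4_en_ethtool_ops (next hunk), the ring sizes become adjustable through the standard ethtool ring controls. A hypothetical session (interface name assumed; requested values are rounded up to a power of two and clamped to the driver minimums, and the port is briefly stopped and restarted):

	# ethtool -g eth2
	# ethtool -G eth2 rx 4096 tx 1024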
@@ -456,6 +472,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_pauseparam = mlx4_en_get_pauseparam,
 	.set_pauseparam = mlx4_en_set_pauseparam,
 	.get_ringparam = mlx4_en_get_ringparam,
+	.set_ringparam = mlx4_en_set_ringparam,
 	.get_flags = ethtool_op_get_flags,
 	.set_flags = ethtool_op_set_flags,
 };
@@ -489,6 +489,12 @@ void mlx4_en_destroy_netdev(struct net_device *dev);
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			struct mlx4_en_port_profile *prof);
 
+int mlx4_en_start_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev);
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv);
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
@@ -954,7 +954,6 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
 {
 	const struct pasemi_mac_rxring *rxring = data;
 	struct pasemi_mac *mac = rxring->mac;
-	struct net_device *dev = mac->netdev;
 	const struct pasemi_dmachan *chan = &rxring->chan;
 	unsigned int reg;
 
@@ -1634,7 +1633,6 @@ static void pasemi_mac_set_rx_mode(struct net_device *dev)
 static int pasemi_mac_poll(struct napi_struct *napi, int budget)
 {
 	struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
-	struct net_device *dev = mac->netdev;
 	int pkts;
 
 	pasemi_mac_clean_tx(tx_ring(mac));
@@ -1484,13 +1484,13 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
 	}
 
 	if (likely(intsts & inten & INT_STS_RSFL_)) {
-		if (likely(netif_rx_schedule_prep(dev, &pdata->napi))) {
+		if (likely(netif_rx_schedule_prep(&pdata->napi))) {
 			/* Disable Rx interrupts */
 			temp = smsc911x_reg_read(pdata, INT_EN);
 			temp &= (~INT_EN_RSFL_EN_);
 			smsc911x_reg_write(pdata, INT_EN, temp);
 			/* Schedule a NAPI poll */
-			__netif_rx_schedule(dev, &pdata->napi);
+			__netif_rx_schedule(&pdata->napi);
 		} else {
 			SMSC_WARNING(RX_ERR,
 				     "netif_rx_schedule_prep failed");
@@ -1277,7 +1277,6 @@ bad_desc:
 static int spider_net_poll(struct napi_struct *napi, int budget)
 {
 	struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
-	struct net_device *netdev = card->netdev;
 	int packets_done = 0;
 
 	while (packets_done < budget) {
@@ -343,7 +343,7 @@ static void tun_net_init(struct net_device *dev)
 		break;
 
 	case TUN_TAP_DEV:
-		dev->netdev_ops = &tun_netdev_ops;
+		dev->netdev_ops = &tap_netdev_ops;
 		/* Ethernet TAP Device */
 		ether_setup(dev);
 
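The SIOCSIFHWADDR failure came from the TAP case installing the point-to-point TUN ops table, which lacks a MAC-address handler. A sketch of the relevant difference between the two tables (fields abridged, assumed from this era's drivers/net/tun.c):

	static const struct net_device_ops tun_netdev_ops = {
		/* point-to-point TUN device: no L2 address to set */
		.ndo_start_xmit	= tun_net_xmit,
	};

	static const struct net_device_ops tap_netdev_ops = {
		.ndo_start_xmit		= tun_net_xmit,
		.ndo_set_mac_address	= eth_mac_addr,	/* what SIOCSIFHWADDR needs */
	};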
@@ -2831,7 +2831,7 @@ static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
 	for (i = 0; i < iface->desc.bNumEndpoints; i++) {
 		endp = &iface->endpoint[i].desc;
 		if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) &&
-		    ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == type))
+		    (usb_endpoint_type(endp) == type))
 			return endp;
 	}
 
@@ -654,7 +654,7 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
 		netif_rx_complete(dev, napi);
 		qmgr_enable_irq(rxq);
 		if (!qmgr_stat_empty(rxq) &&
-		    netif_rx_reschedule(dev, napi)) {
+		    netif_rx_reschedule(napi)) {
 #if DEBUG_RX
 			printk(KERN_DEBUG "%s: hss_hdlc_poll"
 			       " netif_rx_reschedule succeeded\n",
@@ -1065,8 +1065,7 @@ static int eject_installer(struct usb_interface *intf)
 	/* Find bulk out endpoint */
 	endpoint = &iface_desc->endpoint[1].desc;
 	if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
-	    (endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-	    USB_ENDPOINT_XFER_BULK) {
+	    usb_endpoint_xfer_bulk(endpoint)) {
 		bulk_out_ep = endpoint->bEndpointAddress;
 	} else {
 		dev_err(&udev->dev,
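Both USB hunks replace open-coded bmAttributes masking with the include/linux/usb.h accessors, which are behaviour-equivalent to the removed expressions (paraphrased from that era's header):

	static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
	{
		return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	}

	static inline int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
	{
		return usb_endpoint_type(epd) == USB_ENDPOINT_XFER_BULK;
	}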
@@ -5066,13 +5066,14 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
 
 static void __net_exit default_device_exit(struct net *net)
 {
-	struct net_device *dev, *next;
+	struct net_device *dev;
 	/*
 	 * Push all migratable of the network devices back to the
 	 * initial network namespace
 	 */
 	rtnl_lock();
-	for_each_netdev_safe(net, dev, next) {
+restart:
+	for_each_netdev(net, dev) {
 		int err;
 		char fb_name[IFNAMSIZ];
 
@@ -5083,7 +5084,7 @@ static void __net_exit default_device_exit(struct net *net)
 		/* Delete virtual devices */
 		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
 			dev->rtnl_link_ops->dellink(dev);
-			continue;
+			goto restart;
 		}
 
 		/* Push remaing network devices to init_net */
@@ -5094,6 +5095,7 @@ static void __net_exit default_device_exit(struct net *net)
 				__func__, dev->name, err);
 			BUG();
 		}
+		goto restart;
 	}
 	rtnl_unlock();
 }
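The three net/core/dev.c hunks above restart the device walk after every mutation: dellink() or a namespace move can remove devices other than the current one (destroying a real device takes its VLANs with it), so even the next pointer cached by for_each_netdev_safe() may be stale. The generic form of the idiom, with hypothetical names:

	/* Unsafe when the callback may delete entries beyond 'cur':
	 * 'next' may already be freed by the time it is dereferenced. */
	list_for_each_entry_safe(cur, next, &head, node)
		maybe_delete_several(cur);

	/* Safe: rescan from the head after every mutation. */
	restart:
		list_for_each_entry(cur, &head, node) {
			if (needs_work(cur)) {
				maybe_delete_several(cur);
				goto restart;
			}
		}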
@@ -2414,7 +2414,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
@@ -2429,7 +2429,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct neigh_table *tbl = pde->data;
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
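The same NR_CPUS to nr_cpu_ids substitution repeats in the conntrack, route and netfilter seq_file iterators below. NR_CPUS is the compile-time ceiling (often 4096 on distro kernels) while nr_cpu_ids is the number of CPU ids actually in use, so the old loops wasted thousands of cpu_possible() probes on small machines. A sketch of the equivalent scan using the cpumask helpers this series prepares for (kernel-internal, signatures assumed):

	/* cpumask_next(n, mask) returns the first set bit after n,
	 * or a value >= nr_cpu_ids once the mask is exhausted. */
	cpu = cpumask_next(*pos - 1, cpu_possible_mask);
	if (cpu < nr_cpu_ids) {
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;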
@@ -964,7 +964,6 @@ adjudge_to_death:
 	state = sk->sk_state;
 	sock_hold(sk);
 	sock_orphan(sk);
-	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/*
 	 * It is the last release_sock in its life. It will remove backlog.
@@ -978,6 +977,8 @@ adjudge_to_death:
 	bh_lock_sock(sk);
 	WARN_ON(sock_owned_by_user(sk));
 
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
 		goto out;
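These two hunks, with the matching inet_connection_sock.c and tcp.c changes below, are the substance of "net: Fix percpu counters deadlock". percpu_counter_inc() takes the counter's internal spinlock whenever it folds a per-CPU batch into the global count, and sk_prot->orphan_count is also updated from softirq context; incrementing it from process context with bottom halves enabled can therefore deadlock against a softirq on the same CPU. Moving the increment after bh_lock_sock(), or bracketing it as in the tcp_v4/v6_init_sock() hunks, keeps the two contexts serialized. The rule in miniature (counter name hypothetical):

	local_bh_disable();			/* keep softirqs off this CPU */
	percpu_counter_inc(&shared_counter);	/* batch spinlock may be taken here */
	local_bh_enable();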
@@ -633,8 +633,6 @@ void inet_csk_listen_stop(struct sock *sk)
 
 		acc_req = req->dl_next;
 
-		percpu_counter_inc(sk->sk_prot->orphan_count);
-
 		local_bh_disable();
 		bh_lock_sock(child);
 		WARN_ON(sock_owned_by_user(child));
@@ -644,6 +642,8 @@ void inet_csk_listen_stop(struct sock *sk)
 
 		sock_orphan(child);
 
+		percpu_counter_inc(sk->sk_prot->orphan_count);
+
 		inet_csk_destroy_sock(child);
 
 		bh_unlock_sock(child);
@@ -291,7 +291,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
@@ -306,7 +306,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct net *net = seq_file_net(seq);
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
@@ -38,6 +38,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
+#include <linux/bottom_half.h>
 #include <linux/inetdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -50,13 +51,17 @@
 static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
+	int orphans, sockets;
+
+	local_bh_disable();
+	orphans = percpu_counter_sum_positive(&tcp_orphan_count),
+	sockets = percpu_counter_sum_positive(&tcp_sockets_allocated),
+	local_bh_enable();
 
 	socket_seq_show(seq);
 	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
-		   sock_prot_inuse_get(net, &tcp_prot),
-		   (int)percpu_counter_sum_positive(&tcp_orphan_count),
-		   tcp_death_row.tw_count,
-		   (int)percpu_counter_sum_positive(&tcp_sockets_allocated),
+		   sock_prot_inuse_get(net, &tcp_prot), orphans,
+		   tcp_death_row.tw_count, sockets,
 		   atomic_read(&tcp_memory_allocated));
 	seq_printf(seq, "UDP: inuse %d mem %d\n",
 		   sock_prot_inuse_get(net, &udp_prot),
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
@@ -442,7 +442,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
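The sockstat_seq_show() hunk above is the read side of the same percpu-counter fix: percpu_counter_sum_positive() walks every CPU's partial count under the counter's lock, so the two sums are taken with bottom halves disabled as well. (The trailing commas after the orphans and sockets assignments are comma operators rather than semicolons; the lines are reproduced here as committed.)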
@@ -1836,7 +1836,6 @@ adjudge_to_death:
 	state = sk->sk_state;
 	sock_hold(sk);
 	sock_orphan(sk);
-	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/* It is the last release_sock in its life. It will remove backlog. */
 	release_sock(sk);
@@ -1849,6 +1848,8 @@ adjudge_to_death:
 	bh_lock_sock(sk);
 	WARN_ON(sock_owned_by_user(sk));
 
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
 		goto out;
@@ -51,6 +51,7 @@
  */
 
 
+#include <linux/bottom_half.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
@@ -1797,7 +1798,9 @@ static int tcp_v4_init_sock(struct sock *sk)
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
+	local_bh_disable();
 	percpu_counter_inc(&tcp_sockets_allocated);
+	local_bh_enable();
 
 	return 0;
 }
@@ -23,6 +23,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/bottom_half.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -1830,7 +1831,9 @@ static int tcp_v6_init_sock(struct sock *sk)
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
+	local_bh_disable();
 	percpu_counter_inc(&tcp_sockets_allocated);
+	local_bh_enable();
 
 	return 0;
 }
@@ -507,7 +507,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* No cache entry or it is invalid, time to schedule */
 	dest = __ip_vs_lblc_schedule(svc);
 	if (!dest) {
-		IP_VS_DBG(1, "no destination available\n");
+		IP_VS_ERR_RL("LBLC: no destination available\n");
 		return NULL;
 	}
 
@@ -690,7 +690,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* The cache entry is invalid, time to schedule */
 	dest = __ip_vs_lblcr_schedule(svc);
 	if (!dest) {
-		IP_VS_DBG(1, "no destination available\n");
+		IP_VS_ERR_RL("LBLCR: no destination available\n");
 		read_unlock(&svc->sched_lock);
 		return NULL;
 	}
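Each IPVS scheduler below now reports scheduling failure through the same rate-limited macro, prefixed with its scheduler name. IP_VS_ERR_RL in this era's include/net/ip_vs.h is roughly:

	#define IP_VS_ERR_RL(msg...)				\
		do {						\
			if (net_ratelimit())			\
				printk(KERN_ERR "IPVS: " msg);	\
		} while (0)

net_ratelimit() keeps a burst of unschedulable packets from flooding the log, while the old IP_VS_DBG(1, ...) calls were compiled out on non-debug kernels and never seen in production.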
@@ -66,11 +66,15 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		}
 	}
 
-	if (least)
-		IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
-			      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
-			      atomic_read(&least->activeconns),
-			      atomic_read(&least->inactconns));
+	if (!least)
+		IP_VS_ERR_RL("LC: no destination available\n");
+	else
+		IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
+			      "inactconns %d\n",
+			      IP_VS_DBG_ADDR(svc->af, &least->addr),
+			      ntohs(least->port),
+			      atomic_read(&least->activeconns),
+			      atomic_read(&least->inactconns));
 
 	return least;
 }
@@ -95,8 +95,10 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		}
 	}
 
-	if (!least)
+	if (!least) {
+		IP_VS_ERR_RL("NQ: no destination available\n");
 		return NULL;
+	}
 
   out:
 	IP_VS_DBG_BUF(6, "NQ: server %s:%u "
@@ -69,6 +69,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		q = q->next;
 	} while (q != p);
 	write_unlock(&svc->sched_lock);
+	IP_VS_ERR_RL("RR: no destination available\n");
 	return NULL;
 
   out:
@@ -84,6 +84,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			goto nextstage;
 		}
 	}
+	IP_VS_ERR_RL("SED: no destination available\n");
 	return NULL;
 
 	/*
@@ -219,6 +219,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	    || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
 	    || atomic_read(&dest->weight) <= 0
 	    || is_overloaded(dest)) {
+		IP_VS_ERR_RL("SH: no destination available\n");
 		return NULL;
 	}
 
@@ -72,6 +72,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			goto nextstage;
 		}
 	}
+	IP_VS_ERR_RL("WLC: no destination available\n");
 	return NULL;
 
 	/*
@@ -155,6 +155,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 
 	if (mark->cl == mark->cl->next) {
 		/* no dest entry */
+		IP_VS_ERR_RL("WRR: no destination available: "
+			     "no destinations present\n");
 		dest = NULL;
 		goto out;
 	}
@@ -168,8 +170,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			 */
 			if (mark->cw == 0) {
 				mark->cl = &svc->destinations;
-				IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
-					     "no available servers\n");
+				IP_VS_ERR_RL("WRR: no destination "
+					     "available\n");
 				dest = NULL;
 				goto out;
 			}
@@ -191,6 +193,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			/* back to the start, and no dest is found.
 			   It is only possible when all dests are OVERLOADED */
 			dest = NULL;
+			IP_VS_ERR_RL("WRR: no destination available: "
+				     "all destinations are overloaded\n");
 			goto out;
 		}
 	}
@@ -200,7 +200,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu + 1;
@@ -215,7 +215,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct net *net = seq_file_net(seq);
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu + 1;
@@ -335,9 +335,6 @@ config NET_CLS_CGROUP
 	  Say Y here if you want to classify packets based on the control
 	  cgroup of their process.
 
-	  To compile this code as a module, choose M here: the
-	  module will be called cls_cgroup.
-
 config NET_EMATCH
 	bool "Extended Matches"
 	select NET_CLS
@@ -24,10 +24,16 @@ struct cgroup_cls_state
 	u32 classid;
 };
 
-static inline struct cgroup_cls_state *net_cls_state(struct cgroup *cgrp)
+static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
 {
-	return (struct cgroup_cls_state *)
-		cgroup_subsys_state(cgrp, net_cls_subsys_id);
+	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
+			    struct cgroup_cls_state, css);
+}
+
+static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
+{
+	return container_of(task_subsys_state(p, net_cls_subsys_id),
+			    struct cgroup_cls_state, css);
 }
 
 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
@@ -39,19 +45,19 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 		return ERR_PTR(-ENOMEM);
 
 	if (cgrp->parent)
-		cs->classid = net_cls_state(cgrp->parent)->classid;
+		cs->classid = cgrp_cls_state(cgrp->parent)->classid;
 
 	return &cs->css;
 }
 
 static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-	kfree(ss);
+	kfree(cgrp_cls_state(cgrp));
 }
 
 static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
 {
-	return net_cls_state(cgrp)->classid;
+	return cgrp_cls_state(cgrp)->classid;
 }
 
 static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
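Two fixes land together in the hunks above. The oops: cgrp_destroy() freed the cgroup_subsys pointer, kfree(ss), instead of the per-cgroup state. The cleanup: the old cast-based accessor is only correct while css happens to be the first member of the state structure; container_of() recovers the enclosing structure from the member pointer at any offset. A minimal illustration of the fragility (layout hypothetical):

	struct cgroup_cls_state {
		u32 classid;			/* any field placed before... */
		struct cgroup_subsys_state css;	/* ...css breaks the cast */
	};

	/* (struct cgroup_cls_state *)css_ptr now points before the object;
	 * container_of(css_ptr, struct cgroup_cls_state, css) stays correct. */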
@@ -59,7 +65,7 @@ static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
 	if (!cgroup_lock_live_group(cgrp))
 		return -ENODEV;
 
-	net_cls_state(cgrp)->classid = (u32) value;
+	cgrp_cls_state(cgrp)->classid = (u32) value;
 
 	cgroup_unlock();
 
@@ -115,8 +121,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
 		return -1;
 
 	rcu_read_lock();
-	cs = (struct cgroup_cls_state *) task_subsys_state(current,
-						net_cls_subsys_id);
+	cs = task_cls_state(current);
 	if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
 		res->classid = cs->classid;
 		res->class = 0;
@@ -44,27 +44,14 @@ static struct snmp_mib xfrm_mib_list[] = {
 	SNMP_MIB_SENTINEL
 };
 
-static unsigned long
-fold_field(void *mib[], int offt)
-{
-	unsigned long res = 0;
-	int i;
-
-	for_each_possible_cpu(i) {
-		res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
-		res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
-	}
-	return res;
-}
-
 static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
 	int i;
 	for (i=0; xfrm_mib_list[i].name; i++)
 		seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-			   fold_field((void **)net->mib.xfrm_statistics,
+			   snmp_fold_field((void **)net->mib.xfrm_statistics,
 				      xfrm_mib_list[i].entry));
 	return 0;
 }
 
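The removed fold_field() duplicated the generic MIB folding helper already exported from net/ipv4/af_inet.c; with this era's two-copy per-CPU SNMP arrays, snmp_fold_field() has essentially the same body (paraphrased):

	unsigned long snmp_fold_field(void *mib[], int offt)
	{
		unsigned long res = 0;
		int i;

		for_each_possible_cpu(i) {
			res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
			res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
		}
		return res;
	}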