mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-16 05:26:07 +00:00

commit 1df9fad122
Merge branches 'cma', 'cxgb4' and 'qib' into for-next

@@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 
 	return 0;
 }
@@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
 
 	PDBG("%s ep %p\n", __func__, ep);
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	return 0;
 }
 
@@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int disconnect = 1;
 	int release = 0;
-	int closing = 0;
+	int abort = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
 
@@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		 * in rdma connection migration (see c4iw_accept_cr()).
 		 */
 		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
 		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case FPDU_MODE:
 		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
-		closing = 1;
+		attrs.next_state = C4IW_QP_STATE_CLOSING;
+		abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 		peer_close_upcall(ep);
+		disconnect = 1;
 		break;
 	case ABORTING:
 		disconnect = 0;
@@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		BUG_ON(1);
 	}
 	mutex_unlock(&ep->com.mutex);
-	if (closing) {
-		attrs.next_state = C4IW_QP_STATE_CLOSING;
-		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-	}
 	if (disconnect)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
@@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
 	 */
-	ep->com.wr_wait.done = 1;
-	ep->com.wr_wait.ret = -ECONNRESET;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
@@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = lookup_tid(t, tid);
 	BUG_ON(!ep);
 
-	if (ep->com.qp) {
+	if (ep && ep->com.qp) {
 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
 		       ep->com.qp->wq.sq.qid);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	} else
-		printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
+		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
 
 	return 0;
 }
@@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
 		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
 		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			if (ret)
-				wr_waitp->ret = -ret;
-			else
-				wr_waitp->ret = 0;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
+		if (wr_waitp)
+			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
 		kfree_skb(skb);
 		break;
 	case 2:

@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-static LIST_HEAD(dev_list);
+static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 
 static struct dentry *c4iw_debugfs_root;
@@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 	c4iw_destroy_resource(&rdev->resource);
 }
 
-static void c4iw_remove(struct c4iw_dev *dev)
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
+static void c4iw_remove(struct uld_ctx *ctx)
 {
-	PDBG("%s c4iw_dev %p\n", __func__, dev);
-	list_del(&dev->entry);
-	if (dev->registered)
-		c4iw_unregister_device(dev);
-	c4iw_rdev_close(&dev->rdev);
-	idr_destroy(&dev->cqidr);
-	idr_destroy(&dev->qpidr);
-	idr_destroy(&dev->mmidr);
-	iounmap(dev->rdev.oc_mw_kva);
-	ib_dealloc_device(&dev->ibdev);
+	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+	c4iw_unregister_device(ctx->dev);
+	c4iw_rdev_close(&ctx->dev->rdev);
+	idr_destroy(&ctx->dev->cqidr);
+	idr_destroy(&ctx->dev->qpidr);
+	idr_destroy(&ctx->dev->mmidr);
+	iounmap(ctx->dev->rdev.oc_mw_kva);
+	ib_dealloc_device(&ctx->dev->ibdev);
+	ctx->dev = NULL;
 }
 
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	devp->rdev.lldi = *infop;
 
@@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
 					  devp->rdev.lldi.vr->ocq.size);
 
-	printk(KERN_INFO MOD "ocq memory: "
+	PDBG(KERN_INFO MOD "ocq memory: "
 	       "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
 	       devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
 	       devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
 
-	mutex_lock(&dev_mutex);
 
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
-		mutex_unlock(&dev_mutex);
 		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	idr_init(&devp->cqidr);
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	spin_lock_init(&devp->lock);
-	list_add_tail(&devp->entry, &dev_list);
-	mutex_unlock(&dev_mutex);
 
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
@@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 
 static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 {
-	struct c4iw_dev *dev;
+	struct uld_ctx *ctx;
 	static int vers_printed;
 	int i;
 
@@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
 		       DRV_VERSION);
 
-	dev = c4iw_alloc(infop);
-	if (!dev)
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx) {
+		ctx = ERR_PTR(-ENOMEM);
 		goto out;
+	}
+	ctx->lldi = *infop;
 
 	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-	     __func__, pci_name(dev->rdev.lldi.pdev),
-	     dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
-	     dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+	     __func__, pci_name(ctx->lldi.pdev),
+	     ctx->lldi.nchan, ctx->lldi.nrxq,
+	     ctx->lldi.ntxq, ctx->lldi.nports);
 
-	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
-		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
+	mutex_lock(&dev_mutex);
+	list_add_tail(&ctx->entry, &uld_ctx_list);
+	mutex_unlock(&dev_mutex);
+
+	for (i = 0; i < ctx->lldi.nrxq; i++)
+		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
 out:
-	return dev;
+	return ctx;
 }
 
 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 			       const struct pkt_gl *gl)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
+	struct c4iw_dev *dev = ctx->dev;
 	struct sk_buff *skb;
 	const struct cpl_act_establish *rpl;
 	unsigned int opcode;
@@ -503,47 +512,49 @@ nomem:
 
 static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
 
 	PDBG("%s new_state %u\n", __func__, new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
-		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
-		if (!dev->registered) {
-			int ret;
-			ret = c4iw_register_device(dev);
-			if (ret)
+		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+		if (!ctx->dev) {
+			int ret = 0;
+
+			ctx->dev = c4iw_alloc(&ctx->lldi);
+			if (!IS_ERR(ctx->dev))
+				ret = c4iw_register_device(ctx->dev);
+			if (IS_ERR(ctx->dev) || ret)
 				printk(KERN_ERR MOD
 				       "%s: RDMA registration failed: %d\n",
-				       pci_name(dev->rdev.lldi.pdev), ret);
+				       pci_name(ctx->lldi.pdev), ret);
 		}
 		break;
 	case CXGB4_STATE_DOWN:
 		printk(KERN_INFO MOD "%s: Down\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
-			c4iw_unregister_device(dev);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	case CXGB4_STATE_START_RECOVERY:
 		printk(KERN_INFO MOD "%s: Fatal Error\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		dev->rdev.flags |= T4_FATAL_ERROR;
-		if (dev->registered) {
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev) {
 			struct ib_event event;
 
+			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
 			memset(&event, 0, sizeof event);
 			event.event = IB_EVENT_DEVICE_FATAL;
-			event.device = &dev->ibdev;
+			event.device = &ctx->dev->ibdev;
 			ib_dispatch_event(&event);
-			c4iw_unregister_device(dev);
+			c4iw_remove(ctx);
 		}
 		break;
 	case CXGB4_STATE_DETACH:
 		printk(KERN_INFO MOD "%s: Detach\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		mutex_lock(&dev_mutex);
-		c4iw_remove(dev);
-		mutex_unlock(&dev_mutex);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	}
 	return 0;
@@ -576,11 +587,13 @@ static int __init c4iw_init_module(void)
 
 static void __exit c4iw_exit_module(void)
 {
-	struct c4iw_dev *dev, *tmp;
+	struct uld_ctx *ctx, *tmp;
 
 	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
-		c4iw_remove(dev);
+	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
+		if (ctx->dev)
+			c4iw_remove(ctx);
+		kfree(ctx);
 	}
 	mutex_unlock(&dev_mutex);
 	cxgb4_unregister_uld(CXGB4_ULD_RDMA);

@@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 
 #define C4IW_WR_TO (10*HZ)
 
+enum {
+	REPLY_READY = 0,
+};
+
 struct c4iw_wr_wait {
 	wait_queue_head_t wait;
-	int done;
+	unsigned long status;
 	int ret;
 };
 
 static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
 {
 	wr_waitp->ret = 0;
-	wr_waitp->done = 0;
+	wr_waitp->status = 0;
 	init_waitqueue_head(&wr_waitp->wait);
 }
 
+static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+	wr_waitp->ret = ret;
+	set_bit(REPLY_READY, &wr_waitp->status);
+	wake_up(&wr_waitp->wait);
+}
+
 static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 				 struct c4iw_wr_wait *wr_waitp,
 				 u32 hwtid, u32 qpid,
 				 const char *func)
 {
 	unsigned to = C4IW_WR_TO;
-	do {
+	int ret;
 
-		wait_event_timeout(wr_waitp->wait, wr_waitp->done, to);
-		if (!wr_waitp->done) {
+	do {
+		ret = wait_event_timeout(wr_waitp->wait,
+			test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
+		if (!ret) {
 			printk(KERN_ERR MOD "%s - Device %s not responding - "
 			       "tid %u qpid %u\n", func,
 			       pci_name(rdev->lldi.pdev), hwtid, qpid);
+			if (c4iw_fatal_error(rdev)) {
+				wr_waitp->ret = -EIO;
+				break;
+			}
 			to = to << 2;
 		}
-	} while (!wr_waitp->done);
+	} while (!ret);
 	if (wr_waitp->ret)
-		printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n",
-		       pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+		PDBG("%s: FW reply %d tid %u qpid %u\n",
+		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
 	return wr_waitp->ret;
 }
 
-
 struct c4iw_dev {
 	struct ib_device ibdev;
 	struct c4iw_rdev rdev;
@@ -175,9 +191,7 @@ struct c4iw_dev {
 	struct idr qpidr;
 	struct idr mmidr;
 	spinlock_t lock;
-	struct list_head entry;
 	struct dentry *debugfs_root;
-	u8 registered;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)

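The iw_cxgb4.h change above replaces the wr_wait "done" flag with a REPLY_READY bit that the waiter consumes via test_and_clear_bit(), so the wait condition is reset atomically when the reply is observed. Below is a minimal userspace C sketch of that wait/wake pattern, assuming a pthread condition variable in place of the kernel waitqueue; the struct and function names are illustrative only and are not part of this commit.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for struct c4iw_wr_wait: a reply flag plus a wait object. */
struct wr_wait {
	pthread_mutex_t lock;
	pthread_cond_t wait;      /* plays the role of wait_queue_head_t */
	atomic_int reply_ready;   /* plays the role of the REPLY_READY bit */
	int ret;
};

static void wr_wait_init(struct wr_wait *w)
{
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->wait, NULL);
	atomic_store(&w->reply_ready, 0);
	w->ret = 0;
}

/* Analogue of c4iw_wake_up(): record the status, set the flag, wake the waiter. */
static void wr_wake_up(struct wr_wait *w, int ret)
{
	pthread_mutex_lock(&w->lock);
	w->ret = ret;
	atomic_store(&w->reply_ready, 1);
	pthread_cond_signal(&w->wait);
	pthread_mutex_unlock(&w->lock);
}

/* Analogue of c4iw_wait_for_reply(): consume the flag atomically (test-and-clear). */
static int wr_wait_for_reply(struct wr_wait *w)
{
	pthread_mutex_lock(&w->lock);
	while (!atomic_exchange(&w->reply_ready, 0))
		pthread_cond_wait(&w->wait, &w->lock);
	pthread_mutex_unlock(&w->lock);
	return w->ret;
}

static void *fw_reply_thread(void *arg)
{
	sleep(1);               /* pretend the firmware is busy */
	wr_wake_up(arg, 0);     /* reply arrived, status 0 */
	return NULL;
}

int main(void)
{
	struct wr_wait w;
	pthread_t t;

	wr_wait_init(&w);
	pthread_create(&t, NULL, fw_reply_thread, &w);
	printf("FW reply status %d\n", wr_wait_for_reply(&w));
	pthread_join(t, NULL);
	return 0;
}

Built with cc -pthread, the waiter blocks until the reply thread sets the flag; because the flag is cleared in the same atomic step that observes it, a stale value cannot satisfy a later wait, which is the property the REPLY_READY rework provides.
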
@@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
 		if (ret)
 			goto bail2;
 	}
-	dev->registered = 1;
 	return 0;
 bail2:
 	ib_unregister_device(&dev->ibdev);
@@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 					   c4iw_class_attributes[i]);
 	ib_unregister_device(&dev->ibdev);
 	kfree(dev->ibdev.iwcm);
-	dev->registered = 0;
 	return;
 }

@@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
 		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
 		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
-		t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 |
+		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
 		V_FW_RI_RES_WR_IQID(scq->cqid));
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
@@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		if (ret) {
 			if (internal)
 				c4iw_get_ep(&qhp->ep->com);
-			disconnect = abort = 1;
 			goto err;
 		}
 		break;

@@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	struct ipath_devdata *dd;
 	unsigned long long addr;
 	u32 bar0 = 0, bar1 = 0;
-	u8 rev;
 
 	dd = ipath_alloc_devdata(pdev);
 	if (IS_ERR(dd)) {
@@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 		goto bail_regions;
 	}
 
-	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-	if (ret) {
-		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
-			      "%u: err %d\n", dd->ipath_unit, -ret);
-		goto bail_regions;	/* shouldn't ever happen */
-	}
-	dd->ipath_pcirev = rev;
+	dd->ipath_pcirev = pdev->revision;
 
 #if defined(__powerpc__)
 	/* There isn't a generic way to specify writethrough mappings */

@@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
 	tstart = get_jiffies_64();
 	while (chan_done &&
-	       !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+	       !time_after64(get_jiffies_64(),
+			tstart + msecs_to_jiffies(500))) {
 		msleep(20);
 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),

@@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
 	 */
 	devid = parent->device;
 	if (devid >= 0x25e2 && devid <= 0x25fa) {
-		u8 rev;
-
 		/* 5000 P/V/X/Z */
-		pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
-		if (rev <= 0xb2)
+		if (parent->revision <= 0xb2)
 			bits = 1U << 10;
 		else
 			bits = 7U << 10;