module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
-static void flush_tx_list(struct hfi1_qp *qp);
+static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
struct sdma_engine *sde,
struct iowait *wait,
* Put the QP into the hash table.
* The hash table holds a reference to the QP.
*/
-static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
+static void insert_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
unsigned long flags;
spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
if (qp->ibqp.qp_num <= 1) {
- rcu_assign_pointer(ibp->qp[qp->ibqp.qp_num], qp);
+ rcu_assign_pointer(ibp->rvp.qp[qp->ibqp.qp_num], qp);
} else {
u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
* Remove the QP from the table so it can't be found asynchronously by
* the receive interrupt routine.
*/
-static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
+static void remove_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
- if (rcu_dereference_protected(ibp->qp[0],
+ if (rcu_dereference_protected(ibp->rvp.qp[0],
+ lockdep_is_held(
+ &dev->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+ } else if (rcu_dereference_protected(ibp->rvp.qp[1],
lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp[0], NULL);
- } else if (rcu_dereference_protected(ibp->qp[1],
- lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp[1], NULL);
+ RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
} else {
- struct hfi1_qp *q;
- struct hfi1_qp __rcu **qpp;
+ struct rvt_qp *q;
+ struct rvt_qp __rcu **qpp;
removed = 0;
qpp = &dev->qp_dev->qp_table[n];
{
struct hfi1_ibdev *dev = &dd->verbs_dev;
unsigned long flags;
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
unsigned n, qp_inuse = 0;
for (n = 0; n < dd->num_pports; n++) {
if (!hfi1_mcast_tree_empty(ibp))
qp_inuse++;
rcu_read_lock();
- if (rcu_dereference(ibp->qp[0]))
+ if (rcu_dereference(ibp->rvp.qp[0]))
qp_inuse++;
- if (rcu_dereference(ibp->qp[1]))
+ if (rcu_dereference(ibp->rvp.qp[1]))
qp_inuse++;
rcu_read_unlock();
}
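For reference, the locking model these hunks preserve: insert_qp()/remove_qp() publish and unpublish QPs under dev->qp_dev->qpt_lock with rcu_assign_pointer()/RCU_INIT_POINTER(), while readers such as the receive interrupt path only walk ibp->rvp.qp[] and the hash table inside rcu_read_lock(), as free_all_qps() does above. A minimal sketch of that reader side, assuming a hypothetical lookup helper (illustrative only, not part of this patch):

/* Illustrative reader-side lookup under RCU; not part of the patch. */
static struct rvt_qp *example_lookup_qpn(struct hfi1_ibport *ibp,
					 struct hfi1_ibdev *dev, u32 qpn)
{
	struct rvt_qp *qp = NULL;

	rcu_read_lock();
	if (qpn <= 1) {
		qp = rcu_dereference(ibp->rvp.qp[qpn]);
	} else {
		u32 n = qpn_hash(dev->qp_dev, qpn);

		for (qp = rcu_dereference(dev->qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	if (qp)
		atomic_inc(&qp->refcount);	/* keep it alive past the RCU section */
	rcu_read_unlock();
	return qp;
}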
* @qp: the QP to reset
* @type: the QP type
*/
-static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type)
+static void reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
{
+ struct hfi1_qp_priv *priv = qp->priv;
qp->remote_qpn = 0;
qp->qkey = 0;
qp->qp_access_flags = 0;
iowait_init(
- &qp->s_iowait,
+ &priv->s_iowait,
1,
hfi1_do_send,
iowait_sleep,
iowait_wakeup);
- qp->s_flags &= HFI1_S_SIGNAL_REQ_WR;
+ qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
qp->s_hdrwords = 0;
qp->s_wqe = NULL;
qp->s_draining = 0;
}
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
qp->r_nak_state = 0;
- qp->r_adefered = 0;
+ priv->r_adefered = 0;
qp->r_aflags = 0;
qp->r_flags = 0;
qp->s_head = 0;
qp->r_sge.num_sge = 0;
}
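reset_qp() is the first place the new qp->priv indirection shows up: the iowait state, the cached send header, and the deferred-ack count now live in a driver-private structure rather than in the (now common) rvt_qp. A sketch of the private struct this patch assumes, with member types inferred from how the fields are used in these hunks (the authoritative definition is in the driver headers):

/* Sketch of the driver-private QP state referenced as qp->priv below. */
struct hfi1_qp_priv {
	struct ahg_ib_header *s_hdr;	/* next packet header to send */
	struct sdma_engine *s_sde;	/* current sdma engine */
	u8 s_sc;			/* SC[0..4] for next packet */
	u8 r_adefered;			/* number of deferred acks */
	struct iowait s_iowait;		/* sdma/iowait state */
	struct rvt_qp *owner;		/* back-pointer used by iowait_to_qp() */
};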
-static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
+static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
unsigned n;
- if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
hfi1_put_ss(&qp->s_rdma_read_sge);
hfi1_put_ss(&qp->r_sge);
if (clr_sends) {
while (qp->s_last != qp->s_head) {
- struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+ struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
unsigned i;
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct hfi1_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- hfi1_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
if (++qp->s_last >= qp->s_size)
qp->s_last = 0;
}
if (qp->s_rdma_mr) {
- hfi1_put_mr(qp->s_rdma_mr);
+ rvt_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
}
return;
for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
- struct hfi1_ack_entry *e = &qp->s_ack_queue[n];
+ struct rvt_ack_entry *e = &qp->s_ack_queue[n];
if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
e->rdma_sge.mr) {
- hfi1_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
}
* The QP r_lock and s_lock should be held and interrupts disabled.
* If we are already in error state, just return.
*/
-int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
+int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct hfi1_qp_priv *priv = qp->priv;
struct ib_wc wc;
int ret = 0;
qp->state = IB_QPS_ERR;
- if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
- if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
- qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;
+ if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
+ qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
write_seqlock(&dev->iowait_lock);
- if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
- qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
- list_del_init(&qp->s_iowait.list);
+ if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
+ qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ list_del_init(&priv->s_iowait.list);
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
write_sequnlock(&dev->iowait_lock);
- if (!(qp->s_flags & HFI1_S_BUSY)) {
+ if (!(qp->s_flags & RVT_S_BUSY)) {
qp->s_hdrwords = 0;
if (qp->s_rdma_mr) {
- hfi1_put_mr(qp->s_rdma_mr);
+ rvt_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
flush_tx_list(qp);
wc.qp = &qp->ibqp;
wc.opcode = IB_WC_RECV;
- if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
+ if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
wc.wr_id = qp->r_wr_id;
wc.status = err;
hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
wc.status = IB_WC_WR_FLUSH_ERR;
if (qp->r_rq.wq) {
- struct hfi1_rwq *wq;
+ struct rvt_rwq *wq;
u32 head;
u32 tail;
return ret;
}
-static void flush_tx_list(struct hfi1_qp *qp)
+static void flush_tx_list(struct rvt_qp *qp)
{
- while (!list_empty(&qp->s_iowait.tx_head)) {
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ while (!list_empty(&priv->s_iowait.tx_head)) {
struct sdma_txreq *tx;
tx = list_first_entry(
- &qp->s_iowait.tx_head,
+ &priv->s_iowait.tx_head,
struct sdma_txreq,
list);
list_del_init(&tx->list);
}
}
-static void flush_iowait(struct hfi1_qp *qp)
+static void flush_iowait(struct rvt_qp *qp)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
unsigned long flags;
write_seqlock_irqsave(&dev->iowait_lock, flags);
- if (!list_empty(&qp->s_iowait.list)) {
- list_del_init(&qp->s_iowait.list);
+ if (!list_empty(&priv->s_iowait.list)) {
+ list_del_init(&priv->s_iowait.list);
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
int attr_mask, struct ib_udata *udata)
{
struct hfi1_ibdev *dev = to_idev(ibqp->device);
- struct hfi1_qp *qp = to_iqp(ibqp);
+ struct rvt_qp *qp = to_iqp(ibqp);
+ struct hfi1_qp_priv *priv = qp->priv;
enum ib_qp_state cur_state, new_state;
struct ib_event ev;
int lastwqe = 0;
if (attr_mask & IB_QP_AV) {
u8 sc;
- if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
+ if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
goto inval;
- if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr))
+ if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
goto inval;
sc = ah_to_sc(ibqp->device, &attr->ah_attr);
if (!qp_to_sdma_engine(qp, sc) &&
if (attr_mask & IB_QP_ALT_PATH) {
u8 sc;
- if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
+ if (attr->alt_ah_attr.dlid >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE))
goto inval;
- if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+ if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
goto inval;
if (attr->alt_pkey_index >= hfi1_get_npkeys(dd))
goto inval;
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
flush_iowait(qp);
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
spin_unlock(&qp->s_lock);
spin_unlock_irq(&qp->r_lock);
/* Stop the sending work queue and retry timer */
- cancel_work_sync(&qp->s_iowait.iowork);
+ cancel_work_sync(&priv->s_iowait.iowork);
del_timer_sync(&qp->s_timer);
- iowait_sdma_drain(&qp->s_iowait);
+ iowait_sdma_drain(&priv->s_iowait);
flush_tx_list(qp);
remove_qp(dev, qp);
wait_event(qp->wait, !atomic_read(&qp->refcount));
case IB_QPS_RTR:
/* Allow event to re-trigger if QP set to RTR more than once */
- qp->r_flags &= ~HFI1_R_COMM_EST;
+ qp->r_flags &= ~RVT_R_COMM_EST;
qp->state = new_state;
break;
qp->remote_ah_attr = attr->ah_attr;
qp->s_srate = attr->ah_attr.static_rate;
qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
- qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
- qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
+ priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
}
if (attr_mask & IB_QP_ALT_PATH) {
qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = qp->alt_ah_attr.port_num;
qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= HFI1_S_AHG_CLEAR;
- qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
- qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
+ qp->s_flags |= RVT_S_AHG_CLEAR;
+ priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
}
}
int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr)
{
- struct hfi1_qp *qp = to_iqp(ibqp);
+ struct rvt_qp *qp = to_iqp(ibqp);
attr->qp_state = qp->state;
attr->cur_qp_state = attr->qp_state;
init_attr->recv_cq = qp->ibqp.recv_cq;
init_attr->srq = qp->ibqp.srq;
init_attr->cap = attr->cap;
- if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR)
+ if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
else
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
*
* Returns the AETH.
*/
-__be32 hfi1_compute_aeth(struct hfi1_qp *qp)
+__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
u32 aeth = qp->r_msn & HFI1_MSN_MASK;
} else {
u32 min, max, x;
u32 credits;
- struct hfi1_rwq *wq = qp->r_rq.wq;
+ struct rvt_rwq *wq = qp->r_rq.wq;
u32 head;
u32 tail;
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
int err;
- struct hfi1_swqe *swq = NULL;
+ struct rvt_swqe *swq = NULL;
struct hfi1_ibdev *dev;
struct hfi1_devdata *dd;
size_t sz;
case IB_QPT_UC:
case IB_QPT_RC:
case IB_QPT_UD:
- sz = sizeof(struct hfi1_sge) *
+ sz = sizeof(struct rvt_sge) *
init_attr->cap.max_send_sge +
- sizeof(struct hfi1_swqe);
+ sizeof(struct rvt_swqe);
swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
if (swq == NULL) {
ret = ERR_PTR(-ENOMEM);
sz = sizeof(*qp);
sg_list_sz = 0;
if (init_attr->srq) {
- struct hfi1_srq *srq = to_isrq(init_attr->srq);
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
if (srq->rq.max_sge > 1)
sg_list_sz = sizeof(*qp->r_sg_list) *
goto bail_swq;
}
RCU_INIT_POINTER(qp->next, NULL);
- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
- if (!qp->s_hdr) {
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp_priv;
+ }
+ priv->owner = qp;
+ priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
+ if (!priv->s_hdr) {
ret = ERR_PTR(-ENOMEM);
goto bail_qp;
}
+ qp->priv = priv;
qp->timeout_jiffies =
usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1000UL);
qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
- sizeof(struct hfi1_rwqe);
- qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) +
+ sizeof(struct rvt_rwqe);
+ qp->r_rq.wq = vmalloc_user(sizeof(struct rvt_rwq) +
qp->r_rq.size * sz);
if (!qp->r_rq.wq) {
ret = ERR_PTR(-ENOMEM);
qp->s_size = init_attr->cap.max_send_wr + 1;
qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
- qp->s_flags = HFI1_S_SIGNAL_REQ_WR;
+ qp->s_flags = RVT_S_SIGNAL_REQ_WR;
dev = to_idev(ibpd->device);
dd = dd_from_dev(dev);
err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
goto bail_ip;
}
} else {
- u32 s = sizeof(struct hfi1_rwq) + qp->r_rq.size * sz;
+ u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
- qp->ip = hfi1_create_mmap_info(dev, s,
+ qp->ip = rvt_create_mmap_info(&dev->rdi, s,
ibpd->uobject->context,
qp->r_rq.wq);
if (!qp->ip) {
spin_unlock(&dev->n_qps_lock);
if (qp->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ spin_lock_irq(&dev->rdi.pending_lock);
+ list_add(&qp->ip->pending_mmaps, &dev->rdi.pending_mmaps);
+ spin_unlock_irq(&dev->rdi.pending_lock);
}
ret = &qp->ibqp;
bail_ip:
if (qp->ip)
- kref_put(&qp->ip->ref, hfi1_release_mmap_info);
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
else
vfree(qp->r_rq.wq);
free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
- kfree(qp->s_hdr);
+ kfree(priv->s_hdr);
+ kfree(priv);
+bail_qp_priv:
kfree(qp);
bail_swq:
vfree(swq);
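The extra bail_qp_priv label keeps the error unwind symmetric with the new allocation order. Summarized from the labels above (a restatement of the hunk, not new behavior):

/*
 * bail_ip:       drop qp->ip (or vfree qp->r_rq.wq) and release the QPN
 * bail_qp:       kfree(priv->s_hdr); kfree(priv);
 * bail_qp_priv:  kfree(qp);
 * bail_swq:      vfree(swq);
 *
 * A failed priv allocation jumps to bail_qp_priv; a failed priv->s_hdr
 * allocation jumps to bail_qp.
 */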
*/
int hfi1_destroy_qp(struct ib_qp *ibqp)
{
- struct hfi1_qp *qp = to_iqp(ibqp);
+ struct rvt_qp *qp = to_iqp(ibqp);
struct hfi1_ibdev *dev = to_idev(ibqp->device);
+ struct hfi1_qp_priv *priv = qp->priv;
/* Make sure HW and driver activity is stopped. */
spin_lock_irq(&qp->r_lock);
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
flush_iowait(qp);
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
spin_unlock(&qp->s_lock);
spin_unlock_irq(&qp->r_lock);
- cancel_work_sync(&qp->s_iowait.iowork);
+ cancel_work_sync(&priv->s_iowait.iowork);
del_timer_sync(&qp->s_timer);
- iowait_sdma_drain(&qp->s_iowait);
+ iowait_sdma_drain(&priv->s_iowait);
flush_tx_list(qp);
remove_qp(dev, qp);
wait_event(qp->wait, !atomic_read(&qp->refcount));
spin_unlock(&dev->n_qps_lock);
if (qp->ip)
- kref_put(&qp->ip->ref, hfi1_release_mmap_info);
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
else
vfree(qp->r_rq.wq);
vfree(qp->s_wq);
- kfree(qp->s_hdr);
+ kfree(priv->s_hdr);
+ kfree(priv);
kfree(qp);
return 0;
}
*
* The QP s_lock should be held.
*/
-void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth)
+void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
* honor the credit field.
*/
if (credit == HFI1_AETH_CREDIT_INVAL) {
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
- qp->s_flags |= HFI1_S_UNLIMITED_CREDIT;
- if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp);
}
}
- } else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
+ } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
if (cmp_msn(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit;
- if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp);
}
}
}
}
-void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag)
+void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
unsigned long flags;
unsigned seq)
{
struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
unsigned long flags;
int ret = 0;
struct hfi1_ibdev *dev;
qp = tx->qp;
+ priv = qp->priv;
spin_lock_irqsave(&qp->s_lock, flags);
if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
if (sdma_progress(sde, seq, stx))
goto eagain;
- if (list_empty(&qp->s_iowait.list)) {
+ if (list_empty(&priv->s_iowait.list)) {
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
- ibp->n_dmawait++;
- qp->s_flags |= HFI1_S_WAIT_DMA_DESC;
- list_add_tail(&qp->s_iowait.list, &sde->dmawait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC);
+ ibp->rvp.n_dmawait++;
+ qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+ list_add_tail(&priv->s_iowait.list, &sde->dmawait);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
atomic_inc(&qp->refcount);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EBUSY;
} else {
static void iowait_wakeup(struct iowait *wait, int reason)
{
- struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);
+ struct rvt_qp *qp = iowait_to_qp(wait);
WARN_ON(reason != SDMA_AVAIL_REASON);
- hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}
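Since s_iowait has moved into the private structure, iowait_wakeup() can no longer container_of() back to the QP directly; iowait_to_qp() is assumed to go through the owner pointer set in hfi1_create_qp(). A minimal sketch of that helper (the real one lives in a driver header):

/* Sketch: recover the QP from its iowait via the driver-private struct. */
static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
{
	struct hfi1_qp_priv *priv;

	priv = container_of(s_iowait, struct hfi1_qp_priv, s_iowait);
	return priv->owner;
}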
int hfi1_qp_init(struct hfi1_ibdev *dev)
* Return:
* A send engine for the qp or NULL for SMI type qp.
*/
-struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5)
+struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct sdma_engine *sde;
struct qp_iter {
struct hfi1_ibdev *dev;
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
int specials;
int n;
};
struct hfi1_ibdev *dev = iter->dev;
int n = iter->n;
int ret = 1;
- struct hfi1_qp *pqp = iter->qp;
- struct hfi1_qp *qp;
+ struct rvt_qp *pqp = iter->qp;
+ struct rvt_qp *qp;
/*
* The approach is to consider the special qps
* on one pass and then the qp_table entries on a subsequent pass.
*/
ibp = &ppd->ibport_data;
if (!(n & 1))
- qp = rcu_dereference(ibp->qp[0]);
+ qp = rcu_dereference(ibp->rvp.qp[0]);
else
- qp = rcu_dereference(ibp->qp[1]);
+ qp = rcu_dereference(ibp->rvp.qp[1]);
} else {
qp = rcu_dereference(
dev->qp_dev->qp_table[
"SMI", "GSI", "RC", "UC", "UD",
};
-static int qp_idle(struct hfi1_qp *qp)
+static int qp_idle(struct rvt_qp *qp)
{
return
qp->s_last == qp->s_acked &&
void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
- struct hfi1_swqe *wqe;
- struct hfi1_qp *qp = iter->qp;
+ struct rvt_swqe *wqe;
+ struct rvt_qp *qp = iter->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
struct sdma_engine *sde;
- sde = qp_to_sdma_engine(qp, qp->s_sc);
+ sde = qp_to_sdma_engine(qp, priv->s_sc);
wqe = get_swqe_ptr(qp, qp->s_last);
seq_printf(s,
"N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n",
wqe ? wqe->wr.opcode : 0,
qp->s_hdrwords,
qp->s_flags,
- atomic_read(&qp->s_iowait.sdma_busy),
- !list_empty(&qp->s_iowait.list),
+ atomic_read(&priv->s_iowait.sdma_busy),
+ !list_empty(&priv->s_iowait.list),
qp->timeout,
wqe ? wqe->ssn : 0,
qp->s_lsn,
sde ? sde->this_idx : 0);
}
-void qp_comm_est(struct hfi1_qp *qp)
+void qp_comm_est(struct rvt_qp *qp)
{
- qp->r_flags |= HFI1_R_COMM_EST;
+ qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
* Switch to alternate path.
* The QP s_lock should be held and interrupts disabled.
*/
-void hfi1_migrate_qp(struct hfi1_qp *qp)
+void hfi1_migrate_qp(struct rvt_qp *qp)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct ib_event ev;
qp->s_mig_state = IB_MIG_MIGRATED;
qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = qp->alt_ah_attr.port_num;
qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= HFI1_S_AHG_CLEAR;
- qp->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
- qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
+ qp->s_flags |= RVT_S_AHG_CLEAR;
+ priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;