]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blobdiff - drivers/staging/rdma/hfi1/sdma.c
staging/rdma/hfi1: Fix misspellings
[mirror_ubuntu-zesty-kernel.git] / drivers / staging / rdma / hfi1 / sdma.c
index 9a15f1f32b45d5dbdea1d446e287dbf1bf2a36de..cd818de47c66f34c9c776b9d58365c344656fbaf 100644 (file)
@@ -112,10 +112,10 @@ MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptor before interrupt");
        | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
 
 /* sdma_sendctrl operations */
-#define SDMA_SENDCTRL_OP_ENABLE    (1U << 0)
-#define SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
-#define SDMA_SENDCTRL_OP_HALT      (1U << 2)
-#define SDMA_SENDCTRL_OP_CLEANUP   (1U << 3)
+#define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
+#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
+#define SDMA_SENDCTRL_OP_HALT      BIT(2)
+#define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)
 
 /* handle long defines */
 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
@@ -361,6 +361,28 @@ static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
        write_sde_csr(sde, SD(DESC_CNT), reg);
 }
 
+static inline void complete_tx(struct sdma_engine *sde,
+                              struct sdma_txreq *tx,
+                              int res)
+{
+       /* protect against complete modifying */
+       struct iowait *wait = tx->wait;
+       callback_t complete = tx->complete;
+
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+       trace_hfi1_sdma_out_sn(sde, txp->sn);
+       if (WARN_ON_ONCE(sde->head_sn != txp->sn))
+               dd_dev_err(sde->dd, "expected %llu got %llu\n",
+                          sde->head_sn, txp->sn);
+       sde->head_sn++;
+#endif
+       sdma_txclean(sde->dd, tx);
+       if (complete)
+               (*complete)(tx, res);
+       if (iowait_sdma_dec(wait) && wait)
+               iowait_drain_wakeup(wait);
+}
+
 /*
  * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
  *
@@ -395,27 +417,8 @@ static void sdma_flush(struct sdma_engine *sde)
        }
        spin_unlock_irqrestore(&sde->flushlist_lock, flags);
        /* flush from flush list */
-       list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
-               int drained = 0;
-               /* protect against complete modifying */
-               struct iowait *wait = txp->wait;
-
-               list_del_init(&txp->list);
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
-               trace_hfi1_sdma_out_sn(sde, txp->sn);
-               if (WARN_ON_ONCE(sde->head_sn != txp->sn))
-                       dd_dev_err(sde->dd, "expected %llu got %llu\n",
-                               sde->head_sn, txp->sn);
-               sde->head_sn++;
-#endif
-               sdma_txclean(sde->dd, txp);
-               if (wait)
-                       drained = atomic_dec_and_test(&wait->sdma_busy);
-               if (txp->complete)
-                       (*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);
-               if (wait && drained)
-                       iowait_drain_wakeup(wait);
-       }
+       list_for_each_entry_safe(txp, txp_next, &flushlist, list)
+               complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
 }
 
 /*
@@ -472,7 +475,6 @@ static void sdma_err_halt_wait(struct work_struct *work)
 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
 {
        if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
-
                unsigned index;
                struct hfi1_devdata *dd = sde->dd;
 
@@ -531,7 +533,7 @@ static void sdma_err_progress_check(unsigned long data)
 
 static void sdma_hw_clean_up_task(unsigned long opaque)
 {
-       struct sdma_engine *sde = (struct sdma_engine *) opaque;
+       struct sdma_engine *sde = (struct sdma_engine *)opaque;
        u64 statuscsr;
 
        while (1) {
@@ -577,31 +579,10 @@ static void sdma_flush_descq(struct sdma_engine *sde)
                head = ++sde->descq_head & sde->sdma_mask;
                /* if now past this txp's descs, do the callback */
                if (txp && txp->next_descq_idx == head) {
-                       int drained = 0;
-                       /* protect against complete modifying */
-                       struct iowait *wait = txp->wait;
-
                        /* remove from list */
                        sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
-                       if (wait)
-                               drained = atomic_dec_and_test(&wait->sdma_busy);
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
-                       trace_hfi1_sdma_out_sn(sde, txp->sn);
-                       if (WARN_ON_ONCE(sde->head_sn != txp->sn))
-                               dd_dev_err(sde->dd, "expected %llu got %llu\n",
-                                       sde->head_sn, txp->sn);
-                       sde->head_sn++;
-#endif
-                       sdma_txclean(sde->dd, txp);
+                       complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
                        trace_hfi1_sdma_progress(sde, head, tail, txp);
-                       if (txp->complete)
-                               (*txp->complete)(
-                                       txp,
-                                       SDMA_TXREQ_S_ABORTED,
-                                       drained);
-                       if (wait && drained)
-                               iowait_drain_wakeup(wait);
-                       /* see if there is another txp */
                        txp = get_txhead(sde);
                }
                progress++;
@@ -612,7 +593,7 @@ static void sdma_flush_descq(struct sdma_engine *sde)
 
 static void sdma_sw_clean_up_task(unsigned long opaque)
 {
-       struct sdma_engine *sde = (struct sdma_engine *) opaque;
+       struct sdma_engine *sde = (struct sdma_engine *)opaque;
        unsigned long flags;
 
        spin_lock_irqsave(&sde->tail_lock, flags);
@@ -627,7 +608,6 @@ static void sdma_sw_clean_up_task(unsigned long opaque)
         *   descq are ours to play with.
         */
 
-
        /*
         * In the error clean up sequence, software clean must be called
         * before the hardware clean so we can use the hardware head in
@@ -692,8 +672,8 @@ static void sdma_set_state(struct sdma_engine *sde,
        ss->previous_op = ss->current_op;
        ss->current_state = next_state;
 
-       if (ss->previous_state != sdma_state_s99_running
-               && next_state == sdma_state_s99_running)
+       if (ss->previous_state != sdma_state_s99_running &&
+           next_state == sdma_state_s99_running)
                sdma_flush(sde);
 
        if (action[next_state].op_enable)
@@ -890,6 +870,9 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
        newmap->actual_vls = num_vls;
        newmap->vls = roundup_pow_of_two(num_vls);
        newmap->mask = (1 << ilog2(newmap->vls)) - 1;
+       /* initialize back-map */
+       for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
+               newmap->engine_to_vl[i] = -1;
        for (i = 0; i < newmap->vls; i++) {
                /* save for wrap around */
                int first_engine = engine;
@@ -913,6 +896,9 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
                                        /* wrap back to first engine */
                                        engine = first_engine;
                        }
+                       /* assign back-map */
+                       for (j = 0; j < vl_engines[i]; j++)
+                               newmap->engine_to_vl[first_engine + j] = i;
                } else {
                        /* just re-use entry without allocating */
                        newmap->map[i] = newmap->map[i % num_vls];
@@ -1032,7 +1018,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
                dd->chip_sdma_mem_size);
 
        per_sdma_credits =
-               dd->chip_sdma_mem_size/(num_engines * SDMA_BLOCK_SIZE);
+               dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
 
        /* set up freeze waitqueue */
        init_waitqueue_head(&dd->sdma_unfreeze_wq);
@@ -1061,18 +1047,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
                sde->desc_avail = sdma_descq_freecnt(sde);
                sde->sdma_shift = ilog2(descq_cnt);
                sde->sdma_mask = (1 << sde->sdma_shift) - 1;
-               sde->descq_full_count = 0;
-
-               /* Create a mask for all 3 chip interrupt sources */
-               sde->imask = (u64)1 << (0*TXE_NUM_SDMA_ENGINES + this_idx)
-                       | (u64)1 << (1*TXE_NUM_SDMA_ENGINES + this_idx)
-                       | (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
-               /* Create a mask specifically for sdma_idle */
-               sde->idle_mask =
-                       (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
-               /* Create a mask specifically for sdma_progress */
-               sde->progress_mask =
-                       (u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx);
+
+               /* Create a mask specifically for each interrupt source */
+               sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
+                                          this_idx);
+               sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
+                                               this_idx);
+               sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
+                                           this_idx);
+               /* Create a combined mask to cover all 3 interrupt sources */
+               sde->imask = sde->int_mask | sde->progress_mask |
+                            sde->idle_mask;
+
                spin_lock_init(&sde->tail_lock);
                seqlock_init(&sde->head_lock);
                spin_lock_init(&sde->senddmactrl_lock);
@@ -1251,7 +1237,6 @@ void sdma_exit(struct hfi1_devdata *dd)
 
        for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
                        ++this_idx) {
-
                sde = &dd->per_sdma[this_idx];
                if (!list_empty(&sde->dmawait))
                        dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
@@ -1358,8 +1343,8 @@ retry:
        use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
                                        (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
        hwhead = use_dmahead ?
-               (u16) le64_to_cpu(*sde->head_dma) :
-               (u16) read_sde_csr(sde, SD(HEAD));
+               (u16)le64_to_cpu(*sde->head_dma) :
+               (u16)read_sde_csr(sde, SD(HEAD));
 
        if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
                u16 cnt;
@@ -1464,7 +1449,7 @@ static void sdma_make_progress(struct sdma_engine *sde, u64 status)
 {
        struct sdma_txreq *txp = NULL;
        int progress = 0;
-       u16 hwhead, swhead, swtail;
+       u16 hwhead, swhead;
        int idle_check_done = 0;
 
        hwhead = sdma_gethead(sde);
@@ -1485,29 +1470,9 @@ retry:
 
                /* if now past this txp's descs, do the callback */
                if (txp && txp->next_descq_idx == swhead) {
-                       int drained = 0;
-                       /* protect against complete modifying */
-                       struct iowait *wait = txp->wait;
-
                        /* remove from list */
                        sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
-                       if (wait)
-                               drained = atomic_dec_and_test(&wait->sdma_busy);
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
-                       trace_hfi1_sdma_out_sn(sde, txp->sn);
-                       if (WARN_ON_ONCE(sde->head_sn != txp->sn))
-                               dd_dev_err(sde->dd, "expected %llu got %llu\n",
-                                       sde->head_sn, txp->sn);
-                       sde->head_sn++;
-#endif
-                       sdma_txclean(sde->dd, txp);
-                       if (txp->complete)
-                               (*txp->complete)(
-                                       txp,
-                                       SDMA_TXREQ_S_OK,
-                                       drained);
-                       if (wait && drained)
-                               iowait_drain_wakeup(wait);
+                       complete_tx(sde, txp, SDMA_TXREQ_S_OK);
                        /* see if there is another txp */
                        txp = get_txhead(sde);
                }
@@ -1525,6 +1490,8 @@ retry:
         * of sdma_make_progress(..) which is ensured by idle_check_done flag
         */
        if ((status & sde->idle_mask) && !idle_check_done) {
+               u16 swtail;
+
                swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
                if (swtail != hwhead) {
                        hwhead = (u16)read_sde_csr(sde, SD(HEAD));
@@ -1552,6 +1519,12 @@ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
        trace_hfi1_sdma_engine_interrupt(sde, status);
        write_seqlock(&sde->head_lock);
        sdma_set_desc_cnt(sde, sdma_desct_intr);
+       if (status & sde->idle_mask)
+               sde->idle_int_cnt++;
+       else if (status & sde->progress_mask)
+               sde->progress_int_cnt++;
+       else if (status & sde->int_mask)
+               sde->sdma_int_cnt++;
        sdma_make_progress(sde, status);
        write_sequnlock(&sde->head_lock);
 }
@@ -1649,10 +1622,10 @@ static void sdma_setlengen(struct sdma_engine *sde)
         * generation counter.
         */
        write_sde_csr(sde, SD(LEN_GEN),
-               (sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT)
+               (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)
        );
        write_sde_csr(sde, SD(LEN_GEN),
-               ((sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT))
+               ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT))
                | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
        );
 }
@@ -1714,7 +1687,6 @@ static void set_sdma_integrity(struct sdma_engine *sde)
        write_sde_csr(sde, SD(CHECK_ENABLE), reg);
 }
 
-
 static void init_sdma_regs(
        struct sdma_engine *sde,
        u32 credits,
@@ -2080,14 +2052,14 @@ retry:
                goto nodesc;
        tail = submit_tx(sde, tx);
        if (wait)
-               atomic_inc(&wait->sdma_busy);
+               iowait_sdma_inc(wait);
        sdma_update_tail(sde, tail);
 unlock:
        spin_unlock_irqrestore(&sde->tail_lock, flags);
        return ret;
 unlock_noconn:
        if (wait)
-               atomic_inc(&wait->sdma_busy);
+               iowait_sdma_inc(wait);
        tx->next_descq_idx = 0;
 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        tx->sn = sde->tail_sn++;
@@ -2132,8 +2104,8 @@ nodesc:
  * side locking.
  *
  * Return:
- * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
- * (wait == NULL)
+ * > 0 - Success (value is number of sdma_txreq's submitted),
+ * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
  * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
  */
 int sdma_send_txlist(struct sdma_engine *sde,
@@ -2169,18 +2141,18 @@ retry:
        }
 update_tail:
        if (wait)
-               atomic_add(count, &wait->sdma_busy);
+               iowait_sdma_add(wait, count);
        if (tail != INVALID_TAIL)
                sdma_update_tail(sde, tail);
        spin_unlock_irqrestore(&sde->tail_lock, flags);
-       return ret;
+       return ret == 0 ? count : ret;
 unlock_noconn:
        spin_lock(&sde->flushlist_lock);
        list_for_each_entry_safe(tx, tx_next, tx_list, list) {
                tx->wait = wait;
                list_del_init(&tx->list);
                if (wait)
-                       atomic_inc(&wait->sdma_busy);
+                       iowait_sdma_inc(wait);
                tx->next_descq_idx = 0;
 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
                tx->sn = sde->tail_sn++;
@@ -2767,7 +2739,7 @@ enomem:
  * This function calls _extend_sdma_tx_descs to extend or allocate
  * coalesce buffer. If there is a allocated coalesce buffer, it will
  * copy the input packet data into the coalesce buffer. It also adds
- * coalesce buffer descriptor once whe whole packet is received.
+ * coalesce buffer descriptor once the whole packet is received.
  *
  * Return:
  * <0 - error
@@ -3047,7 +3019,7 @@ void sdma_freeze(struct hfi1_devdata *dd)
         * software clean will read engine CSRs, so must be completed before
         * the next step, which will clear the engine CSRs.
         */
-       (void) wait_event_interruptible(dd->sdma_unfreeze_wq,
+       (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
                                atomic_read(&dd->sdma_unfreeze_count) <= 0);
        /* no need to check results - done no matter what */
 }
@@ -3081,5 +3053,5 @@ void _sdma_engine_progress_schedule(
        trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
        /* assume we have selected a good cpu */
        write_csr(sde->dd,
-                 CCE_INT_FORCE + (8*(IS_SDMA_START/64)), sde->progress_mask);
+                 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), sde->progress_mask);
 }