staging/rdma/hfi1: Fix code alignment
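
The change is purely mechanical: continuation lines of wrapped calls and
declarations are re-indented to line up with the character after the
opening parenthesis, the style checkpatch.pl typically reports (with
--strict) as "Alignment should match open parenthesis".  A minimal
before/after illustration, taken from the sdma_set_state() hunk below:

    static void sdma_set_state(struct sdma_engine *sde,
            enum sdma_states next_state)

becomes

    static void sdma_set_state(struct sdma_engine *sde,
                               enum sdma_states next_state)

No functional change is intended; every hunk only adjusts whitespace or
re-wraps the same arguments across lines.
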
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c
index 5f62d022908845c239e37991c9e0338d50fdf180..74086eabbb259b5881610ed3fffa14c011304c8a 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/staging/rdma/hfi1/sdma.c
@@ -325,9 +325,9 @@ static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
                if (lcnt++ > 500) {
                        /* timed out - bounce the link */
                        dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
-                                 __func__, sde->this_idx, (u32)reg);
+                                  __func__, sde->this_idx, (u32)reg);
                        queue_work(dd->pport->hfi1_wq,
-                               &dd->pport->link_bounce_work);
+                                  &dd->pport->link_bounce_work);
                        break;
                }
                udelay(1);
@@ -458,8 +458,8 @@ static void sdma_err_halt_wait(struct work_struct *work)
                        break;
                if (time_after(jiffies, timeout)) {
                        dd_dev_err(sde->dd,
-                               "SDMA engine %d - timeout waiting for engine to halt\n",
-                               sde->this_idx);
+                                  "SDMA engine %d - timeout waiting for engine to halt\n",
+                                  sde->this_idx);
                        /*
                         * Continue anyway.  This could happen if there was
                         * an uncorrectable error in the wrong spot.
@@ -656,7 +656,7 @@ static void sdma_start_hw_clean_up(struct sdma_engine *sde)
 }
 
 static void sdma_set_state(struct sdma_engine *sde,
-       enum sdma_states next_state)
+                          enum sdma_states next_state)
 {
        struct sdma_state *ss = &sde->state;
        const struct sdma_set_state_action *action = sdma_action_table;
@@ -908,7 +908,7 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
        /* newmap in hand, save old map */
        spin_lock_irq(&dd->sde_map_lock);
        oldmap = rcu_dereference_protected(dd->sdma_map,
-                       lockdep_is_held(&dd->sde_map_lock));
+                                          lockdep_is_held(&dd->sde_map_lock));
 
        /* publish newmap */
        rcu_assign_pointer(dd->sdma_map, newmap);
@@ -1006,16 +1006,16 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
                return 0;
        }
        if (mod_num_sdma &&
-               /* can't exceed chip support */
-               mod_num_sdma <= dd->chip_sdma_engines &&
-               /* count must be >= vls */
-               mod_num_sdma >= num_vls)
+           /* can't exceed chip support */
+           mod_num_sdma <= dd->chip_sdma_engines &&
+           /* count must be >= vls */
+           mod_num_sdma >= num_vls)
                num_engines = mod_num_sdma;
 
        dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
        dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
        dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
-               dd->chip_sdma_mem_size);
+                   dd->chip_sdma_mem_size);
 
        per_sdma_credits =
                dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
@@ -1026,7 +1026,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 
        descq_cnt = sdma_get_descq_cnt();
        dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
-               num_engines, descq_cnt);
+                   num_engines, descq_cnt);
 
        /* alloc memory for array of send engines */
        dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
@@ -1086,10 +1086,10 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
                                SDMA_DESC1_INT_REQ_FLAG;
 
                tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
-                       (unsigned long)sde);
+                            (unsigned long)sde);
 
                tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
-                       (unsigned long)sde);
+                            (unsigned long)sde);
                INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
                INIT_WORK(&sde->flush_worker, sdma_field_flush);
 
@@ -1240,7 +1240,7 @@ void sdma_exit(struct hfi1_devdata *dd)
                sde = &dd->per_sdma[this_idx];
                if (!list_empty(&sde->dmawait))
                        dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
-                               sde->this_idx);
+                                  sde->this_idx);
                sdma_process_event(sde, sdma_event_e00_go_hw_down);
 
                del_timer_sync(&sde->err_progress_check_timer);
@@ -1370,9 +1370,9 @@ retry:
 
                if (unlikely(!sane)) {
                        dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
-                               sde->this_idx,
-                               use_dmahead ? "dma" : "kreg",
-                               hwhead, swhead, swtail, cnt);
+                                  sde->this_idx,
+                                  use_dmahead ? "dma" : "kreg",
+                                  hwhead, swhead, swtail, cnt);
                        if (use_dmahead) {
                                /* try one more time, using csr */
                                use_dmahead = 0;
@@ -1550,10 +1550,10 @@ void sdma_engine_error(struct sdma_engine *sde, u64 status)
                __sdma_process_event(sde, sdma_event_e60_hw_halted);
        if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
                dd_dev_err(sde->dd,
-                       "SDMA (%u) engine error: 0x%llx state %s\n",
-                       sde->this_idx,
-                       (unsigned long long)status,
-                       sdma_state_names[sde->state.current_state]);
+                          "SDMA (%u) engine error: 0x%llx state %s\n",
+                          sde->this_idx,
+                          (unsigned long long)status,
+                          sdma_state_names[sde->state.current_state]);
                dump_sdma_state(sde);
        }
        write_sequnlock(&sde->head_lock);
@@ -1597,8 +1597,8 @@ static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
 
        if (op & SDMA_SENDCTRL_OP_CLEANUP)
                write_sde_csr(sde, SD(CTRL),
-                       sde->p_senddmactrl |
-                       SD(CTRL_SDMA_CLEANUP_SMASK));
+                             sde->p_senddmactrl |
+                             SD(CTRL_SDMA_CLEANUP_SMASK));
        else
                write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
 
@@ -1622,12 +1622,10 @@ static void sdma_setlengen(struct sdma_engine *sde)
         * generation counter.
         */
        write_sde_csr(sde, SD(LEN_GEN),
-               (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)
-       );
+                     (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
        write_sde_csr(sde, SD(LEN_GEN),
-               ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT))
-               | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
-       );
+                     ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
+                     (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
 }
 
 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
@@ -1707,17 +1705,16 @@ static void init_sdma_regs(
        write_sde_csr(sde, SD(DESC_CNT), 0);
        write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
        write_sde_csr(sde, SD(MEMORY),
-               ((u64)credits <<
-                       SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
-               ((u64)(credits * sde->this_idx) <<
-                       SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
+                     ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
+                     ((u64)(credits * sde->this_idx) <<
+                      SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
        write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
        set_sdma_integrity(sde);
        opmask = OPCODE_CHECK_MASK_DISABLED;
        opval = OPCODE_CHECK_VAL_DISABLED;
        write_sde_csr(sde, SD(CHECK_OPCODE),
-               (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
-               (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
+                     (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
+                     (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
 }
 
 #ifdef CONFIG_SDMA_VERBOSITY
@@ -1796,12 +1793,9 @@ static void dump_sdma_state(struct sdma_engine *sde)
        descq = sde->descq;
 
        dd_dev_err(sde->dd,
-               "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
-               sde->this_idx,
-               head,
-               tail,
-               cnt,
-               !list_empty(&sde->flushlist));
+                  "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
+                  sde->this_idx, head, tail, cnt,
+                  !list_empty(&sde->flushlist));
 
        /* print info for each entry in the descriptor queue */
        while (head != tail) {
@@ -1822,20 +1816,23 @@ static void dump_sdma_state(struct sdma_engine *sde)
                len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
                        & SDMA_DESC0_BYTE_COUNT_MASK;
                dd_dev_err(sde->dd,
-                       "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
-                        head, flags, addr, gen, len);
+                          "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
+                          head, flags, addr, gen, len);
                dd_dev_err(sde->dd,
-                       "\tdesc0:0x%016llx desc1 0x%016llx\n",
-                        desc[0], desc[1]);
+                          "\tdesc0:0x%016llx desc1 0x%016llx\n",
+                          desc[0], desc[1]);
                if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
                        dd_dev_err(sde->dd,
-                               "\taidx: %u amode: %u alen: %u\n",
-                               (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
-                                       >> SDMA_DESC1_HEADER_INDEX_SHIFT),
-                               (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
-                                       >> SDMA_DESC1_HEADER_MODE_SHIFT),
-                               (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
-                                       >> SDMA_DESC1_HEADER_DWS_SHIFT));
+                                  "\taidx: %u amode: %u alen: %u\n",
+                                  (u8)((desc[1] &
+                                        SDMA_DESC1_HEADER_INDEX_SMASK) >>
+                                       SDMA_DESC1_HEADER_INDEX_SHIFT),
+                                  (u8)((desc[1] &
+                                        SDMA_DESC1_HEADER_MODE_SMASK) >>
+                                       SDMA_DESC1_HEADER_MODE_SHIFT),
+                                  (u8)((desc[1] &
+                                        SDMA_DESC1_HEADER_DWS_SMASK) >>
+                                       SDMA_DESC1_HEADER_DWS_SHIFT));
                head++;
                head &= sde->sdma_mask;
        }
@@ -1862,29 +1859,26 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
        head = sde->descq_head & sde->sdma_mask;
        tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
        seq_printf(s, SDE_FMT, sde->this_idx,
-               sde->cpu,
-               sdma_state_name(sde->state.current_state),
-               (unsigned long long)read_sde_csr(sde, SD(CTRL)),
-               (unsigned long long)read_sde_csr(sde, SD(STATUS)),
-               (unsigned long long)read_sde_csr(sde,
-                       SD(ENG_ERR_STATUS)),
-               (unsigned long long)read_sde_csr(sde, SD(TAIL)),
-               tail,
-               (unsigned long long)read_sde_csr(sde, SD(HEAD)),
-               head,
-               (unsigned long long)le64_to_cpu(*sde->head_dma),
-               (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
-               (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
-               (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
-               (unsigned long long)sde->last_status,
-               (unsigned long long)sde->ahg_bits,
-               sde->tx_tail,
-               sde->tx_head,
-               sde->descq_tail,
-               sde->descq_head,
+                  sde->cpu,
+                  sdma_state_name(sde->state.current_state),
+                  (unsigned long long)read_sde_csr(sde, SD(CTRL)),
+                  (unsigned long long)read_sde_csr(sde, SD(STATUS)),
+                  (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
+                  (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
+                  (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
+                  (unsigned long long)le64_to_cpu(*sde->head_dma),
+                  (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
+                  (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
+                  (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
+                  (unsigned long long)sde->last_status,
+                  (unsigned long long)sde->ahg_bits,
+                  sde->tx_tail,
+                  sde->tx_head,
+                  sde->descq_tail,
+                  sde->descq_head,
                   !list_empty(&sde->flushlist),
-               sde->descq_full_count,
-               (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
+                  sde->descq_full_count,
+                  (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
 
        /* print info for each entry in the descriptor queue */
        while (head != tail) {
@@ -1905,14 +1899,16 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
                len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
                        & SDMA_DESC0_BYTE_COUNT_MASK;
                seq_printf(s,
-                       "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
-                       head, flags, addr, gen, len);
+                          "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
+                          head, flags, addr, gen, len);
                if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
                        seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
-                               (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
-                                       >> SDMA_DESC1_HEADER_INDEX_SHIFT),
-                               (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
-                                       >> SDMA_DESC1_HEADER_MODE_SHIFT));
+                                  (u8)((desc[1] &
+                                        SDMA_DESC1_HEADER_INDEX_SMASK) >>
+                                       SDMA_DESC1_HEADER_INDEX_SHIFT),
+                                  (u8)((desc[1] &
+                                        SDMA_DESC1_HEADER_MODE_SMASK) >>
+                                       SDMA_DESC1_HEADER_MODE_SHIFT));
                head = (head + 1) & sde->sdma_mask;
        }
 }
@@ -2108,9 +2104,8 @@ nodesc:
  * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
  * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
  */
-int sdma_send_txlist(struct sdma_engine *sde,
-                   struct iowait *wait,
-                   struct list_head *tx_list)
+int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
+                    struct list_head *tx_list)
 {
        struct sdma_txreq *tx, *tx_next;
        int ret = 0;
@@ -2178,8 +2173,7 @@ nodesc:
        goto update_tail;
 }
 
-static void sdma_process_event(struct sdma_engine *sde,
-       enum sdma_events event)
+static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
 {
        unsigned long flags;
 
@@ -2196,7 +2190,7 @@ static void sdma_process_event(struct sdma_engine *sde,
 }
 
 static void __sdma_process_event(struct sdma_engine *sde,
-       enum sdma_events event)
+                                enum sdma_events event)
 {
        struct sdma_state *ss = &sde->state;
        int need_progress = 0;
@@ -2227,7 +2221,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
                        /* This reference means the state machine is started */
                        sdma_get(&sde->state);
                        sdma_set_state(sde,
-                               sdma_state_s10_hw_start_up_halt_wait);
+                                      sdma_state_s10_hw_start_up_halt_wait);
                        break;
                case sdma_event_e15_hw_halt_done:
                        break;
@@ -2265,7 +2259,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
                        break;
                case sdma_event_e15_hw_halt_done:
                        sdma_set_state(sde,
-                               sdma_state_s15_hw_start_up_clean_wait);
+                                      sdma_state_s15_hw_start_up_clean_wait);
                        sdma_start_hw_clean_up(sde);
                        break;
                case sdma_event_e25_hw_clean_up_done:
@@ -3003,7 +2997,8 @@ void sdma_freeze(struct hfi1_devdata *dd)
         * continuing.
         */
        ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
-                               atomic_read(&dd->sdma_unfreeze_count) <= 0);
+                                      atomic_read(&dd->sdma_unfreeze_count) <=
+                                      0);
        /* interrupted or count is negative, then unloading - just exit */
        if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
                return;
@@ -3040,7 +3035,7 @@ void sdma_unfreeze(struct hfi1_devdata *dd)
        /* tell all engines start freeze clean up */
        for (i = 0; i < dd->num_sdma; i++)
                sdma_process_event(&dd->per_sdma[i],
-                                       sdma_event_e82_hw_unfreeze);
+                                  sdma_event_e82_hw_unfreeze);
 }
 
 /**
@@ -3054,5 +3049,6 @@ void _sdma_engine_progress_schedule(
        trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
        /* assume we have selected a good cpu */
        write_csr(sde->dd,
-                 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), sde->progress_mask);
+                 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
+                 sde->progress_mask);
 }