*/
}
+void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ struct net_device *netdev = oct->props[0].netdev;
+ struct lio *lio = GET_LIO(netdev);
+ struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
+
+ queue_delayed_work(wq->wq, &wq->wk.work,
+ msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
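A note on the data-structure side: the helper above indexes rxq_status_wq by droq->q_no, so the patch assumes the single per-interface cavium_wq in struct lio has become a per-queue array, along these lines (a sketch of the expected header change; the array bound is an assumption, the driver's existing MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES being the natural choice):

  /* old: one delayed work that polled every rx queue */
  struct cavium_wq rxq_status_wq;

  /* assumed new layout: one delayed work per output queue, indexed by q_no */
  struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];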
static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
struct cavium_wk *wk = (struct cavium_wk *)work;
struct lio *lio = (struct lio *)wk->ctxptr;
struct octeon_device *oct = lio->oct_dev;
- struct octeon_droq *droq;
- int q, q_no = 0;
+ int q_no = wk->ctxul;
+ struct octeon_droq *droq = oct->droq[q_no];
- if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
- for (q = 0; q < lio->linfo.num_rxpciq; q++) {
- q_no = lio->linfo.rxpciq[q].s.q_no;
- droq = oct->droq[q_no];
- if (!droq)
- continue;
- octeon_droq_check_oom(droq);
- }
- }
- queue_delayed_work(lio->rxq_status_wq.wq,
- &lio->rxq_status_wq.wk.work,
- msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
+ return;
+
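+ /* re-arm the poll only while the refill stays below the watermark */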
+ if (octeon_retry_droq_refill(droq))
+ octeon_schedule_rxq_oom_work(oct, droq);
}
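The (struct cavium_wk *)work cast above is safe only because the work_struct passed to the handler is the first member of the delayed_work, which is itself the first member of cavium_wk; paraphrasing the driver's context carrier (comments added here for review):

  struct cavium_wk {
  struct delayed_work work; /* must stay first for the cast to hold */
  void *ctxptr; /* -> struct lio */
  u64 ctxul; /* repurposed by this patch to carry q_no */
  };

container_of(to_delayed_work(work), struct cavium_wk, work) would express the same lookup without relying on member order.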
int setup_rx_oom_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct cavium_wq *wq;
+ int q, q_no;
- lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
- WQ_MEM_RECLAIM, 0);
- if (!lio->rxq_status_wq.wq) {
- dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
- return -ENOMEM;
+ for (q = 0; q < oct->num_oqs; q++) {
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ wq = &lio->rxq_status_wq[q_no];
+ wq->wq = alloc_workqueue("rxq-oom-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!wq->wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&wq->wk.work,
+ octnet_poll_check_rxq_oom_status);
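+ /* handler context: the lio and the queue this work item polls */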
+ wq->wk.ctxptr = lio;
+ wq->wk.ctxul = q_no;
}
- INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
- octnet_poll_check_rxq_oom_status);
- lio->rxq_status_wq.wk.ctxptr = lio;
- queue_delayed_work(lio->rxq_status_wq.wq,
- &lio->rxq_status_wq.wk.work,
- msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+
return 0;
}
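One caveat worth flagging: if alloc_workqueue() fails partway through the loop, the workqueues created on earlier iterations stay allocated, so the function relies on its caller to unwind via cleanup_rx_oom_poll_fn(). If that is not guaranteed on every error path, a local unwind along these lines would make the function self-cleaning (a sketch, not part of the patch; nothing has been queued at this point, so destroying the workqueues is enough):

  if (!wq->wq) {
  dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
  /* hypothetical unwind of the earlier iterations */
  while (--q >= 0) {
  q_no = lio->linfo.rxpciq[q].s.q_no;
  destroy_workqueue(lio->rxq_status_wq[q_no].wq);
  lio->rxq_status_wq[q_no].wq = NULL;
  }
  return -ENOMEM;
  }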
void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
-
- if (lio->rxq_status_wq.wq) {
- cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
- flush_workqueue(lio->rxq_status_wq.wq);
- destroy_workqueue(lio->rxq_status_wq.wq);
+ struct octeon_device *oct = lio->oct_dev;
+ struct cavium_wq *wq;
+ int q_no;
+
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ wq = &lio->rxq_status_wq[q_no];
+ if (wq->wq) {
+ cancel_delayed_work_sync(&wq->wk.work);
+ flush_workqueue(wq->wq);
+ destroy_workqueue(wq->wq);
+ wq->wq = NULL;
+ }
}
}
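Minor robustness note: setup indexes the array through lio->linfo.rxpciq[q].s.q_no, while this cleanup walks q_no from 0 to num_oqs - 1 directly. The if (wq->wq) guard covers any mapping that stays below num_oqs, but a queue number at or above it would be missed. A cleanup loop that mirrors the setup-side indexing closes that gap (illustrative only; reuses the patch's names plus a loop counter q):

  for (q = 0; q < oct->num_oqs; q++) {
  q_no = lio->linfo.rxpciq[q].s.q_no;
  wq = &lio->rxq_status_wq[q_no];
  if (!wq->wq)
  continue;
  cancel_delayed_work_sync(&wq->wk.work);
  destroy_workqueue(wq->wq); /* drains anything still queued */
  wq->wq = NULL;
  }

Strictly speaking the flush_workqueue() above is redundant, since destroy_workqueue() drains the queue itself, but it is harmless.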
* Returns:
* Success: Pointer to recv_info_t
* Failure: NULL.
- * Locks:
- * The droq->lock is held when this routine is called.
*/
static inline struct octeon_recv_info *octeon_create_recv_info(
struct octeon_device *octeon_dev,
* up buffers (that were not dispatched) to form a contiguous ring.
* Returns:
* No of descriptors refilled.
- * Locks:
- * This routine is called with droq->lock held.
*/
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
while (droq->refill_count && (desc_refilled < droq->max_count)) {
/* If a valid buffer exists (happens if there is no dispatch),
- * reuse
- * the buffer, else allocate.
+ * reuse the buffer, else allocate.
*/
if (!droq->recv_buf_list[droq->refill_idx].buffer) {
pg_info =
/** check if we can allocate packets to get out of oom.
* @param droq - Droq being checked.
- * @return does not return anything
+ * @return 1 if the refill leaves the credit count below the minimum
+ * watermark and the poll should be rescheduled, 0 otherwise
*/
-void octeon_droq_check_oom(struct octeon_droq *droq)
+int octeon_retry_droq_refill(struct octeon_droq *droq)
{
- int desc_refilled;
struct octeon_device *oct = droq->oct_dev;
- if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
- spin_lock_bh(&droq->lock);
- desc_refilled = octeon_droq_refill(oct, droq);
- if (desc_refilled) {
- /* Flush the droq descriptor data to memory to be sure
- * that when we update the credits the data in memory
- * is accurate.
- */
- wmb();
- writel(desc_refilled, droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
- }
- spin_unlock_bh(&droq->lock);
+ int desc_refilled, reschedule = 1;
+ u32 pkts_credit;
+
+ spin_lock_bh(&droq->lock);
+ pkts_credit = readl(droq->pkts_credit_reg);
+ desc_refilled = octeon_droq_refill(oct, droq);
+ if (desc_refilled) {
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
+ wmb();
+ writel(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
+ if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
+ reschedule = 0;
}
+ spin_unlock_bh(&droq->lock);
+
+ return reschedule;
}
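To make the reschedule test concrete: the check treats the credit doorbell as additive, so the level after the writel() is taken to be pkts_credit (sampled before the refill) plus desc_refilled. A worked example, taking CN23XX_SLI_DEF_BP as 64 purely for illustration:

  pkts_credit = 10, desc_refilled = 40: 10 + 40 = 50 < 64, reschedule stays 1 (poll again)
  pkts_credit = 30, desc_refilled = 40: 30 + 40 = 70 >= 64, reschedule = 0 (polling stops)

And if octeon_droq_refill() repairs no descriptors at all, reschedule stays 1 regardless of the credit level.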
static inline u32
struct octeon_droq *droq,
u32 pkts_to_process)
{
+ u32 pkt, total_len = 0, pkt_count, retval;
struct octeon_droq_info *info;
union octeon_rh *rh;
- u32 pkt, total_len = 0, pkt_count;
pkt_count = pkts_to_process;
if (droq->refill_count >= droq->refill_threshold) {
int desc_refilled = octeon_droq_refill(oct, droq);
- /* Flush the droq descriptor data to memory to be sure
- * that when we update the credits the data in memory
- * is accurate.
- */
- wmb();
- writel((desc_refilled), droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
+ if (desc_refilled) {
+ /* Flush the droq descriptor data to memory to
+ * be sure that when we update the credits the
+ * data in memory is accurate.
+ */
+ wmb();
+ writel(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
+ }
}
-
} /* for (each packet)... */
/* Increment refill_count by the number of buffers processed. */
droq->stats.pkts_received += pkt;
droq->stats.bytes_received += total_len;
+ retval = pkt;
if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
droq->stats.dropped_toomany += (pkts_to_process - pkt);
- return pkts_to_process;
+ retval = pkts_to_process;
}
- return pkt;
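+ /* Account for everything consumed here, processed or dropped, so
+ * pkts_pending is current before the refill check below; the
+ * callers no longer subtract it themselves.
+ */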
+ atomic_sub(retval, &droq->pkts_pending);
+
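+ /* Still short on buffers and below the backpressure watermark:
+ * refresh pkts_pending from hardware and hand further refilling
+ * to the per-queue OOM worker only if nothing is left to process.
+ */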
+ if (droq->refill_count >= droq->refill_threshold &&
+ readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) {
+ octeon_droq_check_hw_for_pkts(droq);
+
+ /* Make sure there are no pkts_pending */
+ if (!atomic_read(&droq->pkts_pending))
+ octeon_schedule_rxq_oom_work(oct, droq);
+ }
+
+ return retval;
}
int
struct octeon_droq *droq,
u32 budget)
{
- u32 pkt_count = 0, pkts_processed = 0;
+ u32 pkt_count = 0;
struct list_head *tmp, *tmp2;
/* Grab the droq lock */
if (pkt_count > budget)
pkt_count = budget;
- pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
-
- atomic_sub(pkts_processed, &droq->pkts_pending);
+ octeon_droq_fast_process_packets(oct, droq, pkt_count);
/* Release the spin lock */
spin_unlock(&droq->lock);
octeon_droq_fast_process_packets(oct, droq,
pkts_available);
- atomic_sub(pkts_processed, &droq->pkts_pending);
-
total_pkts_processed += pkts_processed;
}