]> git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
liquidio: improve soft command handling
authorFelix Manlunas <felix.manlunas@cavium.com>
Wed, 29 Aug 2018 01:51:30 +0000 (18:51 -0700)
committerDavid S. Miller <davem@davemloft.net>
Thu, 30 Aug 2018 03:07:41 +0000 (20:07 -0700)
1. Set LIO_SC_MAX_TMO_MS as the maximum timeout value for a soft command
   (sc).  All sc's use this value as a hard timeout value. Add expiry_time
   in struct octeon_soft_command to keep the hard timeout value. The fields
   wait_time and timeout in struct octeon_soft_command will be obsoleted in
   the last patch of this patch series.
2. Add processing of synchronous sc's in the sc response thread,
   lio_process_ordered_list(). The memory allocated for a synchronous sc will
   be freed by lio_process_ordered_list() back to the sc pool.
3. Add two response lists for lio_process_ordered_list to process the
   storage allocated for sc's:
   OCTEON_DONE_SC_LIST response list keeps all sc's which will be freed to
   the pool after their requestors have finished processing the responses.
   OCTEON_ZOMBIE_SC_LIST response list keeps all sc's which have got
   LIO_SC_MAX_TMO_MS timeout.
   When an sc gets a hard timeout, lio_process_ordered_list() will recheck
   its status 1 ms later. If the status has not been updated by the firmware
   at that time, the sc will be moved from the OCTEON_DONE_SC_LIST response
   list to the OCTEON_ZOMBIE_SC_LIST response list. The sc's in the
   OCTEON_ZOMBIE_SC_LIST response list will be freed when the driver is
   unloaded.

Signed-off-by: Weilin Chang <weilin.chang@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/cavium/liquidio/octeon_config.h
drivers/net/ethernet/cavium/liquidio/octeon_iq.h
drivers/net/ethernet/cavium/liquidio/octeon_nic.c
drivers/net/ethernet/cavium/liquidio/request_manager.c
drivers/net/ethernet/cavium/liquidio/response_manager.c
drivers/net/ethernet/cavium/liquidio/response_manager.h

index 6fb13fa73b271c9d05f49ad5a04ea71f9a12c987..6663749bb336e40a0b90d8db0f2835b15b45cf09 100644 (file)
@@ -1037,12 +1037,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 
                /* fallthrough */
        case OCT_DEV_IO_QUEUES_DONE:
-               if (wait_for_pending_requests(oct))
-                       dev_err(&oct->pci_dev->dev, "There were pending requests\n");
-
                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 
+               if (wait_for_pending_requests(oct))
+                       dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
@@ -1052,6 +1052,31 @@ static void octeon_destroy_resources(struct octeon_device *oct)
                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
 
+               /* Force all requests waiting to be fetched by OCTEON to
+                * complete.
+                */
+               for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+                       struct octeon_instr_queue *iq;
+
+                       if (!(oct->io_qmask.iq & BIT_ULL(i)))
+                               continue;
+                       iq = oct->instr_queue[i];
+
+                       if (atomic_read(&iq->instr_pending)) {
+                               spin_lock_bh(&iq->lock);
+                               iq->fill_cnt = 0;
+                               iq->octeon_read_index = iq->host_write_index;
+                               iq->stats.instr_processed +=
+                                       atomic_read(&iq->instr_pending);
+                               lio_process_iq_request_list(oct, iq, 0);
+                               spin_unlock_bh(&iq->lock);
+                       }
+               }
+
+               lio_process_ordered_list(oct, 1);
+               octeon_free_sc_done_list(oct);
+               octeon_free_sc_zombie_list(oct);
+
        /* fallthrough */
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts  */
index b77835724dc84d037c88bcb9ef7153db8f1f6e48..59c2dd92aac5a490703e30f3e03d77a0a0aa2f11 100644 (file)
@@ -471,12 +471,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
        case OCT_DEV_HOST_OK:
                /* fallthrough */
        case OCT_DEV_IO_QUEUES_DONE:
-               if (wait_for_pending_requests(oct))
-                       dev_err(&oct->pci_dev->dev, "There were pending requests\n");
-
                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 
+               if (wait_for_pending_requests(oct))
+                       dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
@@ -485,7 +485,33 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 
                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
-               /* fall through */
+
+               /* Force all requests waiting to be fetched by OCTEON to
+                * complete.
+                */
+               for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+                       struct octeon_instr_queue *iq;
+
+                       if (!(oct->io_qmask.iq & BIT_ULL(i)))
+                               continue;
+                       iq = oct->instr_queue[i];
+
+                       if (atomic_read(&iq->instr_pending)) {
+                               spin_lock_bh(&iq->lock);
+                               iq->fill_cnt = 0;
+                               iq->octeon_read_index = iq->host_write_index;
+                               iq->stats.instr_processed +=
+                                       atomic_read(&iq->instr_pending);
+                               lio_process_iq_request_list(oct, iq, 0);
+                               spin_unlock_bh(&iq->lock);
+                       }
+               }
+
+               lio_process_ordered_list(oct, 1);
+               octeon_free_sc_done_list(oct);
+               octeon_free_sc_zombie_list(oct);
+
+       /* fall through */
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts  */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
index ceac74388e090901edc7826176d0b949a512bd93..056dceb2a5584552c4a2703e2b0d2fe010ed14d3 100644 (file)
@@ -440,7 +440,7 @@ struct octeon_config {
 /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
  * NoResponse Lists are now maintained with each IQ. (Dec' 2007).
  */
-#define MAX_RESPONSE_LISTS           4
+#define MAX_RESPONSE_LISTS           6
 
 /* Opcode hash bits. The opcode is hashed on the lower 6-bits to lookup the
  * dispatch table.
index aecd0d36d6349869e8703f48b62f2e94fd18397c..3437d7f6acada247a90dd2c59bcea06cd386c333 100644 (file)
@@ -294,11 +294,20 @@ struct octeon_soft_command {
        /** Time out and callback */
        size_t wait_time;
        size_t timeout;
+       size_t expiry_time;
+
        u32 iq_no;
        void (*callback)(struct octeon_device *, u32, void *);
        void *callback_arg;
+
+       int caller_is_done;
+       u32 sc_status;
+       struct completion complete;
 };
 
+/* max timeout (in milli sec) for soft request */
+#define LIO_SC_MAX_TMO_MS       60000
+
 /** Maximum number of buffers to allocate into soft command buffer pool
  */
 #define  MAX_SOFT_COMMAND_BUFFERS      256
@@ -319,6 +328,8 @@ struct octeon_sc_buffer_pool {
                (((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count)
 
 int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
+int octeon_free_sc_done_list(struct octeon_device *oct);
+int octeon_free_sc_zombie_list(struct octeon_device *oct);
 int octeon_free_sc_buffer_pool(struct octeon_device *oct);
 struct octeon_soft_command *
        octeon_alloc_soft_command(struct octeon_device *oct,
index 150609bd8849cc490a2b7246eb75cf5feaf718e8..b7364bb3b186c3bb8bd20a5a1edbd695641604df 100644 (file)
@@ -75,8 +75,7 @@ octeon_alloc_soft_command_resp(struct octeon_device    *oct,
        else
                sc->cmd.cmd2.rptr =  sc->dmarptr;
 
-       sc->wait_time = 1000;
-       sc->timeout = jiffies + sc->wait_time;
+       sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
        return sc;
 }
index 5de5ce9a8f54b19c82f8e155d354717123455f24..bd0153e16debbc4a2321a2bf0eb86988c32ea809 100644 (file)
@@ -409,33 +409,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
                        else
                                irh = (struct octeon_instr_irh *)
                                        &sc->cmd.cmd2.irh;
-                       if (irh->rflag) {
-                               /* We're expecting a response from Octeon.
-                                * It's up to lio_process_ordered_list() to
-                                * process  sc. Add sc to the ordered soft
-                                * command response list because we expect
-                                * a response from Octeon.
-                                */
-                               spin_lock_irqsave
-                                       (&oct->response_list
-                                        [OCTEON_ORDERED_SC_LIST].lock,
-                                        flags);
-                               atomic_inc(&oct->response_list
-                                       [OCTEON_ORDERED_SC_LIST].
-                                       pending_req_count);
-                               list_add_tail(&sc->node, &oct->response_list
-                                       [OCTEON_ORDERED_SC_LIST].head);
-                               spin_unlock_irqrestore
-                                       (&oct->response_list
-                                        [OCTEON_ORDERED_SC_LIST].lock,
-                                        flags);
-                       } else {
-                               if (sc->callback) {
-                                       /* This callback must not sleep */
-                                       sc->callback(oct, OCTEON_REQUEST_DONE,
-                                                    sc->callback_arg);
-                               }
-                       }
+
+                       /* We're expecting a response from Octeon.
+                        * It's up to lio_process_ordered_list() to
+                        * process  sc. Add sc to the ordered soft
+                        * command response list because we expect
+                        * a response from Octeon.
+                        */
+                       spin_lock_irqsave(&oct->response_list
+                                         [OCTEON_ORDERED_SC_LIST].lock, flags);
+                       atomic_inc(&oct->response_list
+                                  [OCTEON_ORDERED_SC_LIST].pending_req_count);
+                       list_add_tail(&sc->node, &oct->response_list
+                               [OCTEON_ORDERED_SC_LIST].head);
+                       spin_unlock_irqrestore(&oct->response_list
+                                              [OCTEON_ORDERED_SC_LIST].lock,
+                                              flags);
                        break;
                default:
                        dev_err(&oct->pci_dev->dev,
@@ -755,8 +744,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
                len = (u32)ih2->dlengsz;
        }
 
-       if (sc->wait_time)
-               sc->timeout = jiffies + sc->wait_time;
+       sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
        return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
                                    len, REQTYPE_SOFT_COMMAND));
@@ -791,11 +779,76 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
        return 0;
 }
 
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+       struct octeon_response_list *done_sc_list, *zombie_sc_list;
+       struct octeon_soft_command *sc;
+       struct list_head *tmp, *tmp2;
+       spinlock_t *sc_lists_lock; /* lock for response_list */
+
+       done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+       zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+       if (!atomic_read(&done_sc_list->pending_req_count))
+               return 0;
+
+       sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+       spin_lock_bh(sc_lists_lock);
+
+       list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+               sc = list_entry(tmp, struct octeon_soft_command, node);
+
+               if (READ_ONCE(sc->caller_is_done)) {
+                       list_del(&sc->node);
+                       atomic_dec(&done_sc_list->pending_req_count);
+
+                       if (*sc->status_word == COMPLETION_WORD_INIT) {
+                               /* timeout; move sc to zombie list */
+                               list_add_tail(&sc->node, &zombie_sc_list->head);
+                               atomic_inc(&zombie_sc_list->pending_req_count);
+                       } else {
+                               octeon_free_soft_command(oct, sc);
+                       }
+               }
+       }
+
+       spin_unlock_bh(sc_lists_lock);
+
+       return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+       struct octeon_response_list *zombie_sc_list;
+       struct octeon_soft_command *sc;
+       struct list_head *tmp, *tmp2;
+       spinlock_t *sc_lists_lock; /* lock for response_list */
+
+       zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+       sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+       spin_lock_bh(sc_lists_lock);
+
+       list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+               list_del(tmp);
+               atomic_dec(&zombie_sc_list->pending_req_count);
+               sc = list_entry(tmp, struct octeon_soft_command, node);
+               octeon_free_soft_command(oct, sc);
+       }
+
+       spin_unlock_bh(sc_lists_lock);
+
+       return 0;
+}
+
 int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 {
        struct list_head *tmp, *tmp2;
        struct octeon_soft_command *sc;
 
+       octeon_free_sc_zombie_list(oct);
+
        spin_lock_bh(&oct->sc_buf_pool.lock);
 
        list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
@@ -824,6 +877,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
        struct octeon_soft_command *sc = NULL;
        struct list_head *tmp;
 
+       if (!rdatasize)
+               rdatasize = 16;
+
        WARN_ON((offset + datasize + rdatasize + ctxsize) >
               SOFT_COMMAND_BUFFER_SIZE);
 
index fe5b537005763cbed01d1cc998b26d2d02521095..ac7747ccf56a03a338c5a87ab4e92de770a3e6e2 100644 (file)
@@ -69,6 +69,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
        u32 status;
        u64 status64;
 
+       octeon_free_sc_done_list(octeon_dev);
+
        ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
 
        do {
@@ -111,26 +113,88 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
                                        }
                                }
                        }
-               } else if (force_quit || (sc->timeout &&
-                       time_after(jiffies, (unsigned long)sc->timeout))) {
-                       dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
-                               __func__, (long)jiffies, (long)sc->timeout);
+               } else if (unlikely(force_quit) || (sc->expiry_time &&
+                       time_after(jiffies, (unsigned long)sc->expiry_time))) {
+                       struct octeon_instr_irh *irh =
+                               (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+
+                       dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__);
+                       dev_err(&octeon_dev->pci_dev->dev,
+                               "cmd %x/%x/%llx/%llx failed, ",
+                               irh->opcode, irh->subcode,
+                               sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]);
+                       dev_err(&octeon_dev->pci_dev->dev,
+                               "timeout (%ld, %ld)\n",
+                               (long)jiffies, (long)sc->expiry_time);
                        status = OCTEON_REQUEST_TIMEOUT;
                }
 
                if (status != OCTEON_REQUEST_PENDING) {
+                       sc->sc_status = status;
+
                        /* we have received a response or we have timed out */
                        /* remove node from linked list */
                        list_del(&sc->node);
                        atomic_dec(&octeon_dev->response_list
-                                         [OCTEON_ORDERED_SC_LIST].
-                                         pending_req_count);
-                       spin_unlock_bh
-                           (&ordered_sc_list->lock);
+                                  [OCTEON_ORDERED_SC_LIST].
+                                  pending_req_count);
+
+                       if (!sc->callback) {
+                               atomic_inc(&octeon_dev->response_list
+                                          [OCTEON_DONE_SC_LIST].
+                                          pending_req_count);
+                               list_add_tail(&sc->node,
+                                             &octeon_dev->response_list
+                                             [OCTEON_DONE_SC_LIST].head);
+
+                               if (unlikely(READ_ONCE(sc->caller_is_done))) {
+                                       /* caller does not wait for response
+                                        * from firmware
+                                        */
+                                       if (status != OCTEON_REQUEST_DONE) {
+                                               struct octeon_instr_irh *irh;
+
+                                               irh =
+                                                   (struct octeon_instr_irh *)
+                                                   &sc->cmd.cmd3.irh;
+                                               dev_dbg
+                                                   (&octeon_dev->pci_dev->dev,
+                                                   "%s: sc failed: opcode=%x, ",
+                                                   __func__, irh->opcode);
+                                               dev_dbg
+                                                   (&octeon_dev->pci_dev->dev,
+                                                   "subcode=%x, ossp[0]=%llx, ",
+                                                   irh->subcode,
+                                                   sc->cmd.cmd3.ossp[0]);
+                                               dev_dbg
+                                                   (&octeon_dev->pci_dev->dev,
+                                                   "ossp[1]=%llx, status=%d\n",
+                                                   sc->cmd.cmd3.ossp[1],
+                                                   status);
+                                       }
+                               } else {
+                                       complete(&sc->complete);
+                               }
+
+                               spin_unlock_bh(&ordered_sc_list->lock);
+                       } else {
+                               /* sc with callback function */
+                               if (status == OCTEON_REQUEST_TIMEOUT) {
+                                       atomic_inc(&octeon_dev->response_list
+                                                  [OCTEON_ZOMBIE_SC_LIST].
+                                                  pending_req_count);
+                                       list_add_tail(&sc->node,
+                                                     &octeon_dev->response_list
+                                                     [OCTEON_ZOMBIE_SC_LIST].
+                                                     head);
+                               }
+
+                               spin_unlock_bh(&ordered_sc_list->lock);
 
-                       if (sc->callback)
                                sc->callback(octeon_dev, status,
                                             sc->callback_arg);
+                               /* sc is freed by caller */
+                       }
 
                        request_complete++;
 
index 9169c2815dba36c59b7b8cea5642a7fb27bff439..ed4020d26fae2d5034ea13b0632712eef3e9a49c 100644 (file)
@@ -53,7 +53,9 @@ enum {
        OCTEON_ORDERED_LIST = 0,
        OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
        OCTEON_UNORDERED_BLOCKING_LIST = 2,
-       OCTEON_ORDERED_SC_LIST = 3
+       OCTEON_ORDERED_SC_LIST = 3,
+       OCTEON_DONE_SC_LIST = 4,
+       OCTEON_ZOMBIE_SC_LIST = 5
 };
 
 /** Response Order values for a Octeon Request. */