]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
authorDavid S. Miller <davem@davemloft.net>
Sat, 20 Jun 2009 08:16:40 +0000 (01:16 -0700)
committerDavid S. Miller <davem@davemloft.net>
Sat, 20 Jun 2009 08:16:40 +0000 (01:16 -0700)
17 files changed:
Documentation/isdn/00-INDEX
MAINTAINERS
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_hw.h
drivers/net/benet/be_main.c
drivers/net/e1000e/netdev.c
drivers/net/mv643xx_eth.c
drivers/net/r8169.c
drivers/net/usb/Kconfig
drivers/net/usb/cdc_ether.c
drivers/net/usb/pegasus.c
include/net/iucv/af_iucv.h
net/ieee802154/af_ieee802154.c
net/ipv4/route.c
net/iucv/af_iucv.c

index f6010a536590bb0384ab87a549c1af4bdacca6f9..e87e336f590ef256bb2a2e446a1222328261f700 100644 (file)
@@ -14,25 +14,14 @@ README
        - general info on what you need and what to do for Linux ISDN.
 README.FAQ
        - general info for FAQ.
-README.audio
-       - info for running audio over ISDN.
-README.fax
-       - info for using Fax over ISDN.
-README.gigaset
-       - info on the drivers for Siemens Gigaset ISDN adapters.
-README.icn
-       - info on the ICN-ISDN-card and its driver.
->>>>>>> 93af7aca44f0e82e67bda10a0fb73d383edcc8bd:Documentation/isdn/00-INDEX
 README.HiSax
        - info on the HiSax driver which replaces the old teles.
+README.act2000
+       - info on driver for IBM ACT-2000 card.
 README.audio
        - info for running audio over ISDN.
 README.avmb1
        - info on driver for AVM-B1 ISDN card.
-README.act2000
-       - info on driver for IBM ACT-2000 card.
-README.eicon
-       - info on driver for Eicon active cards.
 README.concap
        - info on "CONCAP" encapsulation protocol interface used for X.25.
 README.diversion
@@ -59,7 +48,3 @@ README.x25
        - info for running X.25 over ISDN.
 syncPPP.FAQ
        - frequently asked questions about running PPP over ISDN.
-README.hysdn
-       - info on driver for Hypercope active HYSDN cards
-README.mISDN
-       - info on the Modular ISDN subsystem (mISDN).
index d8209002fddc3f86fff2c4f77f256e8475c25add..90a48e99e8b36c03059831c339590a18000dd865 100644 (file)
@@ -2863,7 +2863,7 @@ P:        Sergey Lapin
 M:     slapin@ossfans.org
 L:     linux-zigbee-devel@lists.sourceforge.net
 W:     http://apps.sourceforge.net/trac/linux-zigbee
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lumag/lowpan.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
 S:     Maintained
 F:     net/ieee802154/
 F:     drivers/ieee801254/
index b4bb06fdf307d570cf431482bd23074191435b3a..f703758f0a6e4f7fdaa730dbe53fec400b86d5f7 100644 (file)
@@ -65,7 +65,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define TX_CQ_LEN              1024
 #define RX_Q_LEN               1024    /* Does not support any other value */
 #define RX_CQ_LEN              1024
-#define MCC_Q_LEN              64      /* total size not to exceed 8 pages */
+#define MCC_Q_LEN              128     /* total size not to exceed 8 pages */
 #define MCC_CQ_LEN             256
 
 #define BE_NAPI_WEIGHT         64
@@ -91,6 +91,61 @@ struct be_queue_info {
        atomic_t used;  /* Number of valid elements in the queue */
 };
 
+static inline u32 MODULO(u16 val, u16 limit)
+{
+       BUG_ON(limit & (limit - 1));
+       return val & (limit - 1);
+}
+
+static inline void index_adv(u16 *index, u16 val, u16 limit)
+{
+       *index = MODULO((*index + val), limit);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+       *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+       return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+       return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+       index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+       index_inc(&q->tail, q->len);
+}
+
+
+struct be_eq_obj {
+       struct be_queue_info q;
+       char desc[32];
+
+       /* Adaptive interrupt coalescing (AIC) info */
+       bool enable_aic;
+       u16 min_eqd;            /* in usecs */
+       u16 max_eqd;            /* in usecs */
+       u16 cur_eqd;            /* in usecs */
+
+       struct napi_struct napi;
+};
+
+struct be_mcc_obj {
+       struct be_queue_info q;
+       struct be_queue_info cq;
+};
+
 struct be_ctrl_info {
        u8 __iomem *csr;
        u8 __iomem *db;         /* Door Bell */
@@ -98,11 +153,20 @@ struct be_ctrl_info {
        int pci_func;
 
        /* Mbox used for cmd request/response */
-       spinlock_t cmd_lock;    /* For serializing cmds to BE card */
+       spinlock_t mbox_lock;   /* For serializing mbox cmds to BE card */
        struct be_dma_mem mbox_mem;
        /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
         * is stored for freeing purpose */
        struct be_dma_mem mbox_mem_alloced;
+
+       /* MCC Rings */
+       struct be_mcc_obj mcc_obj;
+       spinlock_t mcc_lock;    /* For serializing mcc cmds to BE card */
+       spinlock_t mcc_cq_lock;
+
+       /* MCC Async callback */
+       void (*async_cb)(void *adapter, bool link_up);
+       void *adapter_ctxt;
 };
 
 #include "be_cmds.h"
@@ -150,19 +214,6 @@ struct be_stats_obj {
        struct be_dma_mem cmd;
 };
 
-struct be_eq_obj {
-       struct be_queue_info q;
-       char desc[32];
-
-       /* Adaptive interrupt coalescing (AIC) info */
-       bool enable_aic;
-       u16 min_eqd;            /* in usecs */
-       u16 max_eqd;            /* in usecs */
-       u16 cur_eqd;            /* in usecs */
-
-       struct napi_struct napi;
-};
-
 struct be_tx_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
@@ -225,8 +276,9 @@ struct be_adapter {
        u32 if_handle;          /* Used to configure filtering */
        u32 pmac_id;            /* MAC addr handle used by BE card */
 
-       struct be_link_info link;
+       bool link_up;
        u32 port_num;
+       bool promiscuous;
 };
 
 extern struct ethtool_ops be_ethtool_ops;
@@ -235,22 +287,6 @@ extern struct ethtool_ops be_ethtool_ops;
 
 #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
 
-static inline u32 MODULO(u16 val, u16 limit)
-{
-       BUG_ON(limit & (limit - 1));
-       return val & (limit - 1);
-}
-
-static inline void index_adv(u16 *index, u16 val, u16 limit)
-{
-       *index = MODULO((*index + val), limit);
-}
-
-static inline void index_inc(u16 *index, u16 limit)
-{
-       *index = MODULO((*index + 1), limit);
-}
-
 #define PAGE_SHIFT_4K          12
 #define PAGE_SIZE_4K           (1 << PAGE_SHIFT_4K)
 
@@ -339,4 +375,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
        return val;
 }
 
+extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
+               u16 num_popped);
 #endif                         /* BE_H */
index d444aed962bc8bef4a41956659ff4f02cb3eabfa..583517ed56f05da7d4a5f77c42d183db3d44f258 100644 (file)
 
 #include "be.h"
 
+static void be_mcc_notify(struct be_ctrl_info *ctrl)
+{
+       struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+       u32 val = 0;
+
+       val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+       val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+       iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian) */
+static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
+{
+       if (compl->flags != 0) {
+               compl->flags = le32_to_cpu(compl->flags);
+               BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+               return true;
+       } else {
+               return false;
+       }
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
+{
+       compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
+       struct be_mcc_cq_entry *compl)
+{
+       u16 compl_status, extd_status;
+
+       /* Just swap the status to host endian; mcc tag is opaquely copied
+        * from mcc_wrb */
+       be_dws_le_to_cpu(compl, 4);
+
+       compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+                               CQE_STATUS_COMPL_MASK;
+       if (compl_status != MCC_STATUS_SUCCESS) {
+               extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+                               CQE_STATUS_EXTD_MASK;
+               printk(KERN_WARNING DRV_NAME
+                       " error in cmd completion: status(compl/extd)=%d/%d\n",
+                       compl_status, extd_status);
+               return -1;
+       }
+       return 0;
+}
+
+/* Link state evt is a string of bytes; no need for endian swapping */
+static void be_async_link_state_process(struct be_ctrl_info *ctrl,
+               struct be_async_event_link_state *evt)
+{
+       ctrl->async_cb(ctrl->adapter_ctxt,
+               evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
+}
+
+static inline bool is_link_state_evt(u32 trailer)
+{
+       return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+               ASYNC_TRAILER_EVENT_CODE_MASK) ==
+                               ASYNC_EVENT_CODE_LINK_STATE);
+}
+
+static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
+{
+       struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
+       struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
+
+       if (be_mcc_compl_is_new(compl)) {
+               queue_tail_inc(mcc_cq);
+               return compl;
+       }
+       return NULL;
+}
+
+void be_process_mcc(struct be_ctrl_info *ctrl)
+{
+       struct be_mcc_cq_entry *compl;
+       int num = 0;
+
+       spin_lock_bh(&ctrl->mcc_cq_lock);
+       while ((compl = be_mcc_compl_get(ctrl))) {
+               if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+                       /* Interpret flags as an async trailer */
+                       BUG_ON(!is_link_state_evt(compl->flags));
+
+                       /* Interpret compl as a async link evt */
+                       be_async_link_state_process(ctrl,
+                               (struct be_async_event_link_state *) compl);
+               } else {
+                       be_mcc_compl_process(ctrl, compl);
+                       atomic_dec(&ctrl->mcc_obj.q.used);
+               }
+               be_mcc_compl_use(compl);
+               num++;
+       }
+       if (num)
+               be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
+       spin_unlock_bh(&ctrl->mcc_cq_lock);
+}
+
+/* Wait till no more pending mcc requests are present */
+static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
+{
+#define mcc_timeout            50000 /* 5s timeout */
+       int i;
+       for (i = 0; i < mcc_timeout; i++) {
+               be_process_mcc(ctrl);
+               if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
+                       break;
+               udelay(100);
+       }
+       if (i == mcc_timeout)
+               printk(KERN_WARNING DRV_NAME "mcc poll timed out\n");
+}
+
+/* Notify MCC requests and wait for completion */
+static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
+{
+       be_mcc_notify(ctrl);
+       be_mcc_wait_compl(ctrl);
+}
+
 static int be_mbox_db_ready_wait(void __iomem *db)
 {
        int cnt = 0, wait = 5;
@@ -44,11 +171,11 @@ static int be_mbox_db_ready_wait(void __iomem *db)
 
 /*
  * Insert the mailbox address into the doorbell in two steps
+ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
  */
 static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
 {
        int status;
-       u16 compl_status, extd_status;
        u32 val = 0;
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
@@ -79,24 +206,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
        if (status != 0)
                return status;
 
-       /* compl entry has been made now */
-       be_dws_le_to_cpu(cqe, sizeof(*cqe));
-       if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
-               printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
+       /* A cq entry has been made now */
+       if (be_mcc_compl_is_new(cqe)) {
+               status = be_mcc_compl_process(ctrl, &mbox->cqe);
+               be_mcc_compl_use(cqe);
+               if (status)
+                       return status;
+       } else {
+               printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n");
                return -1;
        }
-
-       compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
-                               CQE_STATUS_COMPL_MASK;
-       if (compl_status != MCC_STATUS_SUCCESS) {
-               extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
-                               CQE_STATUS_EXTD_MASK;
-               printk(KERN_WARNING DRV_NAME
-                       ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
-                       compl_status, extd_status);
-       }
-
-       return compl_status;
+       return 0;
 }
 
 static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
@@ -235,6 +355,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
 }
 
+static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
+{
+       struct be_mcc_wrb *wrb = NULL;
+       if (atomic_read(&mccq->used) < mccq->len) {
+               wrb = queue_head_node(mccq);
+               queue_head_inc(mccq);
+               atomic_inc(&mccq->used);
+               memset(wrb, 0, sizeof(*wrb));
+       }
+       return wrb;
+}
+
 int be_cmd_eq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *eq, int eq_delay)
 {
@@ -244,7 +376,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -272,7 +404,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -284,7 +416,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
        struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -304,7 +436,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
        if (!status)
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -315,7 +447,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
        struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -332,7 +464,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -342,7 +474,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
        struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -354,7 +486,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
        req->pmac_id = cpu_to_le32(pmac_id);
 
        status = be_mbox_db_ring(ctrl);
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
 
        return status;
 }
@@ -370,7 +502,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
        void *ctxt = &req->context;
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -388,7 +520,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
-       AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
+       AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
@@ -399,7 +531,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
+
+       return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+       u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+       if (len_encoded == 16)
+               len_encoded = 0;
+       return len_encoded;
+}
+
+int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+                       struct be_queue_info *mccq,
+                       struct be_queue_info *cq)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
+       struct be_dma_mem *q_mem = &mccq->dma_mem;
+       void *ctxt = &req->context;
+       int status;
+
+       spin_lock(&ctrl->mbox_lock);
+       memset(wrb, 0, sizeof(*wrb));
+
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                       OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+       req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+       AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
+       AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
+       AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+               be_encoded_q_len(mccq->len));
+       AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+
+       be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+       be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+       status = be_mbox_db_ring(ctrl);
+       if (!status) {
+               struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+               mccq->id = le16_to_cpu(resp->id);
+               mccq->created = true;
+       }
+       spin_unlock(&ctrl->mbox_lock);
 
        return status;
 }
@@ -415,7 +596,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
        int status;
        u32 len_encoded;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -446,7 +627,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
 
        return status;
 }
@@ -460,7 +641,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -482,7 +663,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
        }
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
 
        return status;
 }
@@ -496,7 +677,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
        u8 subsys = 0, opcode = 0;
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
 
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -518,6 +699,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
+       case QTYPE_MCCQ:
+               subsys = CMD_SUBSYSTEM_COMMON;
+               opcode = OPCODE_COMMON_MCC_DESTROY;
+               break;
        default:
                printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
                status = -1;
@@ -528,7 +713,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 
        status = be_mbox_db_ring(ctrl);
 err:
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
 
        return status;
 }
@@ -541,7 +726,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
        struct be_cmd_req_if_create *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -562,7 +747,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -572,7 +757,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
        struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -583,7 +768,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
        req->interface_id = cpu_to_le32(interface_id);
        status = be_mbox_db_ring(ctrl);
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
 
        return status;
 }
@@ -598,7 +783,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
        struct be_sge *sge = nonembedded_sgl(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        memset(req, 0, sizeof(*req));
@@ -617,18 +802,20 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
                be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
        }
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
 int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
-                       struct be_link_info *link)
+                       bool *link_up)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_link_status *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
+
+       *link_up = false;
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -639,14 +826,11 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
-               link->speed = resp->mac_speed;
-               link->duplex = resp->mac_duplex;
-               link->fault = resp->mac_fault;
-       } else {
-               link->speed = PHY_LINK_SPEED_ZERO;
+               if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
+                       *link_up = true;
        }
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -656,7 +840,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
        struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -670,7 +854,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
        }
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -681,7 +865,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
        struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -696,7 +880,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
 
        status = be_mbox_db_ring(ctrl);
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -707,7 +891,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
        struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -726,18 +910,22 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
 
        status = be_mbox_db_ring(ctrl);
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
+/* Use MCC for this command as it may be called in BH context */
 int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
 {
-       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
-       int status;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_promiscuous_config *req;
 
-       spin_lock(&ctrl->cmd_lock);
-       memset(wrb, 0, sizeof(*wrb));
+       spin_lock_bh(&ctrl->mcc_lock);
+
+       wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+       BUG_ON(!wrb);
+
+       req = embedded_payload(wrb);
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -749,21 +937,29 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
        else
                req->port0_promiscuous = en;
 
-       status = be_mbox_db_ring(ctrl);
+       be_mcc_notify_wait(ctrl);
 
-       spin_unlock(&ctrl->cmd_lock);
-       return status;
+       spin_unlock_bh(&ctrl->mcc_lock);
+       return 0;
 }
 
-int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
-                       u32 num, bool promiscuous)
+/*
+ * Use MCC for this command as it may be called in BH context
+ * (mc == NULL) => multicast promiscuous
+ */
+int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+               struct dev_mc_list *mc_list, u32 mc_count)
 {
-       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
-       int status;
+#define BE_MAX_MC              32 /* set mcast promisc if > 32 */
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_mcast_mac_config *req;
 
-       spin_lock(&ctrl->cmd_lock);
-       memset(wrb, 0, sizeof(*wrb));
+       spin_lock_bh(&ctrl->mcc_lock);
+
+       wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+       BUG_ON(!wrb);
+
+       req = embedded_payload(wrb);
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -771,17 +967,23 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
 
        req->interface_id = if_id;
-       req->promiscuous = promiscuous;
-       if (!promiscuous) {
-               req->num_mac = cpu_to_le16(num);
-               if (num)
-                       memcpy(req->mac, mac_table, ETH_ALEN * num);
+       if (mc_list && mc_count <= BE_MAX_MC) {
+               int i;
+               struct dev_mc_list *mc;
+
+               req->num_mac = cpu_to_le16(mc_count);
+
+               for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
+                       memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+       } else {
+               req->promiscuous = 1;
        }
 
-       status = be_mbox_db_ring(ctrl);
+       be_mcc_notify_wait(ctrl);
 
-       spin_unlock(&ctrl->cmd_lock);
-       return status;
+       spin_unlock_bh(&ctrl->mcc_lock);
+
+       return 0;
 }
 
 int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
@@ -790,7 +992,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
        struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
 
        memset(wrb, 0, sizeof(*wrb));
 
@@ -804,7 +1006,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
 
        status = be_mbox_db_ring(ctrl);
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -814,7 +1016,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
        struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
 
        memset(wrb, 0, sizeof(*wrb));
 
@@ -831,7 +1033,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -841,7 +1043,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
        struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->cmd_lock);
+       spin_lock(&ctrl->mbox_lock);
 
        memset(wrb, 0, sizeof(*wrb));
 
@@ -856,6 +1058,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
                *port_num = le32_to_cpu(resp->phys_port);
        }
 
-       spin_unlock(&ctrl->cmd_lock);
+       spin_unlock(&ctrl->mbox_lock);
        return status;
 }
index e499e2d5b8c367d9a2eb334a51c070cfe8b9eef2..747626da7b4e7122b24189819cb5316a4db76642 100644 (file)
@@ -76,6 +76,34 @@ struct be_mcc_cq_entry {
        u32 flags;              /* dword 3 */
 };
 
+/* When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl is interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8       /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK  0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE    0x1
+struct be_async_event_trailer {
+       u32 code;
+};
+
+enum {
+       ASYNC_EVENT_LINK_DOWN   = 0x0,
+       ASYNC_EVENT_LINK_UP     = 0x1
+};
+
+/* When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+       u8 physical_port;
+       u8 port_link_status;
+       u8 port_duplex;
+       u8 port_speed;
+       u8 port_fault;
+       u8 rsvd0[7];
+       struct be_async_event_trailer trailer;
+} __packed;
+
 struct be_mcc_mailbox {
        struct be_mcc_wrb wrb;
        struct be_mcc_cq_entry cqe;
@@ -101,6 +129,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_FIRMWARE_CONFIG                  42
 #define OPCODE_COMMON_NTWK_INTERFACE_CREATE            50
 #define OPCODE_COMMON_NTWK_INTERFACE_DESTROY           51
+#define OPCODE_COMMON_MCC_DESTROY                      53
 #define OPCODE_COMMON_CQ_DESTROY                       54
 #define OPCODE_COMMON_EQ_DESTROY                       55
 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG            58
@@ -269,6 +298,38 @@ struct be_cmd_resp_cq_create {
        u16 rsvd0;
 } __packed;
 
+/******************** Create MCCQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_mcc_context {
+       u8 con_index[14];
+       u8 rsvd0[2];
+       u8 ring_size[4];
+       u8 fetch_wrb;
+       u8 fetch_r2t;
+       u8 cq_id[10];
+       u8 prod_index[14];
+       u8 fid[8];
+       u8 pdid[9];
+       u8 valid;
+       u8 rsvd1[32];
+       u8 rsvd2[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+       struct be_cmd_req_hdr hdr;
+       u16 num_pages;
+       u16 rsvd0;
+       u8 context[sizeof(struct amap_mcc_context) / 8];
+       struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+       struct be_cmd_resp_hdr hdr;
+       u16 id;
+       u16 rsvd0;
+} __packed;
+
 /******************** Create TxQ ***************************/
 #define BE_ETH_TX_RING_TYPE_STANDARD           2
 #define BE_ULP1_NUM                            1
@@ -341,7 +402,8 @@ enum {
        QTYPE_EQ = 1,
        QTYPE_CQ,
        QTYPE_TXQ,
-       QTYPE_RXQ
+       QTYPE_RXQ,
+       QTYPE_MCCQ
 };
 
 struct be_cmd_req_q_destroy {
@@ -546,12 +608,6 @@ struct be_cmd_req_link_status {
        u32 rsvd;
 };
 
-struct be_link_info {
-       u8 duplex;
-       u8 speed;
-       u8 fault;
-};
-
 enum {
        PHY_LINK_DUPLEX_NONE = 0x0,
        PHY_LINK_DUPLEX_HALF = 0x1,
@@ -657,6 +713,9 @@ extern int be_cmd_cq_create(struct be_ctrl_info *ctrl,
                        struct be_queue_info *cq, struct be_queue_info *eq,
                        bool sol_evts, bool no_delay,
                        int num_cqe_dma_coalesce);
+extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+                       struct be_queue_info *mccq,
+                       struct be_queue_info *cq);
 extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq);
@@ -667,7 +726,7 @@ extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
 extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                        int type);
 extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
-                       struct be_link_info *link);
+                       bool *link_up);
 extern int be_cmd_reset(struct be_ctrl_info *ctrl);
 extern int be_cmd_get_stats(struct be_ctrl_info *ctrl,
                        struct be_dma_mem *nonemb_cmd);
@@ -679,10 +738,11 @@ extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id,
                        bool promiscuous);
 extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl,
                        u8 port_num, bool en);
-extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id,
-                       u8 *mac_table, u32 num, bool promiscuous);
+extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+                       struct dev_mc_list *mc_list, u32 mc_count);
 extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
                        u32 tx_fc, u32 rx_fc);
 extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
                        u32 *tx_fc, u32 *rx_fc);
 extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
+extern void be_process_mcc(struct be_ctrl_info *ctrl);
index b132aa4893ca596e31d4342dacec786ebcb286f4..b02e805c1db37c8e645510b6b479905b74d19e56 100644 (file)
@@ -61,7 +61,7 @@
 /* Clear the interrupt for this eq */
 #define DB_EQ_CLR_SHIFT                        (9)     /* bit 9 */
 /* Must be 1 */
-#define DB_EQ_EVNT_SHIFT                       (10)    /* bit 10 */
+#define DB_EQ_EVNT_SHIFT               (10)    /* bit 10 */
 /* Number of event entries processed */
 #define DB_EQ_NUM_POPPED_SHIFT         (16)    /* bits 16 - 28 */
 /* Rearm bit */
 /* Number of rx frags posted */
 #define DB_RQ_NUM_POSTED_SHIFT         (24)    /* bits 24 - 31 */
 
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET                         0x140
+#define DB_MCCQ_RING_ID_MASK           0x7FF   /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT       (16)    /* bits 16 - 29 */
+
 /*
  * BE descriptors: host memory data structures whose formats
  * are hardwired in BE silicon.
index 66bb56874d9bd0bbc99b18284f836f2e6a2535ba..66c10c87f5175151d54cff2fedb9a1f96d551201 100644 (file)
@@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
        return 0;
 }
 
-static inline void *queue_head_node(struct be_queue_info *q)
-{
-       return q->dma_mem.va + q->head * q->entry_size;
-}
-
-static inline void *queue_tail_node(struct be_queue_info *q)
-{
-       return q->dma_mem.va + q->tail * q->entry_size;
-}
-
-static inline void queue_head_inc(struct be_queue_info *q)
-{
-       index_inc(&q->head, q->len);
-}
-
-static inline void queue_tail_inc(struct be_queue_info *q)
-{
-       index_inc(&q->tail, q->len);
-}
-
 static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
 {
        u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
@@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
        iowrite32(val, ctrl->db + DB_EQ_OFFSET);
 }
 
-static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
+void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
                bool arm, u16 num_popped)
 {
        u32 val = 0;
@@ -234,28 +214,24 @@ static void netdev_stats_update(struct be_adapter *adapter)
        dev_stats->tx_window_errors = 0;
 }
 
-static void be_link_status_update(struct be_adapter *adapter)
+void be_link_status_update(void *ctxt, bool link_up)
 {
-       struct be_link_info *prev = &adapter->link;
-       struct be_link_info now = { 0 };
+       struct be_adapter *adapter = ctxt;
        struct net_device *netdev = adapter->netdev;
 
-       be_cmd_link_status_query(&adapter->ctrl, &now);
-
        /* If link came up or went down */
-       if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
-                       prev->speed == PHY_LINK_SPEED_ZERO)) {
-               if (now.speed == PHY_LINK_SPEED_ZERO) {
-                       netif_stop_queue(netdev);
-                       netif_carrier_off(netdev);
-                       printk(KERN_INFO "%s: Link down\n", netdev->name);
-               } else {
+       if (adapter->link_up != link_up) {
+               if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
+               } else {
+                       netif_stop_queue(netdev);
+                       netif_carrier_off(netdev);
+                       printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
+               adapter->link_up = link_up;
        }
-       *prev = now;
 }
 
 /* Update the EQ delay n BE based on the RX frags consumed / sec */
@@ -569,47 +545,32 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
        be_vid_config(netdev);
 }
 
-static void be_set_multicast_filter(struct net_device *netdev)
+static void be_set_multicast_list(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       struct dev_mc_list *mc_ptr;
-       u8 mac_addr[32][ETH_ALEN];
-       int i = 0;
+       struct be_ctrl_info *ctrl = &adapter->ctrl;
 
-       if (netdev->flags & IFF_ALLMULTI) {
-               /* set BE in Multicast promiscuous */
-               be_cmd_mcast_mac_set(&adapter->ctrl,
-                                       adapter->if_handle, NULL, 0, true);
-               return;
+       if (netdev->flags & IFF_PROMISC) {
+               be_cmd_promiscuous_config(ctrl, adapter->port_num, 1);
+               adapter->promiscuous = true;
+               goto done;
        }
 
-       for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
-               memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
-               if (++i >= 32) {
-                       be_cmd_mcast_mac_set(&adapter->ctrl,
-                               adapter->if_handle, &mac_addr[0][0], i, false);
-                       i = 0;
-               }
-
+       /* BE was previously in promiscous mode; disable it */
+       if (adapter->promiscuous) {
+               adapter->promiscuous = false;
+               be_cmd_promiscuous_config(ctrl, adapter->port_num, 0);
        }
 
-       if (i) {
-               /* reset the promiscuous mode also. */
-               be_cmd_mcast_mac_set(&adapter->ctrl,
-                       adapter->if_handle, &mac_addr[0][0], i, false);
+       if (netdev->flags & IFF_ALLMULTI) {
+               be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0);
+               goto done;
        }
-}
 
-static void be_set_multicast_list(struct net_device *netdev)
-{
-       struct be_adapter *adapter = netdev_priv(netdev);
-
-       if (netdev->flags & IFF_PROMISC) {
-               be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
-       } else {
-               be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
-               be_set_multicast_filter(netdev);
-       }
+       be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list,
+               netdev->mc_count);
+done:
+       return;
 }
 
 static void be_rx_rate_update(struct be_adapter *adapter)
@@ -960,10 +921,8 @@ static void be_post_rx_frags(struct be_adapter *adapter)
        return;
 }
 
-static struct be_eth_tx_compl *
-be_tx_compl_get(struct be_adapter *adapter)
+static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 {
-       struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
 
        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
@@ -1051,6 +1010,59 @@ static void be_tx_q_clean(struct be_adapter *adapter)
        }
 }
 
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+       struct be_queue_info *q;
+       struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+       q = &ctrl->mcc_obj.q;
+       if (q->created)
+               be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
+       be_queue_free(adapter, q);
+
+       q = &ctrl->mcc_obj.cq;
+       if (q->created)
+               be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+       be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+       struct be_queue_info *q, *cq;
+       struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+       /* Alloc MCC compl queue */
+       cq = &ctrl->mcc_obj.cq;
+       if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+                       sizeof(struct be_mcc_cq_entry)))
+               goto err;
+
+       /* Ask BE to create MCC compl queue; share TX's eq */
+       if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
+               goto mcc_cq_free;
+
+       /* Alloc MCC queue */
+       q = &ctrl->mcc_obj.q;
+       if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+               goto mcc_cq_destroy;
+
+       /* Ask BE to create MCC queue */
+       if (be_cmd_mccq_create(ctrl, q, cq))
+               goto mcc_q_free;
+
+       return 0;
+
+mcc_q_free:
+       be_queue_free(adapter, q);
+mcc_cq_destroy:
+       be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
+mcc_cq_free:
+       be_queue_free(adapter, cq);
+err:
+       return -1;
+}
+
 static void be_tx_queues_destroy(struct be_adapter *adapter)
 {
        struct be_queue_info *q;
@@ -1263,7 +1275,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t be_msix_tx(int irq, void *dev)
+static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
 {
        struct be_adapter *adapter = dev;
 
@@ -1324,40 +1336,51 @@ int be_poll_rx(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-/* For TX we don't honour budget; consume everything */
-int be_poll_tx(struct napi_struct *napi, int budget)
+void be_process_tx(struct be_adapter *adapter)
 {
-       struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
-       struct be_adapter *adapter =
-               container_of(tx_eq, struct be_adapter, tx_eq);
-       struct be_tx_obj *tx_obj = &adapter->tx_obj;
-       struct be_queue_info *tx_cq = &tx_obj->cq;
-       struct be_queue_info *txq = &tx_obj->q;
+       struct be_queue_info *txq = &adapter->tx_obj.q;
+       struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp;
        u32 num_cmpl = 0;
        u16 end_idx;
 
-       while ((txcp = be_tx_compl_get(adapter))) {
+       while ((txcp = be_tx_compl_get(tx_cq))) {
                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                be_tx_compl_process(adapter, end_idx);
                num_cmpl++;
        }
 
-       /* As Tx wrbs have been freed up, wake up netdev queue if
-        * it was stopped due to lack of tx wrbs.
-        */
-       if (netif_queue_stopped(adapter->netdev) &&
+       if (num_cmpl) {
+               be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+
+               /* As Tx wrbs have been freed up, wake up netdev queue if
+                * it was stopped due to lack of tx wrbs.
+                */
+               if (netif_queue_stopped(adapter->netdev) &&
                        atomic_read(&txq->used) < txq->len / 2) {
-               netif_wake_queue(adapter->netdev);
+                       netif_wake_queue(adapter->netdev);
+               }
+
+               drvr_stats(adapter)->be_tx_events++;
+               drvr_stats(adapter)->be_tx_compl += num_cmpl;
        }
+}
+
+/* As TX and MCC share the same EQ check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+{
+       struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+       struct be_adapter *adapter =
+               container_of(tx_eq, struct be_adapter, tx_eq);
 
        napi_complete(napi);
 
-       be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+       be_process_tx(adapter);
 
-       drvr_stats(adapter)->be_tx_events++;
-       drvr_stats(adapter)->be_tx_compl += num_cmpl;
+       be_process_mcc(&adapter->ctrl);
 
        return 1;
 }
@@ -1368,9 +1391,6 @@ static void be_worker(struct work_struct *work)
                container_of(work, struct be_adapter, work.work);
        int status;
 
-       /* Check link */
-       be_link_status_update(adapter);
-
        /* Get Stats */
        status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
        if (!status)
@@ -1419,7 +1439,7 @@ static int be_msix_register(struct be_adapter *adapter)
 
        sprintf(tx_eq->desc, "%s-tx", netdev->name);
        vec = be_msix_vec_get(adapter, tx_eq->q.id);
-       status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
+       status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
        if (status)
                goto err;
 
@@ -1495,6 +1515,39 @@ static int be_open(struct net_device *netdev)
        struct be_ctrl_info *ctrl = &adapter->ctrl;
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
+       bool link_up;
+       int status;
+
+       /* First time posting */
+       be_post_rx_frags(adapter);
+
+       napi_enable(&rx_eq->napi);
+       napi_enable(&tx_eq->napi);
+
+       be_irq_register(adapter);
+
+       be_intr_set(ctrl, true);
+
+       /* The evt queues are created in unarmed state; arm them */
+       be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
+       be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
+
+       /* Rx compl queue may be in unarmed state; rearm it */
+       be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
+
+       status = be_cmd_link_status_query(ctrl, &link_up);
+       if (status)
+               return status;
+       be_link_status_update(adapter, link_up);
+
+       schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+       return 0;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+       struct be_ctrl_info *ctrl = &adapter->ctrl;
+       struct net_device *netdev = adapter->netdev;
        u32 if_flags;
        int status;
 
@@ -1521,29 +1574,14 @@ static int be_open(struct net_device *netdev)
        if (status != 0)
                goto tx_qs_destroy;
 
-       /* First time posting */
-       be_post_rx_frags(adapter);
-
-       napi_enable(&rx_eq->napi);
-       napi_enable(&tx_eq->napi);
-
-       be_irq_register(adapter);
-
-       be_intr_set(ctrl, true);
-
-       /* The evt queues are created in the unarmed state; arm them */
-       be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
-       be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
-
-       /* The compl queues are created in the unarmed state; arm them */
-       be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
-       be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
-
-       be_link_status_update(adapter);
+       status = be_mcc_queues_create(adapter);
+       if (status != 0)
+               goto rx_qs_destroy;
 
-       schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
 
+rx_qs_destroy:
+       be_rx_queues_destroy(adapter);
 tx_qs_destroy:
        be_tx_queues_destroy(adapter);
 if_destroy:
@@ -1552,6 +1590,19 @@ do_none:
        return status;
 }
 
+static int be_clear(struct be_adapter *adapter)
+{
+       struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+       be_rx_queues_destroy(adapter);
+       be_tx_queues_destroy(adapter);
+
+       be_cmd_if_destroy(ctrl, adapter->if_handle);
+
+       be_mcc_queues_destroy(adapter);
+       return 0;
+}
+
 static int be_close(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -1564,7 +1615,7 @@ static int be_close(struct net_device *netdev)
 
        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
-       adapter->link.speed = PHY_LINK_SPEED_ZERO;
+       adapter->link_up = false;
 
        be_intr_set(ctrl, false);
 
@@ -1581,10 +1632,6 @@ static int be_close(struct net_device *netdev)
        napi_disable(&rx_eq->napi);
        napi_disable(&tx_eq->napi);
 
-       be_rx_queues_destroy(adapter);
-       be_tx_queues_destroy(adapter);
-
-       be_cmd_if_destroy(ctrl, adapter->if_handle);
        return 0;
 }
 
@@ -1673,7 +1720,7 @@ static void be_netdev_init(struct net_device *netdev)
 
        netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
                BE_NAPI_WEIGHT);
-       netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
+       netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
 
        netif_carrier_off(netdev);
@@ -1755,7 +1802,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
-       spin_lock_init(&ctrl->cmd_lock);
+       spin_lock_init(&ctrl->mbox_lock);
+       spin_lock_init(&ctrl->mcc_lock);
+       spin_lock_init(&ctrl->mcc_cq_lock);
+
+       ctrl->async_cb = be_link_status_update;
+       ctrl->adapter_ctxt = adapter;
 
        val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
        ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
@@ -1793,6 +1845,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
 
        unregister_netdev(adapter->netdev);
 
+       be_clear(adapter);
+
        be_stats_cleanup(adapter);
 
        be_ctrl_cleanup(adapter);
@@ -1890,13 +1944,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
        be_netdev_init(netdev);
        SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
 
+       status = be_setup(adapter);
+       if (status)
+               goto stats_clean;
        status = register_netdev(netdev);
        if (status != 0)
-               goto stats_clean;
+               goto unsetup;
 
        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        return 0;
 
+unsetup:
+       be_clear(adapter);
 stats_clean:
        be_stats_cleanup(adapter);
 ctrl_clean:
@@ -1921,6 +1980,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
+               be_clear(adapter);
                rtnl_unlock();
        }
 
@@ -1947,6 +2007,7 @@ static int be_resume(struct pci_dev *pdev)
 
        if (netif_running(netdev)) {
                rtnl_lock();
+               be_setup(adapter);
                be_open(netdev);
                rtnl_unlock();
        }
index 677f60490f679d8aa80536a66bc060f479e1a283..679885a122b4da068878a4bcd471c45ecd4e905a 100644 (file)
@@ -1997,7 +1997,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *poll_dev = adapter->netdev;
-       int tx_cleaned = 0, work_done = 0;
+       int tx_cleaned = 1, work_done = 0;
 
        adapter = netdev_priv(poll_dev);
 
index 745ae8b4a2e8b30d6ac5e25c8acc94a020564c25..0f32db3e92ade2c2403dc47f5aadb519690c3fc6 100644 (file)
@@ -1750,12 +1750,12 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
 
        uc_addr_set(mp, dev->dev_addr);
 
-       port_config = rdlp(mp, PORT_CONFIG);
+       port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
        nibbles = uc_addr_filter_mask(dev);
        if (!nibbles) {
                port_config |= UNICAST_PROMISCUOUS_MODE;
-               wrlp(mp, PORT_CONFIG, port_config);
-               return;
+               nibbles = 0xffff;
        }
 
        for (i = 0; i < 16; i += 4) {
@@ -1776,7 +1776,6 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
                wrl(mp, off, v);
        }
 
-       port_config &= ~UNICAST_PROMISCUOUS_MODE;
        wrlp(mp, PORT_CONFIG, port_config);
 }
 
index 4e22462684c98e63108c0851ba36ae49d5532582..4b53b58d75fc0375e03579e79c9dbb5adf808056 100644 (file)
@@ -51,9 +51,6 @@
 #define TX_BUFFS_AVAIL(tp) \
        (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
 
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static const int max_interrupt_work = 20;
-
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static const int multicast_filter_limit = 32;
index 3717569828bf360f16a8bc6b8138c6965269bddd..a906d3998131a512b0f125f3f8892cf38c3bdaf5 100644 (file)
@@ -169,10 +169,12 @@ config USB_NET_CDCETHER
          The Linux-USB CDC Ethernet Gadget driver is an open implementation.
          This driver should work with at least the following devices:
 
+           * Dell Wireless 5530 HSPA
            * Ericsson PipeRider (all variants)
+           * Ericsson Mobile Broadband Module (all variants)
            * Motorola (DM100 and SB4100)
            * Broadcom Cable Modem (reference design)
-           * Toshiba PCX1100U
+           * Toshiba (PCX1100U and F3507g)
            * ...
 
          This driver creates an interface named "ethX", where X depends on
index 01fd528306ec1c87c74e930f6effe6201b519171..4a6aff579403907f28331f1ce22d8c570ce52d09 100644 (file)
@@ -533,6 +533,31 @@ static const struct usb_device_id  products [] = {
        USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long) &cdc_info,
+}, {
+       /* Ericsson F3507g ver. 2 */
+       USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &cdc_info,
+}, {
+       /* Ericsson F3607gw */
+       USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &cdc_info,
+}, {
+       /* Ericsson F3307 */
+       USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &cdc_info,
+}, {
+       /* Toshiba F3507g */
+       USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &cdc_info,
+}, {
+       /* Dell F3507g */
+       USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &cdc_info,
 },
        { },            // END
 };
index 2138535f2339e762590d5cdba26a8593de8c1199..73acbd244aa106493cbdf7c3474937ee44e370f1 100644 (file)
@@ -297,7 +297,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
 
        pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
        pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
-       pegasus->dr.wValue = 0;
+       pegasus->dr.wValue = cpu_to_le16(0);
        pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
        pegasus->dr.wLength = cpu_to_le16(3);
        pegasus->ctrl_urb->transfer_buffer_length = 3;
@@ -446,11 +446,12 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
        int i;
        __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
        int ret;
+       __le16 le_data = cpu_to_le16(data);
 
        set_registers(pegasus, EpromOffset, 4, d);
        enable_eprom_write(pegasus);
        set_register(pegasus, EpromOffset, index);
-       set_registers(pegasus, EpromData, 2, &data);
+       set_registers(pegasus, EpromData, 2, &le_data);
        set_register(pegasus, EpromCtrl, EPROM_WRITE);
 
        for (i = 0; i < REG_TIMEOUT; i++) {
@@ -923,29 +924,32 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
 
 static inline void disable_net_traffic(pegasus_t * pegasus)
 {
-       int tmp = 0;
+       __le16 tmp = cpu_to_le16(0);
 
-       set_registers(pegasus, EthCtrl0, 2, &tmp);
+       set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
 }
 
 static inline void get_interrupt_interval(pegasus_t * pegasus)
 {
-       __u8 data[2];
+       u16 data;
+       u8 interval;
 
-       read_eprom_word(pegasus, 4, (__u16 *) data);
+       read_eprom_word(pegasus, 4, &data);
+       interval = data >> 8;
        if (pegasus->usb->speed != USB_SPEED_HIGH) {
-               if (data[1] < 0x80) {
+               if (interval < 0x80) {
                        if (netif_msg_timer(pegasus))
                                dev_info(&pegasus->intf->dev, "intr interval "
                                        "changed from %ums to %ums\n",
-                                       data[1], 0x80);
-                       data[1] = 0x80;
+                                       interval, 0x80);
+                       interval = 0x80;
+                       data = (data & 0x00FF) | ((u16)interval << 8);
 #ifdef PEGASUS_WRITE_EEPROM
-                       write_eprom_word(pegasus, 4, *(__u16 *) data);
+                       write_eprom_word(pegasus, 4, data);
 #endif
                }
        }
-       pegasus->intr_interval = data[1];
+       pegasus->intr_interval = interval;
 }
 
 static void set_carrier(struct net_device *net)
@@ -1299,7 +1303,8 @@ static int pegasus_blacklisted(struct usb_device *udev)
        /* Special quirk to keep the driver from handling the Belkin Bluetooth
         * dongle which happens to have the same ID.
         */
-       if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
+       if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
+           (udd->idProduct == cpu_to_le16(0x0121)) &&
            (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
            (udd->bDeviceProtocol == 1))
                return 1;
index 21ee49ffcbaf7d9980a52b4dd84cc2ff4f849c6f..f82a1e87737241ec0b7a5cb961b0a90e7ed8d503 100644 (file)
@@ -94,8 +94,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int  iucv_sock_wait_state(struct sock *sk, int state, int state2,
-                         unsigned long timeo);
 int  iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
 void iucv_accept_unlink(struct sock *sk);
index 882a927cefaede6f65531717db85d17aa5541e4f..3bb6bdb1dac1d95b537af4752b4039acfd1d52e2 100644 (file)
 
 #include "af802154.h"
 
-#define DBG_DUMP(data, len) { \
-       int i; \
-       pr_debug("function: %s: data: len %d:\n", __func__, len); \
-       for (i = 0; i < len; i++) {\
-               pr_debug("%02x: %02x\n", i, (data)[i]); \
-       } \
-}
-
 /*
  * Utility function for families
  */
@@ -302,10 +294,12 @@ static struct net_proto_family ieee802154_family_ops = {
 static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev)
 {
-       DBG_DUMP(skb->data, skb->len);
        if (!netif_running(dev))
                return -ENODEV;
        pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+       print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
 
        if (!net_eq(dev_net(dev), &init_net))
                goto drop;
index cd76b3cb70925fc142c50f652adea2eafc5c7eb2..65b3a8b11a6c3e6699ab426dce59e356da220670 100644 (file)
@@ -1085,8 +1085,16 @@ restart:
        now = jiffies;
 
        if (!rt_caching(dev_net(rt->u.dst.dev))) {
-               rt_drop(rt);
-               return 0;
+               /*
+                * If we're not caching, just tell the caller we
+                * were successful and don't touch the route.  The
+                * caller hold the sole reference to the cache entry, and
+                * it will be released when the caller is done with it.
+                * If we drop it here, the callers have no way to resolve routes
+                * when we're not caching.  Instead, just point *rp at rt, so
+                * the caller gets a single use out of the route
+                */
+               goto report_and_exit;
        }
 
        rthp = &rt_hash_table[hash].chain;
@@ -1217,6 +1225,8 @@ restart:
        rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
        spin_unlock_bh(rt_hash_lock_addr(hash));
+
+report_and_exit:
        if (rp)
                *rp = rt;
        else
index a9b3a6f9ea95f18f2ea8325a72741fb61e0c14cf..abadb4a846cf04a0d6fcfe842e204aec1e1b9951 100644 (file)
@@ -53,6 +53,38 @@ static const u8 iprm_shutdown[8] =
 #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
 #define CB_TRGCLS_LEN  (TRGCLS_SIZE)
 
+#define __iucv_sock_wait(sk, condition, timeo, ret)                    \
+do {                                                                   \
+       DEFINE_WAIT(__wait);                                            \
+       long __timeo = timeo;                                           \
+       ret = 0;                                                        \
+       while (!(condition)) {                                          \
+               prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+               if (!__timeo) {                                         \
+                       ret = -EAGAIN;                                  \
+                       break;                                          \
+               }                                                       \
+               if (signal_pending(current)) {                          \
+                       ret = sock_intr_errno(__timeo);                 \
+                       break;                                          \
+               }                                                       \
+               release_sock(sk);                                       \
+               __timeo = schedule_timeout(__timeo);                    \
+               lock_sock(sk);                                          \
+               ret = sock_error(sk);                                   \
+               if (ret)                                                \
+                       break;                                          \
+       }                                                               \
+       finish_wait(sk->sk_sleep, &__wait);                             \
+} while (0)
+
+#define iucv_sock_wait(sk, condition, timeo)                           \
+({                                                                     \
+       int __ret = 0;                                                  \
+       if (!(condition))                                               \
+               __iucv_sock_wait(sk, condition, timeo, __ret);          \
+       __ret;                                                          \
+})
 
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
@@ -121,6 +153,48 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
        return msg->length;
 }
 
+/**
+ * iucv_sock_in_state() - check for specific states
+ * @sk:                sock structure
+ * @state:     first iucv sk state
+ * @state2:    second iucv sk state
+ *
+ * Returns true if the socket is in either the first or the second state.
+ */
+static int iucv_sock_in_state(struct sock *sk, int state, int state2)
+{
+       return (sk->sk_state == state || sk->sk_state == state2);
+}
+
+/**
+ * iucv_below_msglim() - function to check if messages can be sent
+ * @sk:                sock structure
+ *
+ * Returns true if the send queue length is lower than the message limit.
+ * Always returns true if the socket is not connected (no iucv path for
+ * checking the message limit).
+ */
+static inline int iucv_below_msglim(struct sock *sk)
+{
+       struct iucv_sock *iucv = iucv_sk(sk);
+
+       if (sk->sk_state != IUCV_CONNECTED)
+               return 1;
+       return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+}
+
+/**
+ * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
+ */
+static void iucv_sock_wake_msglim(struct sock *sk)
+{
+       read_lock(&sk->sk_callback_lock);
+       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+               wake_up_interruptible_all(sk->sk_sleep);
+       sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+       read_unlock(&sk->sk_callback_lock);
+}
+
 /* Timers */
 static void iucv_sock_timeout(unsigned long arg)
 {
@@ -212,7 +286,9 @@ static void iucv_sock_close(struct sock *sk)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
-                       err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+                       err = iucv_sock_wait(sk,
+                                       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
+                                       timeo);
                }
 
        case IUCV_CLOSING:   /* fall through */
@@ -393,39 +469,6 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
        return NULL;
 }
 
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
-                        unsigned long timeo)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       int err = 0;
-
-       add_wait_queue(sk->sk_sleep, &wait);
-       while (sk->sk_state != state && sk->sk_state != state2) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
-               if (!timeo) {
-                       err = -EAGAIN;
-                       break;
-               }
-
-               if (signal_pending(current)) {
-                       err = sock_intr_errno(timeo);
-                       break;
-               }
-
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock(sk);
-
-               err = sock_error(sk);
-               if (err)
-                       break;
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(sk->sk_sleep, &wait);
-       return err;
-}
-
 /* Bind an unbound socket */
 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
@@ -570,8 +613,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
        }
 
        if (sk->sk_state != IUCV_CONNECTED) {
-               err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
-                               sock_sndtimeo(sk, flags & O_NONBLOCK));
+               err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
+                                                           IUCV_DISCONN),
+                                    sock_sndtimeo(sk, flags & O_NONBLOCK));
        }
 
        if (sk->sk_state == IUCV_DISCONN) {
@@ -725,9 +769,11 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct iucv_message txmsg;
        struct cmsghdr *cmsg;
        int cmsg_done;
+       long timeo;
        char user_id[9];
        char appl_id[9];
        int err;
+       int noblock = msg->msg_flags & MSG_DONTWAIT;
 
        err = sock_error(sk);
        if (err)
@@ -747,108 +793,119 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                goto out;
        }
 
-       if (sk->sk_state == IUCV_CONNECTED) {
-               /* initialize defaults */
-               cmsg_done   = 0;        /* check for duplicate headers */
-               txmsg.class = 0;
+       /* Return if the socket is not in connected state */
+       if (sk->sk_state != IUCV_CONNECTED) {
+               err = -ENOTCONN;
+               goto out;
+       }
 
-               /* iterate over control messages */
-               for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
-                    cmsg = CMSG_NXTHDR(msg, cmsg)) {
+       /* initialize defaults */
+       cmsg_done   = 0;        /* check for duplicate headers */
+       txmsg.class = 0;
 
-                       if (!CMSG_OK(msg, cmsg)) {
-                               err = -EINVAL;
-                               goto out;
-                       }
+       /* iterate over control messages */
+       for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+               cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+               if (!CMSG_OK(msg, cmsg)) {
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               if (cmsg->cmsg_level != SOL_IUCV)
+                       continue;
 
-                       if (cmsg->cmsg_level != SOL_IUCV)
-                               continue;
+               if (cmsg->cmsg_type & cmsg_done) {
+                       err = -EINVAL;
+                       goto out;
+               }
+               cmsg_done |= cmsg->cmsg_type;
 
-                       if (cmsg->cmsg_type & cmsg_done) {
+               switch (cmsg->cmsg_type) {
+               case SCM_IUCV_TRGCLS:
+                       if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                                err = -EINVAL;
                                goto out;
                        }
-                       cmsg_done |= cmsg->cmsg_type;
-
-                       switch (cmsg->cmsg_type) {
-                       case SCM_IUCV_TRGCLS:
-                               if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
-                                       err = -EINVAL;
-                                       goto out;
-                               }
 
-                               /* set iucv message target class */
-                               memcpy(&txmsg.class,
-                                       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+                       /* set iucv message target class */
+                       memcpy(&txmsg.class,
+                               (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
 
-                               break;
+                       break;
 
-                       default:
-                               err = -EINVAL;
-                               goto out;
-                               break;
-                       }
+               default:
+                       err = -EINVAL;
+                       goto out;
+                       break;
                }
+       }
 
-               /* allocate one skb for each iucv message:
-                * this is fine for SOCK_SEQPACKET (unless we want to support
-                * segmented records using the MSG_EOR flag), but
-                * for SOCK_STREAM we might want to improve it in future */
-               if (!(skb = sock_alloc_send_skb(sk, len,
-                                               msg->msg_flags & MSG_DONTWAIT,
-                                               &err)))
-                       goto out;
+       /* allocate one skb for each iucv message:
+        * this is fine for SOCK_SEQPACKET (unless we want to support
+        * segmented records using the MSG_EOR flag), but
+        * for SOCK_STREAM we might want to improve it in future */
+       skb = sock_alloc_send_skb(sk, len, noblock, &err);
+       if (!skb)
+               goto out;
+       if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+               err = -EFAULT;
+               goto fail;
+       }
 
-               if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
-                       err = -EFAULT;
-                       goto fail;
-               }
+       /* wait if the number of outstanding messages on the iucv path has reached the message limit */
+       timeo = sock_sndtimeo(sk, noblock);
+       err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
+       if (err)
+               goto fail;
 
-               /* increment and save iucv message tag for msg_completion cbk */
-               txmsg.tag = iucv->send_tag++;
-               memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
-               skb_queue_tail(&iucv->send_skb_q, skb);
+       /* return -ECONNRESET if the socket is no longer connected */
+       if (sk->sk_state != IUCV_CONNECTED) {
+               err = -ECONNRESET;
+               goto fail;
+       }
 
-               if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
-                   && skb->len <= 7) {
-                       err = iucv_send_iprm(iucv->path, &txmsg, skb);
+       /* increment and save iucv message tag for msg_completion cbk */
+       txmsg.tag = iucv->send_tag++;
+       memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+       skb_queue_tail(&iucv->send_skb_q, skb);
 
-                       /* on success: there is no message_complete callback
-                        * for an IPRMDATA msg; remove skb from send queue */
-                       if (err == 0) {
-                               skb_unlink(skb, &iucv->send_skb_q);
-                               kfree_skb(skb);
-                       }
+       if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+             && skb->len <= 7) {
+               err = iucv_send_iprm(iucv->path, &txmsg, skb);
 
-                       /* this error should never happen since the
-                        * IUCV_IPRMDATA path flag is set... sever path */
-                       if (err == 0x15) {
-                               iucv_path_sever(iucv->path, NULL);
-                               skb_unlink(skb, &iucv->send_skb_q);
-                               err = -EPIPE;
-                               goto fail;
-                       }
-               } else
-                       err = iucv_message_send(iucv->path, &txmsg, 0, 0,
-                                               (void *) skb->data, skb->len);
-               if (err) {
-                       if (err == 3) {
-                               user_id[8] = 0;
-                               memcpy(user_id, iucv->dst_user_id, 8);
-                               appl_id[8] = 0;
-                               memcpy(appl_id, iucv->dst_name, 8);
-                               pr_err("Application %s on z/VM guest %s"
-                                      " exceeds message limit\n",
-                                      user_id, appl_id);
-                       }
+               /* on success: there is no message_complete callback
+                * for an IPRMDATA msg; remove skb from send queue */
+               if (err == 0) {
+                       skb_unlink(skb, &iucv->send_skb_q);
+                       kfree_skb(skb);
+               }
+
+               /* this error should never happen since the
+                * IUCV_IPRMDATA path flag is set... sever path */
+               if (err == 0x15) {
+                       iucv_path_sever(iucv->path, NULL);
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }
-
-       } else {
-               err = -ENOTCONN;
-               goto out;
+       } else
+               err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+                                       (void *) skb->data, skb->len);
+       if (err) {
+               if (err == 3) {
+                       user_id[8] = 0;
+                       memcpy(user_id, iucv->dst_user_id, 8);
+                       appl_id[8] = 0;
+                       memcpy(appl_id, iucv->dst_name, 8);
+                       pr_err("Application %s on z/VM guest %s"
+                               " exceeds message limit\n",
+                               appl_id, user_id);
+                       err = -EAGAIN;
+               } else
+                       err = -EPIPE;
+               skb_unlink(skb, &iucv->send_skb_q);
+               goto fail;
        }
 
        release_sock(sk);
@@ -1464,7 +1521,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
 
                spin_unlock_irqrestore(&list->lock, flags);
 
-               kfree_skb(this);
+               if (this) {
+                       kfree_skb(this);
+                       /* wake up any process waiting for sending */
+                       iucv_sock_wake_msglim(sk);
+               }
        }
        BUG_ON(!this);