/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
18 | #include "be.h" | |
8788fdc2 | 19 | #include "be_cmds.h" |
6b7c5b94 | 20 | |
8788fdc2 | 21 | static void be_mcc_notify(struct be_adapter *adapter) |
5fb379ee | 22 | { |
8788fdc2 | 23 | struct be_queue_info *mccq = &adapter->mcc_obj.q; |
5fb379ee SP |
24 | u32 val = 0; |
25 | ||
26 | val |= mccq->id & DB_MCCQ_RING_ID_MASK; | |
27 | val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; | |
8788fdc2 | 28 | iowrite32(val, adapter->db + DB_MCCQ_OFFSET); |
5fb379ee SP |
29 | } |
30 | ||
31 | /* To check if valid bit is set, check the entire word as we don't know | |
32 | * the endianness of the data (old entry is host endian while a new entry is | |
33 | * little endian) */ | |
34 | static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl) | |
35 | { | |
36 | if (compl->flags != 0) { | |
37 | compl->flags = le32_to_cpu(compl->flags); | |
38 | BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); | |
39 | return true; | |
40 | } else { | |
41 | return false; | |
42 | } | |
43 | } | |
44 | ||
45 | /* Need to reset the entire word that houses the valid bit */ | |
46 | static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl) | |
47 | { | |
48 | compl->flags = 0; | |
49 | } | |
50 | ||
8788fdc2 | 51 | static int be_mcc_compl_process(struct be_adapter *adapter, |
5fb379ee SP |
52 | struct be_mcc_cq_entry *compl) |
53 | { | |
54 | u16 compl_status, extd_status; | |
55 | ||
56 | /* Just swap the status to host endian; mcc tag is opaquely copied | |
57 | * from mcc_wrb */ | |
58 | be_dws_le_to_cpu(compl, 4); | |
59 | ||
60 | compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & | |
61 | CQE_STATUS_COMPL_MASK; | |
62 | if (compl_status != MCC_STATUS_SUCCESS) { | |
63 | extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & | |
64 | CQE_STATUS_EXTD_MASK; | |
5f0b849e SP |
65 | dev_warn(&adapter->pdev->dev, |
66 | "Error in cmd completion: status(compl/extd)=%d/%d\n", | |
5fb379ee SP |
67 | compl_status, extd_status); |
68 | return -1; | |
69 | } | |
70 | return 0; | |
71 | } | |
72 | ||
a8f447bd | 73 | /* Link state evt is a string of bytes; no need for endian swapping */ |
8788fdc2 | 74 | static void be_async_link_state_process(struct be_adapter *adapter, |
a8f447bd SP |
75 | struct be_async_event_link_state *evt) |
76 | { | |
8788fdc2 SP |
77 | be_link_status_update(adapter, |
78 | evt->port_link_status == ASYNC_EVENT_LINK_UP); | |
a8f447bd SP |
79 | } |
80 | ||
81 | static inline bool is_link_state_evt(u32 trailer) | |
82 | { | |
83 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | |
84 | ASYNC_TRAILER_EVENT_CODE_MASK) == | |
85 | ASYNC_EVENT_CODE_LINK_STATE); | |
86 | } | |
5fb379ee | 87 | |
8788fdc2 | 88 | static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_adapter *adapter) |
5fb379ee | 89 | { |
8788fdc2 | 90 | struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; |
5fb379ee SP |
91 | struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq); |
92 | ||
93 | if (be_mcc_compl_is_new(compl)) { | |
94 | queue_tail_inc(mcc_cq); | |
95 | return compl; | |
96 | } | |
97 | return NULL; | |
98 | } | |
99 | ||
8788fdc2 | 100 | void be_process_mcc(struct be_adapter *adapter) |
5fb379ee SP |
101 | { |
102 | struct be_mcc_cq_entry *compl; | |
103 | int num = 0; | |
104 | ||
8788fdc2 SP |
105 | spin_lock_bh(&adapter->mcc_cq_lock); |
106 | while ((compl = be_mcc_compl_get(adapter))) { | |
a8f447bd SP |
107 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { |
108 | /* Interpret flags as an async trailer */ | |
109 | BUG_ON(!is_link_state_evt(compl->flags)); | |
110 | ||
111 | /* Interpret compl as a async link evt */ | |
8788fdc2 | 112 | be_async_link_state_process(adapter, |
a8f447bd SP |
113 | (struct be_async_event_link_state *) compl); |
114 | } else { | |
8788fdc2 SP |
115 | be_mcc_compl_process(adapter, compl); |
116 | atomic_dec(&adapter->mcc_obj.q.used); | |
5fb379ee SP |
117 | } |
118 | be_mcc_compl_use(compl); | |
119 | num++; | |
120 | } | |
121 | if (num) | |
8788fdc2 SP |
122 | be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num); |
123 | spin_unlock_bh(&adapter->mcc_cq_lock); | |
5fb379ee SP |
124 | } |
125 | ||
6ac7b687 | 126 | /* Wait till no more pending mcc requests are present */ |
8788fdc2 | 127 | static void be_mcc_wait_compl(struct be_adapter *adapter) |
6ac7b687 SP |
128 | { |
129 | #define mcc_timeout 50000 /* 5s timeout */ | |
130 | int i; | |
131 | for (i = 0; i < mcc_timeout; i++) { | |
8788fdc2 SP |
132 | be_process_mcc(adapter); |
133 | if (atomic_read(&adapter->mcc_obj.q.used) == 0) | |
6ac7b687 SP |
134 | break; |
135 | udelay(100); | |
136 | } | |
137 | if (i == mcc_timeout) | |
5f0b849e | 138 | dev_err(&adapter->pdev->dev, "mccq poll timed out\n"); |
6ac7b687 SP |
139 | } |
140 | ||
/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	be_mcc_wait_compl(adapter);
}
147 | ||
5f0b849e | 148 | static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) |
6b7c5b94 SP |
149 | { |
150 | int cnt = 0, wait = 5; | |
151 | u32 ready; | |
152 | ||
153 | do { | |
154 | ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; | |
155 | if (ready) | |
156 | break; | |
157 | ||
158 | if (cnt > 200000) { | |
5f0b849e | 159 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); |
6b7c5b94 SP |
160 | return -1; |
161 | } | |
162 | ||
163 | if (cnt > 50) | |
164 | wait = 200; | |
165 | cnt += wait; | |
166 | udelay(wait); | |
167 | } while (true); | |
168 | ||
169 | return 0; | |
170 | } | |
171 | ||
172 | /* | |
173 | * Insert the mailbox address into the doorbell in two steps | |
5fb379ee | 174 | * Polls on the mbox doorbell till a command completion (or a timeout) occurs |
6b7c5b94 | 175 | */ |
8788fdc2 | 176 | static int be_mbox_db_ring(struct be_adapter *adapter) |
6b7c5b94 SP |
177 | { |
178 | int status; | |
6b7c5b94 | 179 | u32 val = 0; |
8788fdc2 SP |
180 | void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; |
181 | struct be_dma_mem *mbox_mem = &adapter->mbox_mem; | |
6b7c5b94 SP |
182 | struct be_mcc_mailbox *mbox = mbox_mem->va; |
183 | struct be_mcc_cq_entry *cqe = &mbox->cqe; | |
184 | ||
185 | memset(cqe, 0, sizeof(*cqe)); | |
186 | ||
6b7c5b94 SP |
187 | val |= MPU_MAILBOX_DB_HI_MASK; |
188 | /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ | |
189 | val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; | |
190 | iowrite32(val, db); | |
191 | ||
192 | /* wait for ready to be set */ | |
5f0b849e | 193 | status = be_mbox_db_ready_wait(adapter, db); |
6b7c5b94 SP |
194 | if (status != 0) |
195 | return status; | |
196 | ||
197 | val = 0; | |
6b7c5b94 SP |
198 | /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ |
199 | val |= (u32)(mbox_mem->dma >> 4) << 2; | |
200 | iowrite32(val, db); | |
201 | ||
5f0b849e | 202 | status = be_mbox_db_ready_wait(adapter, db); |
6b7c5b94 SP |
203 | if (status != 0) |
204 | return status; | |
205 | ||
5fb379ee SP |
206 | /* A cq entry has been made now */ |
207 | if (be_mcc_compl_is_new(cqe)) { | |
8788fdc2 | 208 | status = be_mcc_compl_process(adapter, &mbox->cqe); |
5fb379ee SP |
209 | be_mcc_compl_use(cqe); |
210 | if (status) | |
211 | return status; | |
212 | } else { | |
5f0b849e | 213 | dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); |
6b7c5b94 SP |
214 | return -1; |
215 | } | |
5fb379ee | 216 | return 0; |
6b7c5b94 SP |
217 | } |
218 | ||
8788fdc2 | 219 | static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) |
6b7c5b94 | 220 | { |
8788fdc2 | 221 | u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); |
6b7c5b94 SP |
222 | |
223 | *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; | |
224 | if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) | |
225 | return -1; | |
226 | else | |
227 | return 0; | |
228 | } | |
229 | ||
8788fdc2 | 230 | static int be_POST_stage_poll(struct be_adapter *adapter, u16 poll_stage) |
6b7c5b94 SP |
231 | { |
232 | u16 stage, cnt, error; | |
233 | for (cnt = 0; cnt < 5000; cnt++) { | |
8788fdc2 | 234 | error = be_POST_stage_get(adapter, &stage); |
6b7c5b94 SP |
235 | if (error) |
236 | return -1; | |
237 | ||
238 | if (stage == poll_stage) | |
239 | break; | |
240 | udelay(1000); | |
241 | } | |
242 | if (stage != poll_stage) | |
243 | return -1; | |
244 | return 0; | |
245 | } | |
246 | ||
247 | ||
8788fdc2 | 248 | int be_cmd_POST(struct be_adapter *adapter) |
6b7c5b94 SP |
249 | { |
250 | u16 stage, error; | |
251 | ||
8788fdc2 | 252 | error = be_POST_stage_get(adapter, &stage); |
6b7c5b94 SP |
253 | if (error) |
254 | goto err; | |
255 | ||
256 | if (stage == POST_STAGE_ARMFW_RDY) | |
257 | return 0; | |
258 | ||
259 | if (stage != POST_STAGE_AWAITING_HOST_RDY) | |
260 | goto err; | |
261 | ||
262 | /* On awaiting host rdy, reset and again poll on awaiting host rdy */ | |
8788fdc2 SP |
263 | iowrite32(POST_STAGE_BE_RESET, adapter->csr + MPU_EP_SEMAPHORE_OFFSET); |
264 | error = be_POST_stage_poll(adapter, POST_STAGE_AWAITING_HOST_RDY); | |
6b7c5b94 SP |
265 | if (error) |
266 | goto err; | |
267 | ||
268 | /* Now kickoff POST and poll on armfw ready */ | |
8788fdc2 SP |
269 | iowrite32(POST_STAGE_HOST_RDY, adapter->csr + MPU_EP_SEMAPHORE_OFFSET); |
270 | error = be_POST_stage_poll(adapter, POST_STAGE_ARMFW_RDY); | |
6b7c5b94 SP |
271 | if (error) |
272 | goto err; | |
273 | ||
274 | return 0; | |
275 | err: | |
276 | printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage); | |
277 | return -1; | |
278 | } | |
279 | ||
280 | static inline void *embedded_payload(struct be_mcc_wrb *wrb) | |
281 | { | |
282 | return wrb->payload.embedded_payload; | |
283 | } | |
284 | ||
285 | static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) | |
286 | { | |
287 | return &wrb->payload.sgl[0]; | |
288 | } | |
289 | ||
290 | /* Don't touch the hdr after it's prepared */ | |
291 | static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, | |
292 | bool embedded, u8 sge_cnt) | |
293 | { | |
294 | if (embedded) | |
295 | wrb->embedded |= MCC_WRB_EMBEDDED_MASK; | |
296 | else | |
297 | wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << | |
298 | MCC_WRB_SGE_CNT_SHIFT; | |
299 | wrb->payload_length = payload_len; | |
300 | be_dws_cpu_to_le(wrb, 20); | |
301 | } | |
302 | ||
303 | /* Don't touch the hdr after it's prepared */ | |
304 | static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, | |
305 | u8 subsystem, u8 opcode, int cmd_len) | |
306 | { | |
307 | req_hdr->opcode = opcode; | |
308 | req_hdr->subsystem = subsystem; | |
309 | req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); | |
310 | } | |
311 | ||
312 | static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, | |
313 | struct be_dma_mem *mem) | |
314 | { | |
315 | int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); | |
316 | u64 dma = (u64)mem->dma; | |
317 | ||
318 | for (i = 0; i < buf_pages; i++) { | |
319 | pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); | |
320 | pages[i].hi = cpu_to_le32(upper_32_bits(dma)); | |
321 | dma += PAGE_SIZE_4K; | |
322 | } | |
323 | } | |
324 | ||
325 | /* Converts interrupt delay in microseconds to multiplier value */ | |
326 | static u32 eq_delay_to_mult(u32 usec_delay) | |
327 | { | |
328 | #define MAX_INTR_RATE 651042 | |
329 | const u32 round = 10; | |
330 | u32 multiplier; | |
331 | ||
332 | if (usec_delay == 0) | |
333 | multiplier = 0; | |
334 | else { | |
335 | u32 interrupt_rate = 1000000 / usec_delay; | |
336 | /* Max delay, corresponding to the lowest interrupt rate */ | |
337 | if (interrupt_rate == 0) | |
338 | multiplier = 1023; | |
339 | else { | |
340 | multiplier = (MAX_INTR_RATE - interrupt_rate) * round; | |
341 | multiplier /= interrupt_rate; | |
342 | /* Round the multiplier to the closest value.*/ | |
343 | multiplier = (multiplier + round/2) / round; | |
344 | multiplier = min(multiplier, (u32)1023); | |
345 | } | |
346 | } | |
347 | return multiplier; | |
348 | } | |
349 | ||
350 | static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) | |
351 | { | |
352 | return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; | |
353 | } | |
354 | ||
5fb379ee SP |
355 | static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq) |
356 | { | |
357 | struct be_mcc_wrb *wrb = NULL; | |
358 | if (atomic_read(&mccq->used) < mccq->len) { | |
359 | wrb = queue_head_node(mccq); | |
360 | queue_head_inc(mccq); | |
361 | atomic_inc(&mccq->used); | |
362 | memset(wrb, 0, sizeof(*wrb)); | |
363 | } | |
364 | return wrb; | |
365 | } | |
366 | ||
8788fdc2 | 367 | int be_cmd_eq_create(struct be_adapter *adapter, |
6b7c5b94 SP |
368 | struct be_queue_info *eq, int eq_delay) |
369 | { | |
8788fdc2 | 370 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
371 | struct be_cmd_req_eq_create *req = embedded_payload(wrb); |
372 | struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); | |
373 | struct be_dma_mem *q_mem = &eq->dma_mem; | |
374 | int status; | |
375 | ||
8788fdc2 | 376 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
377 | memset(wrb, 0, sizeof(*wrb)); |
378 | ||
379 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
380 | ||
381 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
382 | OPCODE_COMMON_EQ_CREATE, sizeof(*req)); | |
383 | ||
384 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); | |
385 | ||
386 | AMAP_SET_BITS(struct amap_eq_context, func, req->context, | |
eec368fb | 387 | be_pci_func(adapter)); |
6b7c5b94 SP |
388 | AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); |
389 | /* 4byte eqe*/ | |
390 | AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); | |
391 | AMAP_SET_BITS(struct amap_eq_context, count, req->context, | |
392 | __ilog2_u32(eq->len/256)); | |
393 | AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, | |
394 | eq_delay_to_mult(eq_delay)); | |
395 | be_dws_cpu_to_le(req->context, sizeof(req->context)); | |
396 | ||
397 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
398 | ||
8788fdc2 | 399 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
400 | if (!status) { |
401 | eq->id = le16_to_cpu(resp->eq_id); | |
402 | eq->created = true; | |
403 | } | |
8788fdc2 | 404 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
405 | return status; |
406 | } | |
407 | ||
8788fdc2 | 408 | int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, |
6b7c5b94 SP |
409 | u8 type, bool permanent, u32 if_handle) |
410 | { | |
8788fdc2 | 411 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
412 | struct be_cmd_req_mac_query *req = embedded_payload(wrb); |
413 | struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); | |
414 | int status; | |
415 | ||
8788fdc2 | 416 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
417 | memset(wrb, 0, sizeof(*wrb)); |
418 | ||
419 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
420 | ||
421 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
422 | OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req)); | |
423 | ||
424 | req->type = type; | |
425 | if (permanent) { | |
426 | req->permanent = 1; | |
427 | } else { | |
428 | req->if_id = cpu_to_le16((u16)if_handle); | |
429 | req->permanent = 0; | |
430 | } | |
431 | ||
8788fdc2 | 432 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
433 | if (!status) |
434 | memcpy(mac_addr, resp->mac.addr, ETH_ALEN); | |
435 | ||
8788fdc2 | 436 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
437 | return status; |
438 | } | |
439 | ||
8788fdc2 | 440 | int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, |
6b7c5b94 SP |
441 | u32 if_id, u32 *pmac_id) |
442 | { | |
8788fdc2 | 443 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
444 | struct be_cmd_req_pmac_add *req = embedded_payload(wrb); |
445 | int status; | |
446 | ||
8788fdc2 | 447 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
448 | memset(wrb, 0, sizeof(*wrb)); |
449 | ||
450 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
451 | ||
452 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
453 | OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); | |
454 | ||
455 | req->if_id = cpu_to_le32(if_id); | |
456 | memcpy(req->mac_address, mac_addr, ETH_ALEN); | |
457 | ||
8788fdc2 | 458 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
459 | if (!status) { |
460 | struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); | |
461 | *pmac_id = le32_to_cpu(resp->pmac_id); | |
462 | } | |
463 | ||
8788fdc2 | 464 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
465 | return status; |
466 | } | |
467 | ||
8788fdc2 | 468 | int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) |
6b7c5b94 | 469 | { |
8788fdc2 | 470 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
471 | struct be_cmd_req_pmac_del *req = embedded_payload(wrb); |
472 | int status; | |
473 | ||
8788fdc2 | 474 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
475 | memset(wrb, 0, sizeof(*wrb)); |
476 | ||
477 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
478 | ||
479 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
480 | OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); | |
481 | ||
482 | req->if_id = cpu_to_le32(if_id); | |
483 | req->pmac_id = cpu_to_le32(pmac_id); | |
484 | ||
8788fdc2 SP |
485 | status = be_mbox_db_ring(adapter); |
486 | spin_unlock(&adapter->mbox_lock); | |
6b7c5b94 SP |
487 | |
488 | return status; | |
489 | } | |
490 | ||
8788fdc2 | 491 | int be_cmd_cq_create(struct be_adapter *adapter, |
6b7c5b94 SP |
492 | struct be_queue_info *cq, struct be_queue_info *eq, |
493 | bool sol_evts, bool no_delay, int coalesce_wm) | |
494 | { | |
8788fdc2 | 495 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
496 | struct be_cmd_req_cq_create *req = embedded_payload(wrb); |
497 | struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); | |
498 | struct be_dma_mem *q_mem = &cq->dma_mem; | |
499 | void *ctxt = &req->context; | |
500 | int status; | |
501 | ||
8788fdc2 | 502 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
503 | memset(wrb, 0, sizeof(*wrb)); |
504 | ||
505 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
506 | ||
507 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
508 | OPCODE_COMMON_CQ_CREATE, sizeof(*req)); | |
509 | ||
510 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); | |
511 | ||
512 | AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); | |
513 | AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); | |
514 | AMAP_SET_BITS(struct amap_cq_context, count, ctxt, | |
515 | __ilog2_u32(cq->len/256)); | |
516 | AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); | |
517 | AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); | |
518 | AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); | |
519 | AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); | |
5fb379ee | 520 | AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); |
eec368fb | 521 | AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter)); |
6b7c5b94 SP |
522 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); |
523 | ||
524 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
525 | ||
8788fdc2 | 526 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
527 | if (!status) { |
528 | cq->id = le16_to_cpu(resp->cq_id); | |
529 | cq->created = true; | |
530 | } | |
8788fdc2 | 531 | spin_unlock(&adapter->mbox_lock); |
5fb379ee SP |
532 | |
533 | return status; | |
534 | } | |
535 | ||
536 | static u32 be_encoded_q_len(int q_len) | |
537 | { | |
538 | u32 len_encoded = fls(q_len); /* log2(len) + 1 */ | |
539 | if (len_encoded == 16) | |
540 | len_encoded = 0; | |
541 | return len_encoded; | |
542 | } | |
543 | ||
8788fdc2 | 544 | int be_cmd_mccq_create(struct be_adapter *adapter, |
5fb379ee SP |
545 | struct be_queue_info *mccq, |
546 | struct be_queue_info *cq) | |
547 | { | |
8788fdc2 | 548 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
5fb379ee SP |
549 | struct be_cmd_req_mcc_create *req = embedded_payload(wrb); |
550 | struct be_dma_mem *q_mem = &mccq->dma_mem; | |
551 | void *ctxt = &req->context; | |
552 | int status; | |
553 | ||
8788fdc2 | 554 | spin_lock(&adapter->mbox_lock); |
5fb379ee SP |
555 | memset(wrb, 0, sizeof(*wrb)); |
556 | ||
557 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
558 | ||
559 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
560 | OPCODE_COMMON_MCC_CREATE, sizeof(*req)); | |
561 | ||
562 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | |
563 | ||
eec368fb | 564 | AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter)); |
5fb379ee SP |
565 | AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); |
566 | AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, | |
567 | be_encoded_q_len(mccq->len)); | |
568 | AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); | |
569 | ||
570 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | |
571 | ||
572 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
573 | ||
8788fdc2 | 574 | status = be_mbox_db_ring(adapter); |
5fb379ee SP |
575 | if (!status) { |
576 | struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); | |
577 | mccq->id = le16_to_cpu(resp->id); | |
578 | mccq->created = true; | |
579 | } | |
8788fdc2 | 580 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
581 | |
582 | return status; | |
583 | } | |
584 | ||
8788fdc2 | 585 | int be_cmd_txq_create(struct be_adapter *adapter, |
6b7c5b94 SP |
586 | struct be_queue_info *txq, |
587 | struct be_queue_info *cq) | |
588 | { | |
8788fdc2 | 589 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
590 | struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb); |
591 | struct be_dma_mem *q_mem = &txq->dma_mem; | |
592 | void *ctxt = &req->context; | |
593 | int status; | |
594 | u32 len_encoded; | |
595 | ||
8788fdc2 | 596 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
597 | memset(wrb, 0, sizeof(*wrb)); |
598 | ||
599 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
600 | ||
601 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, | |
602 | sizeof(*req)); | |
603 | ||
604 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | |
605 | req->ulp_num = BE_ULP1_NUM; | |
606 | req->type = BE_ETH_TX_RING_TYPE_STANDARD; | |
607 | ||
608 | len_encoded = fls(txq->len); /* log2(len) + 1 */ | |
609 | if (len_encoded == 16) | |
610 | len_encoded = 0; | |
611 | AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded); | |
612 | AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt, | |
eec368fb | 613 | be_pci_func(adapter)); |
6b7c5b94 SP |
614 | AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); |
615 | AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); | |
616 | ||
617 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | |
618 | ||
619 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
620 | ||
8788fdc2 | 621 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
622 | if (!status) { |
623 | struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); | |
624 | txq->id = le16_to_cpu(resp->cid); | |
625 | txq->created = true; | |
626 | } | |
8788fdc2 | 627 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
628 | |
629 | return status; | |
630 | } | |
631 | ||
8788fdc2 | 632 | int be_cmd_rxq_create(struct be_adapter *adapter, |
6b7c5b94 SP |
633 | struct be_queue_info *rxq, u16 cq_id, u16 frag_size, |
634 | u16 max_frame_size, u32 if_id, u32 rss) | |
635 | { | |
8788fdc2 | 636 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
637 | struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb); |
638 | struct be_dma_mem *q_mem = &rxq->dma_mem; | |
639 | int status; | |
640 | ||
8788fdc2 | 641 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
642 | memset(wrb, 0, sizeof(*wrb)); |
643 | ||
644 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
645 | ||
646 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE, | |
647 | sizeof(*req)); | |
648 | ||
649 | req->cq_id = cpu_to_le16(cq_id); | |
650 | req->frag_size = fls(frag_size) - 1; | |
651 | req->num_pages = 2; | |
652 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
653 | req->interface_id = cpu_to_le32(if_id); | |
654 | req->max_frame_size = cpu_to_le16(max_frame_size); | |
655 | req->rss_queue = cpu_to_le32(rss); | |
656 | ||
8788fdc2 | 657 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
658 | if (!status) { |
659 | struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); | |
660 | rxq->id = le16_to_cpu(resp->id); | |
661 | rxq->created = true; | |
662 | } | |
8788fdc2 | 663 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
664 | |
665 | return status; | |
666 | } | |
667 | ||
668 | /* Generic destroyer function for all types of queues */ | |
8788fdc2 | 669 | int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, |
6b7c5b94 SP |
670 | int queue_type) |
671 | { | |
8788fdc2 | 672 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
673 | struct be_cmd_req_q_destroy *req = embedded_payload(wrb); |
674 | u8 subsys = 0, opcode = 0; | |
675 | int status; | |
676 | ||
8788fdc2 | 677 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
678 | |
679 | memset(wrb, 0, sizeof(*wrb)); | |
680 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
681 | ||
682 | switch (queue_type) { | |
683 | case QTYPE_EQ: | |
684 | subsys = CMD_SUBSYSTEM_COMMON; | |
685 | opcode = OPCODE_COMMON_EQ_DESTROY; | |
686 | break; | |
687 | case QTYPE_CQ: | |
688 | subsys = CMD_SUBSYSTEM_COMMON; | |
689 | opcode = OPCODE_COMMON_CQ_DESTROY; | |
690 | break; | |
691 | case QTYPE_TXQ: | |
692 | subsys = CMD_SUBSYSTEM_ETH; | |
693 | opcode = OPCODE_ETH_TX_DESTROY; | |
694 | break; | |
695 | case QTYPE_RXQ: | |
696 | subsys = CMD_SUBSYSTEM_ETH; | |
697 | opcode = OPCODE_ETH_RX_DESTROY; | |
698 | break; | |
5fb379ee SP |
699 | case QTYPE_MCCQ: |
700 | subsys = CMD_SUBSYSTEM_COMMON; | |
701 | opcode = OPCODE_COMMON_MCC_DESTROY; | |
702 | break; | |
6b7c5b94 | 703 | default: |
5f0b849e | 704 | BUG(); |
6b7c5b94 SP |
705 | } |
706 | be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); | |
707 | req->id = cpu_to_le16(q->id); | |
708 | ||
8788fdc2 | 709 | status = be_mbox_db_ring(adapter); |
5f0b849e | 710 | |
8788fdc2 | 711 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
712 | |
713 | return status; | |
714 | } | |
715 | ||
716 | /* Create an rx filtering policy configuration on an i/f */ | |
8788fdc2 | 717 | int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac, |
6b7c5b94 SP |
718 | bool pmac_invalid, u32 *if_handle, u32 *pmac_id) |
719 | { | |
8788fdc2 | 720 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
721 | struct be_cmd_req_if_create *req = embedded_payload(wrb); |
722 | int status; | |
723 | ||
8788fdc2 | 724 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
725 | memset(wrb, 0, sizeof(*wrb)); |
726 | ||
727 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
728 | ||
729 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
730 | OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); | |
731 | ||
732 | req->capability_flags = cpu_to_le32(flags); | |
733 | req->enable_flags = cpu_to_le32(flags); | |
734 | if (!pmac_invalid) | |
735 | memcpy(req->mac_addr, mac, ETH_ALEN); | |
736 | ||
8788fdc2 | 737 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
738 | if (!status) { |
739 | struct be_cmd_resp_if_create *resp = embedded_payload(wrb); | |
740 | *if_handle = le32_to_cpu(resp->interface_id); | |
741 | if (!pmac_invalid) | |
742 | *pmac_id = le32_to_cpu(resp->pmac_id); | |
743 | } | |
744 | ||
8788fdc2 | 745 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
746 | return status; |
747 | } | |
748 | ||
8788fdc2 | 749 | int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) |
6b7c5b94 | 750 | { |
8788fdc2 | 751 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
752 | struct be_cmd_req_if_destroy *req = embedded_payload(wrb); |
753 | int status; | |
754 | ||
8788fdc2 | 755 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
756 | memset(wrb, 0, sizeof(*wrb)); |
757 | ||
758 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
759 | ||
760 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
761 | OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); | |
762 | ||
763 | req->interface_id = cpu_to_le32(interface_id); | |
8788fdc2 | 764 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 765 | |
8788fdc2 | 766 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
767 | |
768 | return status; | |
769 | } | |
770 | ||
771 | /* Get stats is a non embedded command: the request is not embedded inside | |
772 | * WRB but is a separate dma memory block | |
773 | */ | |
8788fdc2 | 774 | int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) |
6b7c5b94 | 775 | { |
8788fdc2 | 776 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
777 | struct be_cmd_req_get_stats *req = nonemb_cmd->va; |
778 | struct be_sge *sge = nonembedded_sgl(wrb); | |
779 | int status; | |
780 | ||
8788fdc2 | 781 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
782 | memset(wrb, 0, sizeof(*wrb)); |
783 | ||
784 | memset(req, 0, sizeof(*req)); | |
785 | ||
786 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); | |
787 | ||
788 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | |
789 | OPCODE_ETH_GET_STATISTICS, sizeof(*req)); | |
790 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | |
791 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); | |
792 | sge->len = cpu_to_le32(nonemb_cmd->size); | |
793 | ||
8788fdc2 | 794 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
795 | if (!status) { |
796 | struct be_cmd_resp_get_stats *resp = nonemb_cmd->va; | |
797 | be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); | |
798 | } | |
799 | ||
8788fdc2 | 800 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
801 | return status; |
802 | } | |
803 | ||
8788fdc2 | 804 | int be_cmd_link_status_query(struct be_adapter *adapter, |
a8f447bd | 805 | bool *link_up) |
6b7c5b94 | 806 | { |
8788fdc2 | 807 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
808 | struct be_cmd_req_link_status *req = embedded_payload(wrb); |
809 | int status; | |
810 | ||
8788fdc2 | 811 | spin_lock(&adapter->mbox_lock); |
a8f447bd SP |
812 | |
813 | *link_up = false; | |
6b7c5b94 SP |
814 | memset(wrb, 0, sizeof(*wrb)); |
815 | ||
816 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
817 | ||
818 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
819 | OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req)); | |
820 | ||
8788fdc2 | 821 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
822 | if (!status) { |
823 | struct be_cmd_resp_link_status *resp = embedded_payload(wrb); | |
a8f447bd SP |
824 | if (resp->mac_speed != PHY_LINK_SPEED_ZERO) |
825 | *link_up = true; | |
6b7c5b94 SP |
826 | } |
827 | ||
8788fdc2 | 828 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
829 | return status; |
830 | } | |
831 | ||
8788fdc2 | 832 | int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) |
6b7c5b94 | 833 | { |
8788fdc2 | 834 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
835 | struct be_cmd_req_get_fw_version *req = embedded_payload(wrb); |
836 | int status; | |
837 | ||
8788fdc2 | 838 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
839 | memset(wrb, 0, sizeof(*wrb)); |
840 | ||
841 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
842 | ||
843 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
844 | OPCODE_COMMON_GET_FW_VERSION, sizeof(*req)); | |
845 | ||
8788fdc2 | 846 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
847 | if (!status) { |
848 | struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); | |
849 | strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); | |
850 | } | |
851 | ||
8788fdc2 | 852 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
853 | return status; |
854 | } | |
855 | ||
856 | /* set the EQ delay interval of an EQ to specified value */ | |
8788fdc2 | 857 | int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) |
6b7c5b94 | 858 | { |
8788fdc2 | 859 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
860 | struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb); |
861 | int status; | |
862 | ||
8788fdc2 | 863 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
864 | memset(wrb, 0, sizeof(*wrb)); |
865 | ||
866 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
867 | ||
868 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
869 | OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); | |
870 | ||
871 | req->num_eq = cpu_to_le32(1); | |
872 | req->delay[0].eq_id = cpu_to_le32(eq_id); | |
873 | req->delay[0].phase = 0; | |
874 | req->delay[0].delay_multiplier = cpu_to_le32(eqd); | |
875 | ||
8788fdc2 | 876 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 877 | |
8788fdc2 | 878 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
879 | return status; |
880 | } | |
881 | ||
8788fdc2 | 882 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
6b7c5b94 SP |
883 | u32 num, bool untagged, bool promiscuous) |
884 | { | |
8788fdc2 | 885 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
886 | struct be_cmd_req_vlan_config *req = embedded_payload(wrb); |
887 | int status; | |
888 | ||
8788fdc2 | 889 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
890 | memset(wrb, 0, sizeof(*wrb)); |
891 | ||
892 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
893 | ||
894 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
895 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req)); | |
896 | ||
897 | req->interface_id = if_id; | |
898 | req->promiscuous = promiscuous; | |
899 | req->untagged = untagged; | |
900 | req->num_vlan = num; | |
901 | if (!promiscuous) { | |
902 | memcpy(req->normal_vlan, vtag_array, | |
903 | req->num_vlan * sizeof(vtag_array[0])); | |
904 | } | |
905 | ||
8788fdc2 | 906 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 907 | |
8788fdc2 | 908 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
909 | return status; |
910 | } | |
911 | ||
6ac7b687 | 912 | /* Use MCC for this command as it may be called in BH context */ |
8788fdc2 | 913 | int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en) |
6b7c5b94 | 914 | { |
6ac7b687 SP |
915 | struct be_mcc_wrb *wrb; |
916 | struct be_cmd_req_promiscuous_config *req; | |
6b7c5b94 | 917 | |
8788fdc2 | 918 | spin_lock_bh(&adapter->mcc_lock); |
6ac7b687 | 919 | |
8788fdc2 | 920 | wrb = wrb_from_mcc(&adapter->mcc_obj.q); |
6ac7b687 SP |
921 | BUG_ON(!wrb); |
922 | ||
923 | req = embedded_payload(wrb); | |
6b7c5b94 SP |
924 | |
925 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
926 | ||
927 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | |
928 | OPCODE_ETH_PROMISCUOUS, sizeof(*req)); | |
929 | ||
930 | if (port_num) | |
931 | req->port1_promiscuous = en; | |
932 | else | |
933 | req->port0_promiscuous = en; | |
934 | ||
8788fdc2 | 935 | be_mcc_notify_wait(adapter); |
6b7c5b94 | 936 | |
8788fdc2 | 937 | spin_unlock_bh(&adapter->mcc_lock); |
6ac7b687 | 938 | return 0; |
6b7c5b94 SP |
939 | } |
940 | ||
6ac7b687 SP |
941 | /* |
942 | * Use MCC for this command as it may be called in BH context | |
943 | * (mc == NULL) => multicast promiscous | |
944 | */ | |
8788fdc2 | 945 | int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, |
24307eef | 946 | struct dev_mc_list *mc_list, u32 mc_count) |
6b7c5b94 | 947 | { |
6ac7b687 SP |
948 | #define BE_MAX_MC 32 /* set mcast promisc if > 32 */ |
949 | struct be_mcc_wrb *wrb; | |
950 | struct be_cmd_req_mcast_mac_config *req; | |
6b7c5b94 | 951 | |
8788fdc2 | 952 | spin_lock_bh(&adapter->mcc_lock); |
6ac7b687 | 953 | |
8788fdc2 | 954 | wrb = wrb_from_mcc(&adapter->mcc_obj.q); |
6ac7b687 SP |
955 | BUG_ON(!wrb); |
956 | ||
957 | req = embedded_payload(wrb); | |
6b7c5b94 SP |
958 | |
959 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
960 | ||
961 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
962 | OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); | |
963 | ||
964 | req->interface_id = if_id; | |
24307eef SP |
965 | if (mc_list && mc_count <= BE_MAX_MC) { |
966 | int i; | |
967 | struct dev_mc_list *mc; | |
968 | ||
969 | req->num_mac = cpu_to_le16(mc_count); | |
970 | ||
971 | for (mc = mc_list, i = 0; mc; mc = mc->next, i++) | |
972 | memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); | |
973 | } else { | |
974 | req->promiscuous = 1; | |
6b7c5b94 SP |
975 | } |
976 | ||
8788fdc2 | 977 | be_mcc_notify_wait(adapter); |
6b7c5b94 | 978 | |
8788fdc2 | 979 | spin_unlock_bh(&adapter->mcc_lock); |
6ac7b687 SP |
980 | |
981 | return 0; | |
6b7c5b94 SP |
982 | } |
983 | ||
8788fdc2 | 984 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) |
6b7c5b94 | 985 | { |
8788fdc2 | 986 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
987 | struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); |
988 | int status; | |
989 | ||
8788fdc2 | 990 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
991 | |
992 | memset(wrb, 0, sizeof(*wrb)); | |
993 | ||
994 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
995 | ||
996 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
997 | OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req)); | |
998 | ||
999 | req->tx_flow_control = cpu_to_le16((u16)tx_fc); | |
1000 | req->rx_flow_control = cpu_to_le16((u16)rx_fc); | |
1001 | ||
8788fdc2 | 1002 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 1003 | |
8788fdc2 | 1004 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
1005 | return status; |
1006 | } | |
1007 | ||
8788fdc2 | 1008 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) |
6b7c5b94 | 1009 | { |
8788fdc2 | 1010 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
1011 | struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); |
1012 | int status; | |
1013 | ||
8788fdc2 | 1014 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
1015 | |
1016 | memset(wrb, 0, sizeof(*wrb)); | |
1017 | ||
1018 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
1019 | ||
1020 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1021 | OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req)); | |
1022 | ||
8788fdc2 | 1023 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
1024 | if (!status) { |
1025 | struct be_cmd_resp_get_flow_control *resp = | |
1026 | embedded_payload(wrb); | |
1027 | *tx_fc = le16_to_cpu(resp->tx_flow_control); | |
1028 | *rx_fc = le16_to_cpu(resp->rx_flow_control); | |
1029 | } | |
1030 | ||
8788fdc2 | 1031 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
1032 | return status; |
1033 | } | |
1034 | ||
8788fdc2 | 1035 | int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num) |
6b7c5b94 | 1036 | { |
8788fdc2 | 1037 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
1038 | struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); |
1039 | int status; | |
1040 | ||
8788fdc2 | 1041 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
1042 | |
1043 | memset(wrb, 0, sizeof(*wrb)); | |
1044 | ||
1045 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
1046 | ||
1047 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1048 | OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); | |
1049 | ||
8788fdc2 | 1050 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
1051 | if (!status) { |
1052 | struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); | |
1053 | *port_num = le32_to_cpu(resp->phys_port); | |
1054 | } | |
1055 | ||
8788fdc2 | 1056 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
1057 | return status; |
1058 | } |