4 * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <rte_ethdev.h>
37 #include <rte_cycles.h>
38 #include <rte_malloc.h>
41 #include "lio_23xx_vf.h"
42 #include "lio_23xx_reg.h"
46 cn23xx_vf_reset_io_queues(struct lio_device
*lio_dev
, uint32_t num_queues
)
48 uint32_t loop
= CN23XX_VF_BUSY_READING_REG_LOOP_COUNT
;
52 PMD_INIT_FUNC_TRACE();
54 for (q_no
= 0; q_no
< num_queues
; q_no
++) {
55 /* set RST bit to 1. This bit applies to both IQ and OQ */
56 d64
= lio_read_csr64(lio_dev
,
57 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
58 d64
= d64
| CN23XX_PKT_INPUT_CTL_RST
;
59 lio_write_csr64(lio_dev
, CN23XX_SLI_IQ_PKT_CONTROL64(q_no
),
63 /* wait until the RST bit is clear or the RST and QUIET bits are set */
64 for (q_no
= 0; q_no
< num_queues
; q_no
++) {
65 volatile uint64_t reg_val
;
67 reg_val
= lio_read_csr64(lio_dev
,
68 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
69 while ((reg_val
& CN23XX_PKT_INPUT_CTL_RST
) &&
70 !(reg_val
& CN23XX_PKT_INPUT_CTL_QUIET
) &&
72 reg_val
= lio_read_csr64(
74 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
80 "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
85 reg_val
= reg_val
& ~CN23XX_PKT_INPUT_CTL_RST
;
86 lio_write_csr64(lio_dev
, CN23XX_SLI_IQ_PKT_CONTROL64(q_no
),
89 reg_val
= lio_read_csr64(
90 lio_dev
, CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
91 if (reg_val
& CN23XX_PKT_INPUT_CTL_RST
) {
93 "clearing the reset failed for qno: %lu\n",
103 cn23xx_vf_setup_global_input_regs(struct lio_device
*lio_dev
)
108 PMD_INIT_FUNC_TRACE();
110 if (cn23xx_vf_reset_io_queues(lio_dev
,
111 lio_dev
->sriov_info
.rings_per_vf
))
114 for (q_no
= 0; q_no
< (lio_dev
->sriov_info
.rings_per_vf
); q_no
++) {
115 lio_write_csr64(lio_dev
, CN23XX_SLI_IQ_DOORBELL(q_no
),
118 d64
= lio_read_csr64(lio_dev
,
119 CN23XX_SLI_IQ_INSTR_COUNT64(q_no
));
121 d64
&= 0xEFFFFFFFFFFFFFFFL
;
123 lio_write_csr64(lio_dev
, CN23XX_SLI_IQ_INSTR_COUNT64(q_no
),
126 /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
129 lio_write_csr64(lio_dev
, CN23XX_SLI_IQ_PKT_CONTROL64(q_no
),
130 CN23XX_PKT_INPUT_CTL_MASK
);
137 cn23xx_vf_setup_global_output_regs(struct lio_device
*lio_dev
)
142 PMD_INIT_FUNC_TRACE();
144 for (q_no
= 0; q_no
< lio_dev
->sriov_info
.rings_per_vf
; q_no
++) {
145 lio_write_csr(lio_dev
, CN23XX_SLI_OQ_PKTS_CREDIT(q_no
),
149 lio_read_csr(lio_dev
, CN23XX_SLI_OQ_PKTS_SENT(q_no
));
151 reg_val
&= 0xEFFFFFFFFFFFFFFFL
;
154 lio_read_csr(lio_dev
, CN23XX_SLI_OQ_PKT_CONTROL(q_no
));
156 /* set IPTR & DPTR */
158 (CN23XX_PKT_OUTPUT_CTL_IPTR
| CN23XX_PKT_OUTPUT_CTL_DPTR
);
161 reg_val
&= ~(CN23XX_PKT_OUTPUT_CTL_BMODE
);
163 /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
164 * for Output Queue Scatter List
167 reg_val
&= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P
);
168 reg_val
&= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P
);
170 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
171 reg_val
&= ~(CN23XX_PKT_OUTPUT_CTL_ES_P
);
172 #elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
173 reg_val
|= (CN23XX_PKT_OUTPUT_CTL_ES_P
);
175 /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
176 * for Output Queue Data
179 reg_val
&= ~(CN23XX_PKT_OUTPUT_CTL_ROR
);
180 reg_val
&= ~(CN23XX_PKT_OUTPUT_CTL_NSR
);
182 reg_val
|= (CN23XX_PKT_OUTPUT_CTL_ES
);
184 /* write all the selected settings */
185 lio_write_csr(lio_dev
, CN23XX_SLI_OQ_PKT_CONTROL(q_no
),
/* Program all device registers for this VF: input regs first (may fail),
 * then output regs. Returns 0 on success, -1 on input-reg setup failure.
 */
static int
cn23xx_vf_setup_device_regs(struct lio_device *lio_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (cn23xx_vf_setup_global_input_regs(lio_dev))
		return -1;

	cn23xx_vf_setup_global_output_regs(lio_dev);

	return 0;
}
204 cn23xx_vf_setup_iq_regs(struct lio_device
*lio_dev
, uint32_t iq_no
)
206 struct lio_instr_queue
*iq
= lio_dev
->instr_queue
[iq_no
];
207 uint64_t pkt_in_done
= 0;
209 PMD_INIT_FUNC_TRACE();
211 /* Write the start of the input queue's ring and its size */
212 lio_write_csr64(lio_dev
, CN23XX_SLI_IQ_BASE_ADDR64(iq_no
),
214 lio_write_csr(lio_dev
, CN23XX_SLI_IQ_SIZE(iq_no
), iq
->max_count
);
216 /* Remember the doorbell & instruction count register addr
219 iq
->doorbell_reg
= (uint8_t *)lio_dev
->hw_addr
+
220 CN23XX_SLI_IQ_DOORBELL(iq_no
);
221 iq
->inst_cnt_reg
= (uint8_t *)lio_dev
->hw_addr
+
222 CN23XX_SLI_IQ_INSTR_COUNT64(iq_no
);
223 lio_dev_dbg(lio_dev
, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
224 iq_no
, iq
->doorbell_reg
, iq
->inst_cnt_reg
);
226 /* Store the current instruction counter (used in flush_iq
229 pkt_in_done
= rte_read64(iq
->inst_cnt_reg
);
231 /* Clear the count by writing back what we read, but don't
232 * enable data traffic here
234 rte_write64(pkt_in_done
, iq
->inst_cnt_reg
);
238 cn23xx_vf_setup_oq_regs(struct lio_device
*lio_dev
, uint32_t oq_no
)
240 struct lio_droq
*droq
= lio_dev
->droq
[oq_no
];
242 PMD_INIT_FUNC_TRACE();
244 lio_write_csr64(lio_dev
, CN23XX_SLI_OQ_BASE_ADDR64(oq_no
),
245 droq
->desc_ring_dma
);
246 lio_write_csr(lio_dev
, CN23XX_SLI_OQ_SIZE(oq_no
), droq
->max_count
);
248 lio_write_csr(lio_dev
, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no
),
249 (droq
->buffer_size
| (OCTEON_RH_SIZE
<< 16)));
251 /* Get the mapped address of the pkt_sent and pkts_credit regs */
252 droq
->pkts_sent_reg
= (uint8_t *)lio_dev
->hw_addr
+
253 CN23XX_SLI_OQ_PKTS_SENT(oq_no
);
254 droq
->pkts_credit_reg
= (uint8_t *)lio_dev
->hw_addr
+
255 CN23XX_SLI_OQ_PKTS_CREDIT(oq_no
);
259 cn23xx_vf_free_mbox(struct lio_device
*lio_dev
)
261 PMD_INIT_FUNC_TRACE();
263 rte_free(lio_dev
->mbox
[0]);
264 lio_dev
->mbox
[0] = NULL
;
266 rte_free(lio_dev
->mbox
);
267 lio_dev
->mbox
= NULL
;
271 cn23xx_vf_setup_mbox(struct lio_device
*lio_dev
)
273 struct lio_mbox
*mbox
;
275 PMD_INIT_FUNC_TRACE();
277 if (lio_dev
->mbox
== NULL
) {
278 lio_dev
->mbox
= rte_zmalloc(NULL
, sizeof(void *), 0);
279 if (lio_dev
->mbox
== NULL
)
283 mbox
= rte_zmalloc(NULL
, sizeof(struct lio_mbox
), 0);
285 rte_free(lio_dev
->mbox
);
286 lio_dev
->mbox
= NULL
;
290 rte_spinlock_init(&mbox
->lock
);
292 mbox
->lio_dev
= lio_dev
;
296 mbox
->state
= LIO_MBOX_STATE_IDLE
;
298 /* VF mbox interrupt reg */
299 mbox
->mbox_int_reg
= (uint8_t *)lio_dev
->hw_addr
+
300 CN23XX_VF_SLI_PKT_MBOX_INT(0);
301 /* VF reads from SIG0 reg */
302 mbox
->mbox_read_reg
= (uint8_t *)lio_dev
->hw_addr
+
303 CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
304 /* VF writes into SIG1 reg */
305 mbox
->mbox_write_reg
= (uint8_t *)lio_dev
->hw_addr
+
306 CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);
308 lio_dev
->mbox
[0] = mbox
;
310 rte_write64(LIO_PFVFSIG
, mbox
->mbox_read_reg
);
316 cn23xx_vf_enable_io_queues(struct lio_device
*lio_dev
)
320 PMD_INIT_FUNC_TRACE();
322 for (q_no
= 0; q_no
< lio_dev
->num_iqs
; q_no
++) {
325 /* set the corresponding IQ IS_64B bit */
326 if (lio_dev
->io_qmask
.iq64B
& (1ULL << q_no
)) {
327 reg_val
= lio_read_csr64(
329 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
330 reg_val
= reg_val
| CN23XX_PKT_INPUT_CTL_IS_64B
;
331 lio_write_csr64(lio_dev
,
332 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
),
336 /* set the corresponding IQ ENB bit */
337 if (lio_dev
->io_qmask
.iq
& (1ULL << q_no
)) {
338 reg_val
= lio_read_csr64(
340 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
341 reg_val
= reg_val
| CN23XX_PKT_INPUT_CTL_RING_ENB
;
342 lio_write_csr64(lio_dev
,
343 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
),
347 for (q_no
= 0; q_no
< lio_dev
->num_oqs
; q_no
++) {
350 /* set the corresponding OQ ENB bit */
351 if (lio_dev
->io_qmask
.oq
& (1ULL << q_no
)) {
352 reg_val
= lio_read_csr(
354 CN23XX_SLI_OQ_PKT_CONTROL(q_no
));
355 reg_val
= reg_val
| CN23XX_PKT_OUTPUT_CTL_RING_ENB
;
356 lio_write_csr(lio_dev
,
357 CN23XX_SLI_OQ_PKT_CONTROL(q_no
),
366 cn23xx_vf_disable_io_queues(struct lio_device
*lio_dev
)
370 PMD_INIT_FUNC_TRACE();
372 /* per HRM, rings can only be disabled via reset operation,
373 * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
375 num_queues
= lio_dev
->num_iqs
;
376 if (num_queues
< lio_dev
->num_oqs
)
377 num_queues
= lio_dev
->num_oqs
;
379 cn23xx_vf_reset_io_queues(lio_dev
, num_queues
);
383 cn23xx_vf_ask_pf_to_do_flr(struct lio_device
*lio_dev
)
385 struct lio_mbox_cmd mbox_cmd
;
387 memset(&mbox_cmd
, 0, sizeof(struct lio_mbox_cmd
));
388 mbox_cmd
.msg
.s
.type
= LIO_MBOX_REQUEST
;
389 mbox_cmd
.msg
.s
.resp_needed
= 0;
390 mbox_cmd
.msg
.s
.cmd
= LIO_VF_FLR_REQUEST
;
391 mbox_cmd
.msg
.s
.len
= 1;
393 mbox_cmd
.recv_len
= 0;
394 mbox_cmd
.recv_status
= 0;
398 lio_mbox_write(lio_dev
, &mbox_cmd
);
402 cn23xx_pfvf_hs_callback(struct lio_device
*lio_dev
,
403 struct lio_mbox_cmd
*cmd
, void *arg
)
407 PMD_INIT_FUNC_TRACE();
409 rte_memcpy((uint8_t *)&lio_dev
->pfvf_hsword
, cmd
->msg
.s
.params
, 6);
410 if (cmd
->recv_len
> 1) {
411 struct lio_version
*lio_ver
= (struct lio_version
*)cmd
->data
;
413 major
= lio_ver
->major
;
417 rte_atomic64_set((rte_atomic64_t
*)arg
, major
| 1);
421 cn23xx_pfvf_handshake(struct lio_device
*lio_dev
)
423 struct lio_mbox_cmd mbox_cmd
;
424 struct lio_version
*lio_ver
= (struct lio_version
*)&mbox_cmd
.data
[0];
425 uint32_t q_no
, count
= 0;
426 rte_atomic64_t status
;
431 PMD_INIT_FUNC_TRACE();
433 /* Sending VF_ACTIVE indication to the PF driver */
434 lio_dev_dbg(lio_dev
, "requesting info from PF\n");
436 mbox_cmd
.msg
.mbox_msg64
= 0;
437 mbox_cmd
.msg
.s
.type
= LIO_MBOX_REQUEST
;
438 mbox_cmd
.msg
.s
.resp_needed
= 1;
439 mbox_cmd
.msg
.s
.cmd
= LIO_VF_ACTIVE
;
440 mbox_cmd
.msg
.s
.len
= 2;
441 mbox_cmd
.data
[0] = 0;
442 lio_ver
->major
= LIO_BASE_MAJOR_VERSION
;
443 lio_ver
->minor
= LIO_BASE_MINOR_VERSION
;
444 lio_ver
->micro
= LIO_BASE_MICRO_VERSION
;
446 mbox_cmd
.recv_len
= 0;
447 mbox_cmd
.recv_status
= 0;
448 mbox_cmd
.fn
= (lio_mbox_callback
)cn23xx_pfvf_hs_callback
;
449 mbox_cmd
.fn_arg
= (void *)&status
;
451 if (lio_mbox_write(lio_dev
, &mbox_cmd
)) {
452 lio_dev_err(lio_dev
, "Write to mailbox failed\n");
456 rte_atomic64_set(&status
, 0);
460 } while ((rte_atomic64_read(&status
) == 0) && (count
++ < 10000));
462 ret
= rte_atomic64_read(&status
);
464 lio_dev_err(lio_dev
, "cn23xx_pfvf_handshake timeout\n");
468 for (q_no
= 0; q_no
< lio_dev
->num_iqs
; q_no
++)
469 lio_dev
->instr_queue
[q_no
]->txpciq
.s
.pkind
=
470 lio_dev
->pfvf_hsword
.pkind
;
472 vfmajor
= LIO_BASE_MAJOR_VERSION
;
474 if (pfmajor
!= vfmajor
) {
476 "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
481 "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
486 lio_dev_dbg(lio_dev
, "got data from PF pkind is %d\n",
487 lio_dev
->pfvf_hsword
.pkind
);
493 cn23xx_vf_handle_mbox(struct lio_device
*lio_dev
)
495 uint64_t mbox_int_val
;
497 /* read and clear by writing 1 */
498 mbox_int_val
= rte_read64(lio_dev
->mbox
[0]->mbox_int_reg
);
499 rte_write64(mbox_int_val
, lio_dev
->mbox
[0]->mbox_int_reg
);
500 if (lio_mbox_read(lio_dev
->mbox
[0]))
501 lio_mbox_process_message(lio_dev
->mbox
[0]);
505 cn23xx_vf_setup_device(struct lio_device
*lio_dev
)
509 PMD_INIT_FUNC_TRACE();
511 /* INPUT_CONTROL[RPVF] gives the VF IOq count */
512 reg_val
= lio_read_csr64(lio_dev
, CN23XX_SLI_IQ_PKT_CONTROL64(0));
514 lio_dev
->pf_num
= (reg_val
>> CN23XX_PKT_INPUT_CTL_PF_NUM_POS
) &
515 CN23XX_PKT_INPUT_CTL_PF_NUM_MASK
;
516 lio_dev
->vf_num
= (reg_val
>> CN23XX_PKT_INPUT_CTL_VF_NUM_POS
) &
517 CN23XX_PKT_INPUT_CTL_VF_NUM_MASK
;
519 reg_val
= reg_val
>> CN23XX_PKT_INPUT_CTL_RPVF_POS
;
521 lio_dev
->sriov_info
.rings_per_vf
=
522 reg_val
& CN23XX_PKT_INPUT_CTL_RPVF_MASK
;
524 lio_dev
->default_config
= lio_get_conf(lio_dev
);
525 if (lio_dev
->default_config
== NULL
)
528 lio_dev
->fn_list
.setup_iq_regs
= cn23xx_vf_setup_iq_regs
;
529 lio_dev
->fn_list
.setup_oq_regs
= cn23xx_vf_setup_oq_regs
;
530 lio_dev
->fn_list
.setup_mbox
= cn23xx_vf_setup_mbox
;
531 lio_dev
->fn_list
.free_mbox
= cn23xx_vf_free_mbox
;
533 lio_dev
->fn_list
.setup_device_regs
= cn23xx_vf_setup_device_regs
;
535 lio_dev
->fn_list
.enable_io_queues
= cn23xx_vf_enable_io_queues
;
536 lio_dev
->fn_list
.disable_io_queues
= cn23xx_vf_disable_io_queues
;
542 cn23xx_vf_set_io_queues_off(struct lio_device
*lio_dev
)
544 uint32_t loop
= CN23XX_VF_BUSY_READING_REG_LOOP_COUNT
;
547 /* Disable the i/p and o/p queues for this Octeon.
548 * IOQs will already be in reset.
549 * If RST bit is set, wait for Quiet bit to be set
550 * Once Quiet bit is set, clear the RST bit
552 PMD_INIT_FUNC_TRACE();
554 for (q_no
= 0; q_no
< lio_dev
->sriov_info
.rings_per_vf
; q_no
++) {
555 volatile uint64_t reg_val
;
557 reg_val
= lio_read_csr64(lio_dev
,
558 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
559 while ((reg_val
& CN23XX_PKT_INPUT_CTL_RST
) && !(reg_val
&
560 CN23XX_PKT_INPUT_CTL_QUIET
) && loop
) {
561 reg_val
= lio_read_csr64(
563 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
569 "clearing the reset reg failed or setting the quiet reg failed for qno %lu\n",
570 (unsigned long)q_no
);
574 reg_val
= reg_val
& ~CN23XX_PKT_INPUT_CTL_RST
;
575 lio_write_csr64(lio_dev
, CN23XX_SLI_IQ_PKT_CONTROL64(q_no
),
578 reg_val
= lio_read_csr64(lio_dev
,
579 CN23XX_SLI_IQ_PKT_CONTROL64(q_no
));
580 if (reg_val
& CN23XX_PKT_INPUT_CTL_RST
) {
581 lio_dev_err(lio_dev
, "unable to reset qno %lu\n",
582 (unsigned long)q_no
);