]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of copyright holder nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #ifndef ENA_ETH_COM_H_ | |
35 | #define ENA_ETH_COM_H_ | |
36 | ||
37 | #if defined(__cplusplus) | |
38 | extern "C" { | |
39 | #endif | |
40 | #include "ena_com.h" | |
41 | ||
42 | /* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */ | |
43 | #define ENA_COMP_HEAD_THRESH 4 | |
44 | ||
/* Per-packet TX submission context, filled by the caller and consumed
 * by ena_com_prepare_tx().
 */
struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	/* Scatter-gather list of num_bufs data buffers */
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	/* Request id echoed back in the TX completion descriptor */
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	/* Boolean flags (0/1) selecting per-packet offloads */
	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};
67 | ||
/* Per-packet RX completion context, passed to ena_com_rx_pkt().
 * The caller provides ena_bufs/max_bufs; the remaining fields describe
 * the received packet.
 */
struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	/* True when the device actually validated the L4 checksum;
	 * l4_csum_err is only meaningful in that case.
	 */
	bool l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	/* Number of completion descriptors consumed by this packet */
	u16 descs;
	int max_bufs;
};
81 | ||
9f95a23c TL |
/* Check whether a doorbell must be rung before queuing ena_tx_ctx on
 * io_sq.  NOTE(review): criteria live in the implementation file
 * (presumably LLQ TX-burst accounting) - confirm there.
 */
bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
				struct ena_com_tx_ctx *ena_tx_ctx);

/* Build the HW descriptors for the packet described by ena_tx_ctx and
 * queue them on io_sq.  On success *nb_hw_desc receives the number of
 * HW descriptors used.
 */
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

/* Retrieve the next completed RX packet from io_cq, filling ena_rx_ctx
 * with its metadata (io_sq is the matching submission queue).
 */
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

/* Post a single RX buffer (tagged with req_id) on the RX SQ. */
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

/* Check whether the completion queue currently has no new entries. */
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
98 | ||
7c673cae FG |
/* Re-enable (unmask) the interrupt of an IO completion queue by writing
 * the prepared control word to the queue's unmask register.
 */
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
104 | ||
9f95a23c | 105 | static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq) |
7c673cae FG |
106 | { |
107 | u16 tail, next_to_comp, cnt; | |
108 | ||
109 | next_to_comp = io_sq->next_to_comp; | |
110 | tail = io_sq->tail; | |
111 | cnt = tail - next_to_comp; | |
112 | ||
113 | return io_sq->q_depth - 1 - cnt; | |
114 | } | |
115 | ||
9f95a23c TL |
116 | /* Check if the submission queue has enough space to hold required_buffers */ |
117 | static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, | |
118 | u16 required_buffers) | |
7c673cae | 119 | { |
9f95a23c | 120 | int temp; |
7c673cae | 121 | |
9f95a23c TL |
122 | if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) |
123 | return ena_com_free_desc(io_sq) >= required_buffers; | |
124 | ||
125 | /* This calculation doesn't need to be 100% accurate. So to reduce | |
126 | * the calculation overhead just Subtract 2 lines from the free descs | |
127 | * (one for the header line and one to compensate the devision | |
128 | * down calculation. | |
129 | */ | |
130 | temp = required_buffers / io_sq->llq_info.descs_per_entry + 2; | |
131 | ||
132 | return ena_com_free_desc(io_sq) > temp; | |
133 | } | |
134 | ||
135 | static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq) | |
136 | { | |
137 | return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) && | |
138 | io_sq->llq_info.max_entries_in_tx_burst > 0; | |
139 | } | |
140 | ||
/* Ring the SQ doorbell: publish the current tail pointer to the device.
 * For LLQ queues with a bounded TX burst, ringing the doorbell also
 * refills the remaining-entries-in-burst budget.
 * Always returns 0.
 */
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 tail = io_sq->tail;
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;

	ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}
159 | ||
160 | static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) | |
161 | { | |
162 | u16 unreported_comp, head; | |
163 | bool need_update; | |
164 | ||
165 | head = io_cq->head; | |
166 | unreported_comp = head - io_cq->last_head_update; | |
167 | need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); | |
168 | ||
169 | if (io_cq->cq_head_db_reg && need_update) { | |
170 | ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n", | |
171 | io_cq->qid, head); | |
11fdf7f2 | 172 | ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg); |
7c673cae FG |
173 | io_cq->last_head_update = head; |
174 | } | |
175 | ||
176 | return 0; | |
177 | } | |
178 | ||
/* Publish the NUMA node of the queue's consumer to the device.
 * No-op if the device did not expose a NUMA config register for this CQ.
 */
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	/* Encode the node id into the NUMA field and set the enable bit. */
	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
192 | ||
/* Acknowledge 'elem' completed descriptors by advancing the SQ's
 * next-to-complete counter.  u16 wrap-around is intentional and matches
 * the tail arithmetic in ena_com_free_desc().
 */
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}
197 | ||
9f95a23c TL |
/* Advance the CQ head by one entry.  The mask below assumes q_depth is
 * a power of two.
 */
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
206 | ||
207 | static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) | |
208 | { | |
209 | u8 expected_phase, cdesc_phase; | |
210 | struct ena_eth_io_tx_cdesc *cdesc; | |
211 | u16 masked_head; | |
212 | ||
213 | masked_head = io_cq->head & (io_cq->q_depth - 1); | |
214 | expected_phase = io_cq->phase; | |
215 | ||
216 | cdesc = (struct ena_eth_io_tx_cdesc *) | |
217 | ((uintptr_t)io_cq->cdesc_addr.virt_addr + | |
218 | (masked_head * io_cq->cdesc_entry_size_in_bytes)); | |
219 | ||
220 | /* When the current completion descriptor phase isn't the same as the | |
221 | * expected, it mean that the device still didn't update | |
222 | * this completion. | |
223 | */ | |
224 | cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; | |
225 | if (cdesc_phase != expected_phase) | |
226 | return ENA_COM_TRY_AGAIN; | |
227 | ||
228 | dma_rmb(); | |
229 | ||
230 | *req_id = READ_ONCE16(cdesc->req_id); | |
231 | if (unlikely(*req_id >= io_cq->q_depth)) { | |
232 | ena_trc_err("Invalid req id %d\n", cdesc->req_id); | |
233 | return ENA_COM_INVAL; | |
234 | } | |
235 | ||
236 | ena_com_cq_inc_head(io_cq); | |
237 | ||
238 | return 0; | |
239 | } | |
240 | ||
7c673cae FG |
241 | #if defined(__cplusplus) |
242 | } | |
243 | #endif | |
244 | #endif /* ENA_ETH_COM_H_ */ |