]>
Commit | Line | Data |
---|---|---|
c535e923 CM |
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. |
2 | * | |
3 | * Redistribution and use in source and binary forms, with or without | |
4 | * modification, are permitted provided that the following conditions are met: | |
5 | * * Redistributions of source code must retain the above copyright | |
6 | * notice, this list of conditions and the following disclaimer. | |
7 | * * Redistributions in binary form must reproduce the above copyright | |
8 | * notice, this list of conditions and the following disclaimer in the | |
9 | * documentation and/or other materials provided with the distribution. | |
10 | * * Neither the name of Freescale Semiconductor nor the | |
11 | * names of its contributors may be used to endorse or promote products | |
12 | * derived from this software without specific prior written permission. | |
13 | * | |
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | |
15 | * GNU General Public License ("GPL") as published by the Free Software | |
16 | * Foundation, either version 2 of that License or (at your option) any | |
17 | * later version. | |
18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | |
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | |
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | |
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | |
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
29 | */ | |
30 | ||
31 | #include "qman_priv.h" | |
32 | ||
33 | #define DQRR_MAXFILL 15 | |
34 | #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ | |
35 | #define IRQNAME "QMan portal %d" | |
36 | #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ | |
37 | #define QMAN_POLL_LIMIT 32 | |
38 | #define QMAN_PIRQ_DQRR_ITHRESH 12 | |
39 | #define QMAN_PIRQ_MR_ITHRESH 4 | |
40 | #define QMAN_PIRQ_IPERIOD 100 | |
41 | ||
42 | /* Portal register assists */ | |
43 | ||
21772c43 MB |
44 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) |
45 | /* Cache-inhibited register offsets */ | |
46 | #define QM_REG_EQCR_PI_CINH 0x3000 | |
47 | #define QM_REG_EQCR_CI_CINH 0x3040 | |
48 | #define QM_REG_EQCR_ITR 0x3080 | |
49 | #define QM_REG_DQRR_PI_CINH 0x3100 | |
50 | #define QM_REG_DQRR_CI_CINH 0x3140 | |
51 | #define QM_REG_DQRR_ITR 0x3180 | |
52 | #define QM_REG_DQRR_DCAP 0x31C0 | |
53 | #define QM_REG_DQRR_SDQCR 0x3200 | |
54 | #define QM_REG_DQRR_VDQCR 0x3240 | |
55 | #define QM_REG_DQRR_PDQCR 0x3280 | |
56 | #define QM_REG_MR_PI_CINH 0x3300 | |
57 | #define QM_REG_MR_CI_CINH 0x3340 | |
58 | #define QM_REG_MR_ITR 0x3380 | |
59 | #define QM_REG_CFG 0x3500 | |
60 | #define QM_REG_ISR 0x3600 | |
61 | #define QM_REG_IER 0x3640 | |
62 | #define QM_REG_ISDR 0x3680 | |
63 | #define QM_REG_IIR 0x36C0 | |
64 | #define QM_REG_ITPR 0x3740 | |
65 | ||
66 | /* Cache-enabled register offsets */ | |
67 | #define QM_CL_EQCR 0x0000 | |
68 | #define QM_CL_DQRR 0x1000 | |
69 | #define QM_CL_MR 0x2000 | |
70 | #define QM_CL_EQCR_PI_CENA 0x3000 | |
71 | #define QM_CL_EQCR_CI_CENA 0x3040 | |
72 | #define QM_CL_DQRR_PI_CENA 0x3100 | |
73 | #define QM_CL_DQRR_CI_CENA 0x3140 | |
74 | #define QM_CL_MR_PI_CENA 0x3300 | |
75 | #define QM_CL_MR_CI_CENA 0x3340 | |
76 | #define QM_CL_CR 0x3800 | |
77 | #define QM_CL_RR0 0x3900 | |
78 | #define QM_CL_RR1 0x3940 | |
79 | ||
80 | #else | |
c535e923 CM |
81 | /* Cache-inhibited register offsets */ |
82 | #define QM_REG_EQCR_PI_CINH 0x0000 | |
83 | #define QM_REG_EQCR_CI_CINH 0x0004 | |
84 | #define QM_REG_EQCR_ITR 0x0008 | |
85 | #define QM_REG_DQRR_PI_CINH 0x0040 | |
86 | #define QM_REG_DQRR_CI_CINH 0x0044 | |
87 | #define QM_REG_DQRR_ITR 0x0048 | |
88 | #define QM_REG_DQRR_DCAP 0x0050 | |
89 | #define QM_REG_DQRR_SDQCR 0x0054 | |
90 | #define QM_REG_DQRR_VDQCR 0x0058 | |
91 | #define QM_REG_DQRR_PDQCR 0x005c | |
92 | #define QM_REG_MR_PI_CINH 0x0080 | |
93 | #define QM_REG_MR_CI_CINH 0x0084 | |
94 | #define QM_REG_MR_ITR 0x0088 | |
95 | #define QM_REG_CFG 0x0100 | |
96 | #define QM_REG_ISR 0x0e00 | |
97 | #define QM_REG_IER 0x0e04 | |
98 | #define QM_REG_ISDR 0x0e08 | |
99 | #define QM_REG_IIR 0x0e0c | |
100 | #define QM_REG_ITPR 0x0e14 | |
101 | ||
102 | /* Cache-enabled register offsets */ | |
103 | #define QM_CL_EQCR 0x0000 | |
104 | #define QM_CL_DQRR 0x1000 | |
105 | #define QM_CL_MR 0x2000 | |
106 | #define QM_CL_EQCR_PI_CENA 0x3000 | |
107 | #define QM_CL_EQCR_CI_CENA 0x3100 | |
108 | #define QM_CL_DQRR_PI_CENA 0x3200 | |
109 | #define QM_CL_DQRR_CI_CENA 0x3300 | |
110 | #define QM_CL_MR_PI_CENA 0x3400 | |
111 | #define QM_CL_MR_CI_CENA 0x3500 | |
112 | #define QM_CL_CR 0x3800 | |
113 | #define QM_CL_RR0 0x3900 | |
114 | #define QM_CL_RR1 0x3940 | |
21772c43 | 115 | #endif |
c535e923 CM |
116 | |
117 | /* | |
118 | * BTW, the drivers (and h/w programming model) already obtain the required | |
119 | * synchronisation for portal accesses and data-dependencies. Use of barrier()s | |
120 | * or other order-preserving primitives simply degrade performance. Hence the | |
121 | * use of the __raw_*() interfaces, which simply ensure that the compiler treats | |
122 | * the portal registers as volatile | |
123 | */ | |
124 | ||
125 | /* Cache-enabled ring access */ | |
126 | #define qm_cl(base, idx) ((void *)base + ((idx) << 6)) | |
127 | ||
128 | /* | |
129 | * Portal modes. | |
130 | * Enum types; | |
131 | * pmode == production mode | |
132 | * cmode == consumption mode, | |
133 | * dmode == h/w dequeue mode. | |
134 | * Enum values use 3 letter codes. First letter matches the portal mode, | |
135 | * remaining two letters indicate; | |
136 | * ci == cache-inhibited portal register | |
137 | * ce == cache-enabled portal register | |
138 | * vb == in-band valid-bit (cache-enabled) | |
139 | * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only | |
140 | * As for "enum qm_dqrr_dmode", it should be self-explanatory. | |
141 | */ | |
/* EQCR production mode: how s/w tells h/w a new enqueue entry is ready */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
/* DQRR dequeue mode: push (h/w initiated) vs pull (s/w initiated) */
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
/* DQRR production mode: how s/w detects new entries from h/w */
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
/* DQRR consumption mode: how s/w tells h/w an entry has been consumed */
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
/* MR production mode: how s/w detects new message-ring entries */
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
/* MR consumption mode: how s/w acknowledges consumed MR entries */
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};
170 | ||
171 | /* --- Portal structures --- */ | |
172 | ||
173 | #define QM_EQCR_SIZE 8 | |
174 | #define QM_DQRR_SIZE 16 | |
175 | #define QM_MR_SIZE 8 | |
176 | ||
/* "Enqueue Command" — one 64-byte EQCR ring entry, as laid out by h/w */
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	__be16 seqnum;
	u8 __reserved[4];
	__be32 fqid;	/* 24-bit */
	__be32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */

/* S/w shadow state for the enqueue command ring */
struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;	/* ring base / next free slot */
	u8 ci, available, ithresh, vbit;	/* consumer idx, free slots,
						 * intr threshold, valid bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;		/* non-zero between start() and commit() */
	enum qm_eqcr_pmode pmode;
#endif
};
203 | ||
/* S/w shadow state for the dequeue response ring */
struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor; /* ring base / next entry */
	u8 pi, ci, fill, ithresh, vbit;	/* producer/consumer idx, # of
					 * outstanding entries, intr
					 * threshold, expected valid bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

/* S/w shadow state for the message ring */
struct qm_mr {
	union qm_mr_entry *ring, *cursor;	/* ring base / next entry */
	u8 pi, ci, fill, ithresh, vbit;		/* as for qm_dqrr above */
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};
222 | ||
/* MC (Management Command) command */
/* "FQ" command layout — commands addressed by frame-queue id */
struct qm_mcc_fq {
	u8 _ncw_verb;		/* writes to this are non-coherent */
	u8 __reserved1[3];
	__be32 fqid;		/* 24-bit */
	u8 __reserved2[56];
} __packed;

/* "CGR" command layout — commands addressed by congestion-group id */
struct qm_mcc_cgr {
	u8 _ncw_verb;		/* writes to this are non-coherent */
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
/*
 * One 64-byte MC command slot; all layouts overlay the same verb byte,
 * which is written last (non-coherently) to hand the command to h/w.
 */
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};
269 | ||
/* MC (Management Command) result */
/* "Query FQ" — returns the full frame-queue descriptor */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" — shared response layout */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;			/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
/* One 64-byte MC result slot; 'verb'/'result' are common to all layouts */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};
323 | ||
/* S/w state for the management-command interface (CR + two result slots) */
struct qm_mc {
	union qm_mc_command *cr;	/* command register (cache-enabled) */
	union qm_mc_result *rr;		/* result register pair base */
	u8 rridx, vbit;			/* which RR slot to read next, and
					 * the valid bit for the next CR */
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

/* The two MMU views of a portal's register space */
struct qm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* same value as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;
359 | ||
360 | /* Cache-inhibited register access. */ | |
361 | static inline u32 qm_in(struct qm_portal *p, u32 offset) | |
362 | { | |
e6e2df69 | 363 | return ioread32be(p->addr.ci + offset); |
c535e923 CM |
364 | } |
365 | ||
366 | static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) | |
367 | { | |
e6e2df69 | 368 | iowrite32be(val, p->addr.ci + offset); |
c535e923 CM |
369 | } |
370 | ||
371 | /* Cache Enabled Portal Access */ | |
372 | static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset) | |
373 | { | |
374 | dpaa_invalidate(p->addr.ce + offset); | |
375 | } | |
376 | ||
377 | static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) | |
378 | { | |
379 | dpaa_touch_ro(p->addr.ce + offset); | |
380 | } | |
381 | ||
382 | static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) | |
383 | { | |
e6e2df69 | 384 | return be32_to_cpu(*(p->addr.ce_be + (offset/4))); |
c535e923 CM |
385 | } |
386 | ||
387 | /* --- EQCR API --- */ | |
388 | ||
389 | #define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry)) | |
390 | #define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT) | |
391 | ||
392 | /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ | |
393 | static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p) | |
394 | { | |
395 | uintptr_t addr = (uintptr_t)p; | |
396 | ||
397 | addr &= ~EQCR_CARRY; | |
398 | ||
399 | return (struct qm_eqcr_entry *)addr; | |
400 | } | |
401 | ||
402 | /* Bit-wise logic to convert a ring pointer to a ring index */ | |
403 | static int eqcr_ptr2idx(struct qm_eqcr_entry *e) | |
404 | { | |
405 | return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1); | |
406 | } | |
407 | ||
408 | /* Increment the 'cursor' ring pointer, taking 'vbit' into account */ | |
409 | static inline void eqcr_inc(struct qm_eqcr *eqcr) | |
410 | { | |
411 | /* increment to the next EQCR pointer and handle overflow and 'vbit' */ | |
412 | struct qm_eqcr_entry *partial = eqcr->cursor + 1; | |
413 | ||
414 | eqcr->cursor = eqcr_carryclear(partial); | |
415 | if (partial != eqcr->cursor) | |
416 | eqcr->vbit ^= QM_EQCR_VERB_VBIT; | |
417 | } | |
418 | ||
/*
 * Initialise the s/w EQCR shadow from the h/w indices and program the
 * QCSP_CFG stashing/production-mode fields. Returns 0 (cannot fail).
 * The reads below establish the current PI/CI so 'available' and the
 * valid bit start in sync with hardware.
 */
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	/* drop any stale cached copy before the CENA CI is used later */
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	/* bit QM_EQCR_SIZE of the raw PI register carries the valid bit */
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	/* one slot is always kept unusable to distinguish full from empty */
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
	      (eq_stash_prio << 26) | /* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
449 | ||
450 | static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal) | |
451 | { | |
452 | return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7; | |
453 | } | |
454 | ||
455 | static inline void qm_eqcr_finish(struct qm_portal *portal) | |
456 | { | |
457 | struct qm_eqcr *eqcr = &portal->eqcr; | |
458 | u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); | |
459 | u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); | |
460 | ||
461 | DPAA_ASSERT(!eqcr->busy); | |
462 | if (pi != eqcr_ptr2idx(eqcr->cursor)) | |
57907a73 | 463 | pr_crit("losing uncommitted EQCR entries\n"); |
c535e923 CM |
464 | if (ci != eqcr->ci) |
465 | pr_crit("missing existing EQCR completions\n"); | |
466 | if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) | |
467 | pr_crit("EQCR destroyed unquiesced\n"); | |
468 | } | |
469 | ||
470 | static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal | |
471 | *portal) | |
472 | { | |
473 | struct qm_eqcr *eqcr = &portal->eqcr; | |
474 | ||
475 | DPAA_ASSERT(!eqcr->busy); | |
476 | if (!eqcr->available) | |
477 | return NULL; | |
478 | ||
479 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
480 | eqcr->busy = 1; | |
481 | #endif | |
482 | dpaa_zero(eqcr->cursor); | |
483 | return eqcr->cursor; | |
484 | } | |
485 | ||
/*
 * Begin an enqueue; if the ring looks full, refresh the consumer index
 * from the (stashable) cache-enabled CI register before giving up.
 * Returns a zeroed entry to fill in, or NULL if genuinely full.
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
							*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		/* slots freed since the last CI snapshot become available */
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}
508 | ||
509 | static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) | |
510 | { | |
511 | DPAA_ASSERT(eqcr->busy); | |
18058822 | 512 | DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK)); |
c535e923 CM |
513 | DPAA_ASSERT(eqcr->available >= 1); |
514 | } | |
515 | ||
/*
 * Commit the in-progress enqueue in valid-bit production mode: the verb
 * byte (with the current valid bit) is written last, after a dma_wmb(),
 * so h/w never sees the verb before the rest of the entry is visible.
 */
static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();	/* order entry payload before the verb write */
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);	/* push the cacheline out to h/w */
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}
533 | ||
534 | static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) | |
535 | { | |
536 | qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA); | |
537 | } | |
538 | ||
/*
 * Refresh the EQCR consumer index from the cache-enabled register and
 * fold newly-freed slots into 'available'. Returns how many slots freed.
 */
static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	/* invalidate so the next read observes a fresh value from h/w */
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}
550 | ||
551 | static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) | |
552 | { | |
553 | struct qm_eqcr *eqcr = &portal->eqcr; | |
554 | ||
555 | eqcr->ithresh = ithresh; | |
556 | qm_out(portal, QM_REG_EQCR_ITR, ithresh); | |
557 | } | |
558 | ||
559 | static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) | |
560 | { | |
561 | struct qm_eqcr *eqcr = &portal->eqcr; | |
562 | ||
563 | return eqcr->available; | |
564 | } | |
565 | ||
566 | static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) | |
567 | { | |
568 | struct qm_eqcr *eqcr = &portal->eqcr; | |
569 | ||
570 | return QM_EQCR_SIZE - 1 - eqcr->available; | |
571 | } | |
572 | ||
573 | /* --- DQRR API --- */ | |
574 | ||
575 | #define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry)) | |
576 | #define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT) | |
577 | ||
578 | static const struct qm_dqrr_entry *dqrr_carryclear( | |
579 | const struct qm_dqrr_entry *p) | |
580 | { | |
581 | uintptr_t addr = (uintptr_t)p; | |
582 | ||
583 | addr &= ~DQRR_CARRY; | |
584 | ||
585 | return (const struct qm_dqrr_entry *)addr; | |
586 | } | |
587 | ||
588 | static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e) | |
589 | { | |
590 | return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1); | |
591 | } | |
592 | ||
593 | static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e) | |
594 | { | |
595 | return dqrr_carryclear(e + 1); | |
596 | } | |
597 | ||
598 | static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) | |
599 | { | |
600 | qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) | | |
601 | ((mf & (QM_DQRR_SIZE - 1)) << 20)); | |
602 | } | |
603 | ||
/*
 * Initialise the s/w DQRR shadow from the h/w indices and program the
 * dequeue-related QCSP_CFG fields. All dequeue command registers are
 * zeroed first so the ring is quiescent while being configured.
 * Returns 0 (cannot fail).
 */
static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	/* bit QM_DQRR_SIZE of the raw PI register carries the valid bit */
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
	      ((dmode & 1) << 18) | /* DP */
	      ((cmode & 3) << 16) | /* DCM */
	      0xa0 | /* RE+SE */
	      (0 ? 0x40 : 0) | /* Ignore RP */
	      (0 ? 0x10 : 0); /* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}
644 | ||
/*
 * Tear-down check (debug builds only): warn if non-CDC consumption left
 * entries acknowledged in s/w but not in h/w. In CDC mode h/w tracks
 * consumption itself, so no check is possible or needed.
 */
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}
655 | ||
656 | static inline const struct qm_dqrr_entry *qm_dqrr_current( | |
657 | struct qm_portal *portal) | |
658 | { | |
659 | struct qm_dqrr *dqrr = &portal->dqrr; | |
660 | ||
661 | if (!dqrr->fill) | |
662 | return NULL; | |
663 | return dqrr->cursor; | |
664 | } | |
665 | ||
666 | static inline u8 qm_dqrr_next(struct qm_portal *portal) | |
667 | { | |
668 | struct qm_dqrr *dqrr = &portal->dqrr; | |
669 | ||
670 | DPAA_ASSERT(dqrr->fill); | |
671 | dqrr->cursor = dqrr_inc(dqrr->cursor); | |
672 | return --dqrr->fill; | |
673 | } | |
674 | ||
/*
 * Valid-bit production mode: probe the slot at the shadow producer index;
 * if its verb carries the expected valid bit, h/w has produced a new
 * entry, so advance PI (toggling the expected bit on wrap) and bump fill.
 */
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}
695 | ||
696 | static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, | |
697 | const struct qm_dqrr_entry *dq, | |
698 | int park) | |
699 | { | |
700 | __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; | |
701 | int idx = dqrr_ptr2idx(dq); | |
702 | ||
703 | DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); | |
704 | DPAA_ASSERT((dqrr->ring + idx) == dq); | |
705 | DPAA_ASSERT(idx < QM_DQRR_SIZE); | |
706 | qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ | |
707 | ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */ | |
708 | idx); /* DQRR_DCAP::DCAP_CI */ | |
709 | } | |
710 | ||
711 | static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask) | |
712 | { | |
713 | __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; | |
714 | ||
715 | DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); | |
716 | qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ | |
717 | (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ | |
718 | } | |
719 | ||
720 | static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) | |
721 | { | |
722 | qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr); | |
723 | } | |
724 | ||
725 | static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) | |
726 | { | |
727 | qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr); | |
728 | } | |
729 | ||
730 | static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) | |
731 | { | |
732 | qm_out(portal, QM_REG_DQRR_ITR, ithresh); | |
733 | } | |
734 | ||
735 | /* --- MR API --- */ | |
736 | ||
737 | #define MR_SHIFT ilog2(sizeof(union qm_mr_entry)) | |
738 | #define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT) | |
739 | ||
740 | static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p) | |
741 | { | |
742 | uintptr_t addr = (uintptr_t)p; | |
743 | ||
744 | addr &= ~MR_CARRY; | |
745 | ||
746 | return (union qm_mr_entry *)addr; | |
747 | } | |
748 | ||
749 | static inline int mr_ptr2idx(const union qm_mr_entry *e) | |
750 | { | |
751 | return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1); | |
752 | } | |
753 | ||
754 | static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e) | |
755 | { | |
756 | return mr_carryclear(e + 1); | |
757 | } | |
758 | ||
/*
 * Initialise the s/w MR shadow from the h/w indices and program the
 * QCSP_CFG::MM (message-ring consumption mode) field. Returns 0.
 */
static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	/* bit QM_MR_SIZE of the raw PI register carries the valid bit */
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8); /* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
782 | ||
783 | static inline void qm_mr_finish(struct qm_portal *portal) | |
784 | { | |
785 | struct qm_mr *mr = &portal->mr; | |
786 | ||
787 | if (mr->ci != mr_ptr2idx(mr->cursor)) | |
788 | pr_crit("Ignoring completed MR entries\n"); | |
789 | } | |
790 | ||
791 | static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal) | |
792 | { | |
793 | struct qm_mr *mr = &portal->mr; | |
794 | ||
795 | if (!mr->fill) | |
796 | return NULL; | |
797 | return mr->cursor; | |
798 | } | |
799 | ||
800 | static inline int qm_mr_next(struct qm_portal *portal) | |
801 | { | |
802 | struct qm_mr *mr = &portal->mr; | |
803 | ||
804 | DPAA_ASSERT(mr->fill); | |
805 | mr->cursor = mr_inc(mr->cursor); | |
806 | return --mr->fill; | |
807 | } | |
808 | ||
/*
 * Valid-bit production mode for the message ring: if the slot at the
 * shadow producer index carries the expected valid bit, accept it
 * (advance PI, toggle the bit on wrap, bump fill) and pre-touch the
 * following slot so the next poll reads fresh data.
 */
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);

	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	/* invalidate/pre-touch the slot we will probe next time */
	dpaa_invalidate_touch_ro(res);
}
825 | ||
826 | static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) | |
827 | { | |
828 | struct qm_mr *mr = &portal->mr; | |
829 | ||
830 | DPAA_ASSERT(mr->cmode == qm_mr_cci); | |
831 | mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); | |
832 | qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); | |
833 | } | |
834 | ||
/*
 * Consume everything up to the current cursor position, publishing the new
 * consumer index to hardware.
 */
static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}
843 | ||
/* Program the MR interrupt-coalescing threshold register. */
static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}
848 | ||
849 | /* --- Management command API --- */ | |
850 | ||
/*
 * Set up the management-command interface: map the command register and the
 * pair of response registers, then derive which response slot and valid-bit
 * phase to expect next from the current command verb's valid-bit.
 */
static inline int qm_mc_init(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	/*
	 * If the command register's vbit is set, the next response lands in
	 * RR0 (rridx 0), otherwise RR1; vbit for the next command matches.
	 */
	mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
		    ? 0 : 1;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}
865 | ||
/*
 * Tear down the MC interface; debug builds warn if a command was started
 * but never completed.
 */
static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}
876 | ||
/*
 * Begin a management command: zero the command register and hand it to the
 * caller to fill in. Must be followed by qm_mc_commit().
 */
static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}
888 | ||
/*
 * Submit the previously started command. The dma_wmb() orders the command
 * body ahead of the verb write, since writing the verb (with the valid-bit)
 * is what hands the command to hardware.
 */
static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	/* prime the cacheline we will poll for the response */
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}
903 | ||
/*
 * Poll for the response to the committed command. Returns NULL while the
 * response is still pending; on completion, flips to the other response
 * slot and toggles the valid-bit for the next command.
 */
static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!rr->verb) {
		/* not ready yet: re-prime the cacheline and keep polling */
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}
926 | ||
927 | static inline int qm_mc_result_timeout(struct qm_portal *portal, | |
928 | union qm_mc_result **mcr) | |
929 | { | |
930 | int timeout = QM_MCR_TIMEOUT; | |
931 | ||
932 | do { | |
933 | *mcr = qm_mc_result(portal); | |
934 | if (*mcr) | |
935 | break; | |
936 | udelay(1); | |
937 | } while (--timeout); | |
938 | ||
939 | return timeout; | |
940 | } | |
941 | ||
942 | static inline void fq_set(struct qman_fq *fq, u32 mask) | |
943 | { | |
f5bd2299 | 944 | fq->flags |= mask; |
c535e923 CM |
945 | } |
946 | ||
947 | static inline void fq_clear(struct qman_fq *fq, u32 mask) | |
948 | { | |
f5bd2299 | 949 | fq->flags &= ~mask; |
c535e923 CM |
950 | } |
951 | ||
952 | static inline int fq_isset(struct qman_fq *fq, u32 mask) | |
953 | { | |
954 | return fq->flags & mask; | |
955 | } | |
956 | ||
957 | static inline int fq_isclear(struct qman_fq *fq, u32 mask) | |
958 | { | |
959 | return !(fq->flags & mask); | |
960 | } | |
961 | ||
/*
 * Per-CPU software portal object wrapping the low-level h/w portal plus the
 * driver state hung off it (congestion tracking, deferred work, IRQ config).
 */
struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* non-zero when EQCR consumer-index stashing is in use (needs PAMU) */
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	/* cached static-dequeue command, written to h/w via qm_dqrr_sdqcr_set */
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	/* deferred handling of congestion and MR interrupts (see portal_isr) */
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};
984 | ||
/* Per-CPU "affine" portal bookkeeping; mask/channel updates are made under
 * affine_mask_lock (see qman_create_affine_portal()).
 */
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];
990 | ||
/*
 * Return this CPU's affine portal; get_cpu_var() disables preemption, so
 * every call must be paired with put_affine_portal().
 */
static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}
995 | ||
/* Release the portal reference taken by get_affine_portal(). */
static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}
1000 | ||
/* Workqueue backing the portals' deferred congestion/MR handling. */
static struct workqueue_struct *qm_portal_wq;

/*
 * Allocate the shared portal workqueue (max_active = 1).
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}
1010 | ||
/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

/* FQ lookup table: two slots per FQID (see qman_create_fq(): even index for
 * full-service objects, odd for QMAN_FQ_FLAG_NO_MODIFY references).
 */
static struct qman_fq **fq_table;
static u32 num_fqids;
1019 | ||
1020 | int qman_alloc_fq_table(u32 _num_fqids) | |
1021 | { | |
1022 | num_fqids = _num_fqids; | |
1023 | ||
fad953ce KC |
1024 | fq_table = vzalloc(array3_size(sizeof(struct qman_fq *), |
1025 | num_fqids, 2)); | |
c535e923 CM |
1026 | if (!fq_table) |
1027 | return -ENOMEM; | |
1028 | ||
1029 | pr_debug("Allocated fq lookup table at %p, entry count %u\n", | |
1030 | fq_table, num_fqids * 2); | |
1031 | return 0; | |
1032 | } | |
1033 | ||
1034 | static struct qman_fq *idx_to_fq(u32 idx) | |
1035 | { | |
1036 | struct qman_fq *fq; | |
1037 | ||
1038 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
1039 | if (WARN_ON(idx >= num_fqids * 2)) | |
1040 | return NULL; | |
1041 | #endif | |
1042 | fq = fq_table[idx]; | |
1043 | DPAA_ASSERT(!fq || idx == fq->idx); | |
1044 | ||
1045 | return fq; | |
1046 | } | |
1047 | ||
1048 | /* | |
1049 | * Only returns full-service fq objects, not enqueue-only | |
1050 | * references (QMAN_FQ_FLAG_NO_MODIFY). | |
1051 | */ | |
1052 | static struct qman_fq *fqid_to_fq(u32 fqid) | |
1053 | { | |
1054 | return idx_to_fq(fqid * 2); | |
1055 | } | |
1056 | ||
/*
 * Map a 32-bit h/w tag back to an FQ object: on 64-bit the tag is a table
 * index, on 32-bit the pointer itself fits in the tag.
 */
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}
1065 | ||
/* Inverse of tag_to_fq(): encode an FQ object as a 32-bit h/w tag. */
static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}
1074 | ||
/* Forward declarations: slow/fast poll paths and their deferred-work tasks */
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					unsigned int poll_limit);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);
1080 | ||
/*
 * Portal interrupt handler. Only status bits enabled in p->irq_sources are
 * serviced here; DQRR work runs inline, everything else goes through the
 * slow path (which may defer to the workqueue), then the handled bits are
 * acknowledged in ISR.
 */
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;

	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}
1099 | ||
1100 | static int drain_mr_fqrni(struct qm_portal *p) | |
1101 | { | |
1102 | const union qm_mr_entry *msg; | |
1103 | loop: | |
1104 | msg = qm_mr_current(p); | |
1105 | if (!msg) { | |
1106 | /* | |
1107 | * if MR was full and h/w had other FQRNI entries to produce, we | |
1108 | * need to allow it time to produce those entries once the | |
1109 | * existing entries are consumed. A worst-case situation | |
1110 | * (fully-loaded system) means h/w sequencers may have to do 3-4 | |
1111 | * other things before servicing the portal's MR pump, each of | |
1112 | * which (if slow) may take ~50 qman cycles (which is ~200 | |
1113 | * processor cycles). So rounding up and then multiplying this | |
1114 | * worst-case estimate by a factor of 10, just to be | |
1115 | * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume | |
1116 | * one entry at a time, so h/w has an opportunity to produce new | |
1117 | * entries well before the ring has been fully consumed, so | |
1118 | * we're being *really* paranoid here. | |
1119 | */ | |
7ce2e934 | 1120 | msleep(1); |
c535e923 CM |
1121 | msg = qm_mr_current(p); |
1122 | if (!msg) | |
1123 | return 0; | |
1124 | } | |
1125 | if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { | |
1126 | /* We aren't draining anything but FQRNIs */ | |
1127 | pr_err("Found verb 0x%x in MR\n", msg->verb); | |
1128 | return -1; | |
1129 | } | |
1130 | qm_mr_next(p); | |
1131 | qm_mr_cci_consume(p, 1); | |
1132 | goto loop; | |
1133 | } | |
1134 | ||
/*
 * Initialise one software portal over the h/w portal described by 'c':
 * bring up the EQCR/DQRR/MR/MC sub-interfaces, allocate congestion-group
 * state, hook up the IRQ, then verify the h/w rings are clean before
 * enabling dequeues. On any failure the goto chain unwinds exactly what
 * was set up and returns -EIO.
 */
static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with high-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	/* cgrs[0] is the CGR mask, cgrs[1] the last-seen snapshot */
	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	/* mask everything while we probe, then progressively unmask below */
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	qm_out(p, QM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_err(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISDR, 0);
	qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}
1264 | ||
/*
 * Create the portal affine to c->cpu and record it in the affine-portal
 * bookkeeping (mask, channel, pointer table) under affine_mask_lock.
 * Returns the portal, or NULL on failure.
 */
struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}
1284 | ||
/*
 * Quiesce and tear down a portal: stop dequeues, settle EQCR, release the
 * IRQ and per-portal state, and finish the h/w sub-interfaces in reverse
 * order of qman_create_portal().
 */
static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	/* mark the portal as unconfigured */
	qm->config = NULL;
}
1315 | ||
/*
 * Destroy the current CPU's affine portal and drop it from the affine mask.
 * Returns the portal's config so the caller can release/reuse the resources.
 */
const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}
1333 | ||
/* Inline helper to reduce nesting in __poll_portal_slow(): apply the FQ
 * state transition implied by an MR message verb (retirement-related
 * notifications and park notifications).
 */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		/* ORL (out-of-order resource list) drained */
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		/* retirement complete: latch NE/ORL status from the message */
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		/* FQ parked by hardware */
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}
1360 | ||
/*
 * Deferred handler for CSCI (congestion state change) interrupts: query the
 * current congestion state, diff it against the last snapshot, and invoke
 * registered CGR callbacks for any group that changed. Re-enables the CSCI
 * interrupt source (removed in __poll_portal_slow()) before returning.
 */
static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	/* cgr_lock protects both the snapshot and the cgr_cbs list */
	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}
1392 | ||
/*
 * Deferred handler for MRI interrupts: drain the message ring, dispatching
 * state changes / FQS callbacks for h/w notifications and the ern callback
 * for software enqueue rejections, then consume the batch in one go and
 * re-enable the MRI interrupt source (removed in __poll_portal_slow()).
 * NOTE(review): preemption is disabled around the drain — presumably to stay
 * on the portal's CPU while touching its rings; confirm before changing.
 */
static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* Its a software ERN */
			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	/* acknowledge the whole batch to hardware at once */
	qm_mr_cci_consume(&p->p, num);
	qman_p_irqsource_add(p, QM_PIRQ_MRI);
	preempt_enable();
}
1453 | ||
/*
 * Handle the non-DQRR interrupt sources. CSCI and MRI are masked and their
 * handling deferred to the portal workqueue (the work item re-enables the
 * source when done); EQRI is handled inline. Returns the bits handled so
 * portal_isr() can acknowledge them.
 */
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}
1476 | ||
/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	/* release volatile-dequeue ownership and wake any waiters */
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}
1487 | ||
/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe. Because;
 *
 *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere.
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against the
 * above potential conflicts, but that this function itself is not re-entrant
 * (this is because the function tracks one end of each FIFO in the portal and
 * we do *not* want to lock that). So the consequence is that it is safe for
 * user callbacks to call into any QMan API.
 *
 * Returns the number of DQRR entries processed (<= poll_limit).
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues.  It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
			fq = tag_to_fq(be32_to_cpu(dq->context_b));
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing;
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter.  The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
1591 | ||
/*
 * Enable the given (visible) interrupt sources on the portal. Runs with
 * local interrupts off so irq_sources and IER stay consistent with the ISR.
 */
void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	p->irq_sources |= bits & QM_PIRQ_VISIBLE;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_add);
1602 | ||
/* Disable the given (visible) interrupt sources on the portal. */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * Our interrupt handler only processes+clears status register bits that
	 * are in p->irq_sources. As we're trimming that mask, if one of them
	 * were to assert in the status register just before we remove it from
	 * the enable register, there would be an interrupt-storm when we
	 * release the IRQ lock. So we wait for the enable register update to
	 * take effect in h/w (by reading it back) and then clear all other bits
	 * in the status register. Ie. we clear them from ISR once it's certain
	 * IER won't allow them to reassert.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	p->irq_sources &= ~bits;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, ie. to protect against re-ordering.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);
1631 | ||
/* Return the set of CPUs that currently have an affine portal. */
const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);
1637 | ||
/*
 * Return the dedicated channel id of the portal affine to 'cpu'; a negative
 * cpu means "the current CPU's portal". Warns if that CPU has no portal.
 */
u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);
1650 | ||
/* Return the portal affine to 'cpu' (NULL if none has been created). */
struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);
1656 | ||
/* Poll the portal's DQRR, processing at most 'limit' entries; returns the
 * number processed.
 */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
1662 | ||
/*
 * Add pool channels (restricted to those the portal's config allows) to the
 * portal's static dequeue command and push the updated SDQCR to hardware.
 */
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);
1674 | ||
1675 | /* Frame queue API */ | |
1676 | ||
1677 | static const char *mcr_result_str(u8 result) | |
1678 | { | |
1679 | switch (result) { | |
1680 | case QM_MCR_RESULT_NULL: | |
1681 | return "QM_MCR_RESULT_NULL"; | |
1682 | case QM_MCR_RESULT_OK: | |
1683 | return "QM_MCR_RESULT_OK"; | |
1684 | case QM_MCR_RESULT_ERR_FQID: | |
1685 | return "QM_MCR_RESULT_ERR_FQID"; | |
1686 | case QM_MCR_RESULT_ERR_FQSTATE: | |
1687 | return "QM_MCR_RESULT_ERR_FQSTATE"; | |
1688 | case QM_MCR_RESULT_ERR_NOTEMPTY: | |
1689 | return "QM_MCR_RESULT_ERR_NOTEMPTY"; | |
1690 | case QM_MCR_RESULT_PENDING: | |
1691 | return "QM_MCR_RESULT_PENDING"; | |
1692 | case QM_MCR_RESULT_ERR_BADCOMMAND: | |
1693 | return "QM_MCR_RESULT_ERR_BADCOMMAND"; | |
1694 | } | |
1695 | return "<unknown MCR result>"; | |
1696 | } | |
1697 | ||
1698 | int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) | |
1699 | { | |
1700 | if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { | |
1701 | int ret = qman_alloc_fqid(&fqid); | |
1702 | ||
1703 | if (ret) | |
1704 | return ret; | |
1705 | } | |
1706 | fq->fqid = fqid; | |
1707 | fq->flags = flags; | |
1708 | fq->state = qman_fq_state_oos; | |
1709 | fq->cgr_groupid = 0; | |
1710 | ||
1711 | /* A context_b of 0 is allegedly special, so don't use that fqid */ | |
1712 | if (fqid == 0 || fqid >= num_fqids) { | |
1713 | WARN(1, "bad fqid %d\n", fqid); | |
1714 | return -EINVAL; | |
1715 | } | |
1716 | ||
1717 | fq->idx = fqid * 2; | |
1718 | if (flags & QMAN_FQ_FLAG_NO_MODIFY) | |
1719 | fq->idx++; | |
1720 | ||
1721 | WARN_ON(fq_table[fq->idx]); | |
1722 | fq_table[fq->idx] = fq; | |
1723 | ||
1724 | return 0; | |
1725 | } | |
1726 | EXPORT_SYMBOL(qman_create_fq); | |
1727 | ||
1728 | void qman_destroy_fq(struct qman_fq *fq) | |
1729 | { | |
1730 | /* | |
1731 | * We don't need to lock the FQ as it is a pre-condition that the FQ be | |
1732 | * quiesced. Instead, run some checks. | |
1733 | */ | |
1734 | switch (fq->state) { | |
1735 | case qman_fq_state_parked: | |
1736 | case qman_fq_state_oos: | |
1737 | if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) | |
1738 | qman_release_fqid(fq->fqid); | |
1739 | ||
1740 | DPAA_ASSERT(fq_table[fq->idx]); | |
1741 | fq_table[fq->idx] = NULL; | |
1742 | return; | |
1743 | default: | |
1744 | break; | |
1745 | } | |
1746 | DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); | |
1747 | } | |
1748 | EXPORT_SYMBOL(qman_destroy_fq); | |
1749 | ||
/* Return the FQID this FQ object was created with (see qman_create_fq()) */
u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);
1755 | ||
/*
 * Initialise an OOS/parked FQ in hardware via an INITFQ_[PARKED|SCHED]
 * management command, optionally seeded from *opts (caller's write-enable
 * mask and FQD fields). Returns 0 on success; -EINVAL on bad state or
 * conflicting OAC/TDTHRESH options; -EBUSY if a state change races in;
 * -EIO on DMA-mapping or MC failure; -ETIMEDOUT if the MC never answers.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* And can't be set at the same time as TDTHRESH */
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	/* re-check the state now that we hold the affine portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			/* stashing target: map the fq struct for device reads */
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		/* default to work queue 4 unless the caller chose a DESTWQ */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	/* mirror the options the caller actually wrote into our soft state */
	if (opts) {
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);
1868 | ||
/*
 * Move a parked FQ to the scheduled state via an ALTERFQ_SCHED management
 * command. Returns 0 on success; -EINVAL if the FQ is not parked (or is
 * NO_MODIFY under CONFIG_FSL_DPAA_CHECKING); -EBUSY if a state change is in
 * flight; -ETIMEDOUT on MC timeout; -EIO on a non-OK MC result.
 */
int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue a ALTERFQ_SCHED management command */
	p = get_affine_portal();
	/* re-check under the affine portal: the state may have changed */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);
1909 | ||
/*
 * Retire a parked or scheduled FQ via ALTERFQ_RETIRE.
 * Returns 0 if retirement completed immediately (NE/ORL flags are copied
 * from the MC result, *flags - if non-NULL - receives the FQ flags, and a
 * synthetic FQRNI message is delivered to fq->cb.fqs if set); returns 1 if
 * retirement is pending (CHANGING is set; an FQRN will complete it later);
 * otherwise a negative errno.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	/* re-check under the affine portal: the state may have changed */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);
1991 | ||
/*
 * Move a retired FQ out-of-service via ALTERFQ_OOS. Returns 0 on success;
 * -EINVAL if the FQ is not retired (or is NO_MODIFY under
 * CONFIG_FSL_DPAA_CHECKING); -EBUSY if BLOCKOOS is set or the state raced;
 * -ETIMEDOUT on MC timeout; -EIO on a non-OK MC result.
 */
int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	/* re-check under the affine portal: the state may have changed */
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);
2029 | ||
/*
 * Read back the frame-queue descriptor for @fq via a QUERYFQ management
 * command on the affine portal. Fills *fqd and returns 0 on success;
 * -ETIMEDOUT if the MC never answers; -EIO on a non-OK MC result.
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
2054 | ||
/*
 * Query the "non-programmable" (runtime) state of @fq via QUERYFQ_NP.
 * Fills *np and returns 0 on success; -ERANGE for a bad FQID; -ETIMEDOUT
 * if the MC never answers; -EIO on any other non-OK MC result.
 */
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);
c535e923 CM |
2082 | |
/*
 * Query congestion group @cgr's hardware state into *cgrd via QUERYCGR.
 * Returns 0 on success; -ETIMEDOUT if the MC never answers; -EIO (with the
 * symbolic result logged) otherwise.
 */
static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
2110 | ||
2111 | int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result) | |
2112 | { | |
2113 | struct qm_mcr_querycgr query_cgr; | |
2114 | int err; | |
2115 | ||
2116 | err = qman_query_cgr(cgr, &query_cgr); | |
2117 | if (err) | |
2118 | return err; | |
2119 | ||
2120 | *result = !!query_cgr.cgr.cs; | |
2121 | return 0; | |
2122 | } | |
2123 | EXPORT_SYMBOL(qman_query_cgr_congested); | |
2124 | ||
2125 | /* internal function used as a wait_event() expression */ | |
2126 | static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) | |
2127 | { | |
2128 | unsigned long irqflags; | |
2129 | int ret = -EBUSY; | |
2130 | ||
2131 | local_irq_save(irqflags); | |
2132 | if (p->vdqcr_owned) | |
2133 | goto out; | |
2134 | if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) | |
2135 | goto out; | |
2136 | ||
2137 | fq_set(fq, QMAN_FQ_STATE_VDQCR); | |
2138 | p->vdqcr_owned = fq; | |
2139 | qm_dqrr_vdqcr_set(&p->p, vdqcr); | |
2140 | ret = 0; | |
2141 | out: | |
2142 | local_irq_restore(irqflags); | |
2143 | return ret; | |
2144 | } | |
2145 | ||
2146 | static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) | |
2147 | { | |
2148 | int ret; | |
2149 | ||
2150 | *p = get_affine_portal(); | |
2151 | ret = set_p_vdqcr(*p, fq, vdqcr); | |
2152 | put_affine_portal(); | |
2153 | return ret; | |
2154 | } | |
2155 | ||
2156 | static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, | |
2157 | u32 vdqcr, u32 flags) | |
2158 | { | |
2159 | int ret = 0; | |
2160 | ||
2161 | if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) | |
2162 | ret = wait_event_interruptible(affine_queue, | |
2163 | !set_vdqcr(p, fq, vdqcr)); | |
2164 | else | |
2165 | wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr)); | |
2166 | return ret; | |
2167 | } | |
2168 | ||
/*
 * Issue a volatile dequeue command for @fq, which must be parked or retired.
 * @vdqcr must arrive with an empty FQID field - it is filled in here. With
 * QMAN_VOLATILE_FLAG_WAIT the call blocks until the VDQCR can be issued;
 * with QMAN_VOLATILE_FLAG_FINISH it also blocks until the VDQCR completes.
 * Returns 0 on success, -EINVAL/-EBUSY on precondition failure, or the
 * error from the wait.
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	/* the FQID field must be clear - we own it */
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;
	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				   !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
2206 | ||
2207 | static void update_eqcr_ci(struct qman_portal *p, u8 avail) | |
2208 | { | |
2209 | if (avail) | |
2210 | qm_eqcr_cce_prefetch(&p->p); | |
2211 | else | |
2212 | qm_eqcr_cce_update(&p->p); | |
2213 | } | |
2214 | ||
2215 | int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) | |
2216 | { | |
2217 | struct qman_portal *p; | |
2218 | struct qm_eqcr_entry *eq; | |
2219 | unsigned long irqflags; | |
2220 | u8 avail; | |
2221 | ||
2222 | p = get_affine_portal(); | |
2223 | local_irq_save(irqflags); | |
2224 | ||
2225 | if (p->use_eqcr_ci_stashing) { | |
2226 | /* | |
2227 | * The stashing case is easy, only update if we need to in | |
2228 | * order to try and liberate ring entries. | |
2229 | */ | |
2230 | eq = qm_eqcr_start_stash(&p->p); | |
2231 | } else { | |
2232 | /* | |
2233 | * The non-stashing case is harder, need to prefetch ahead of | |
2234 | * time. | |
2235 | */ | |
2236 | avail = qm_eqcr_get_avail(&p->p); | |
2237 | if (avail < 2) | |
2238 | update_eqcr_ci(p, avail); | |
2239 | eq = qm_eqcr_start_no_stash(&p->p); | |
2240 | } | |
2241 | ||
2242 | if (unlikely(!eq)) | |
2243 | goto out; | |
2244 | ||
d6753c7e | 2245 | qm_fqid_set(eq, fq->fqid); |
18058822 | 2246 | eq->tag = cpu_to_be32(fq_to_tag(fq)); |
c535e923 CM |
2247 | eq->fd = *fd; |
2248 | ||
2249 | qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); | |
2250 | out: | |
2251 | local_irq_restore(irqflags); | |
2252 | put_affine_portal(); | |
2253 | return 0; | |
2254 | } | |
2255 | EXPORT_SYMBOL(qman_enqueue); | |
2256 | ||
/*
 * Issue a MODIFYCGR - or INITCGR when QMAN_CGR_FLAG_USE_INIT is set - for
 * @cgr, optionally seeded from *opts. Returns 0 on success, -ETIMEDOUT if
 * the MC never answers, -EIO on a non-OK MC result.
 */
static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	/* the CGID is always ours, even when opts carried one */
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}
2286 | ||
/* Software-portal index of portal n, relative to SWPORTAL0; the argument and
 * expansion are parenthesized so the macro is safe with any expression.
 */
#define PORTAL_IDX(n)	((n)->config->channel - QM_CHANNEL_SWPORTAL0)
496bfa11 CM |
2288 | |
/* congestion state change notification target update control */
/*
 * Add portal index @pi to the CGR's CSCN target set. On QMan rev >= 3.0 the
 * cscn_targ_upd_ctrl field is written with the WRITE bit plus the portal
 * index; earlier revisions keep a portal bitmask in cscn_targ, so the portal
 * bit is OR'd into the caller-supplied current value @val.
 */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
	else
		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}
2298 | ||
/*
 * Remove portal index @pi from the CGR's CSCN target set: on rev >= 3.0
 * write the update-control field without the WRITE bit; otherwise mask the
 * portal bit out of the caller-supplied current value @val.
 */
static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
	else
		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}
c535e923 CM |
2306 | |
/*
 * CPU on whose affine portal each CGR was created (set in qman_create_cgr(),
 * consulted by qman_delete_cgr_safe() to delete from the owning CPU).
 */
static u8 qman_cgr_cpus[CGR_NUM];
2308 | ||
2309 | void qman_init_cgr_all(void) | |
2310 | { | |
2311 | struct qman_cgr cgr; | |
2312 | int err_cnt = 0; | |
2313 | ||
2314 | for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) { | |
2315 | if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL)) | |
2316 | err_cnt++; | |
2317 | } | |
2318 | ||
2319 | if (err_cnt) | |
2320 | pr_err("Warning: %d error%s while initialising CGR h/w\n", | |
2321 | err_cnt, (err_cnt > 1) ? "s" : ""); | |
2322 | } | |
2323 | ||
/*
 * Register @cgr on the current CPU's affine portal, optionally (re)writing
 * its hardware configuration from *opts - the calling portal is always
 * added to the CSCN target set. Records the owning CPU for
 * qman_delete_cgr_safe(), links the object on the portal's callback list,
 * and fires cgr->cb once if the group is already congested. Returns 0 or a
 * negative errno.
 */
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	/* remember which CPU owns this CGR (see qman_delete_cgr_safe()) */
	preempt_disable();
	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		/* add this portal to the CSCN targets, preserving the rest */
		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
				     be32_to_cpu(cgr_state.cgr.cscn_targ));
		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);
2388 | ||
/*
 * Unregister @cgr from the current CPU's affine portal. Must run on the
 * portal that created it (cgr->chan is checked; use qman_delete_cgr_safe()
 * from other CPUs). If this was the last object for the CGRID with a
 * callback, the portal is also removed from the CSCN target set; on any
 * hardware failure the node is re-linked so state stays consistent.
 * Returns 0 or a negative errno.
 */
int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	/* drop this portal from the CSCN targets, preserving the rest */
	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);
2439 | ||
/*
 * Pairs a CGR with a completion for cross-CPU deletion handshakes.
 * NOTE(review): nothing in the visible code references cgr_comp any more -
 * qman_delete_cgr_safe() now uses smp_call_function_single() directly.
 * Candidate for removal; confirm there are no users elsewhere in the file.
 */
struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};
2444 | ||
/* smp_call_function_single() trampoline: @arg is the struct qman_cgr */
static void qman_delete_cgr_smp_call(void *arg)
{
	/* void * converts implicitly in C; no cast needed */
	qman_delete_cgr(arg);
}
2449 | ||
/*
 * Delete @cgr from whatever CPU this is called on: if the current CPU is not
 * the one the CGR was created on (see qman_cgr_cpus), the deletion is run on
 * the owning CPU via a synchronous IPI; preemption is disabled so the CPU
 * check stays valid across the decision.
 */
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);
2464 | ||
2465 | /* Cleanup FQs */ | |
2466 | ||
2467 | static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v) | |
2468 | { | |
2469 | const union qm_mr_entry *msg; | |
2470 | int found = 0; | |
2471 | ||
2472 | qm_mr_pvb_update(p); | |
2473 | msg = qm_mr_current(p); | |
2474 | while (msg) { | |
2475 | if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v) | |
2476 | found = 1; | |
2477 | qm_mr_next(p); | |
2478 | qm_mr_cci_consume_to_current(p); | |
2479 | qm_mr_pvb_update(p); | |
2480 | msg = qm_mr_current(p); | |
2481 | } | |
2482 | return found; | |
2483 | } | |
2484 | ||
2485 | static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, | |
2486 | bool wait) | |
2487 | { | |
2488 | const struct qm_dqrr_entry *dqrr; | |
2489 | int found = 0; | |
2490 | ||
2491 | do { | |
2492 | qm_dqrr_pvb_update(p); | |
2493 | dqrr = qm_dqrr_current(p); | |
2494 | if (!dqrr) | |
2495 | cpu_relax(); | |
2496 | } while (wait && !dqrr); | |
2497 | ||
2498 | while (dqrr) { | |
d6753c7e | 2499 | if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s)) |
c535e923 CM |
2500 | found = 1; |
2501 | qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); | |
2502 | qm_dqrr_pvb_update(p); | |
2503 | qm_dqrr_next(p); | |
2504 | dqrr = qm_dqrr_current(p); | |
2505 | } | |
2506 | return found; | |
2507 | } | |
2508 | ||
/* Drain the MR; 1 if any entry had verb type QM_MR_VERB_##V (e.g. FQRN) */
#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

/* Drain the DQRR without waiting; match on FQID f and stat bit S */
#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

/* As qm_dqrr_drain(), but busy-wait until at least one entry arrives */
#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

/* Drain and discard everything in the DQRR (no FQID/stat match requested) */
#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)
2520 | ||
2521 | static int qman_shutdown_fq(u32 fqid) | |
2522 | { | |
2523 | struct qman_portal *p; | |
2524 | struct device *dev; | |
2525 | union qm_mc_command *mcc; | |
2526 | union qm_mc_result *mcr; | |
2527 | int orl_empty, drain = 0, ret = 0; | |
2528 | u32 channel, wq, res; | |
2529 | u8 state; | |
2530 | ||
2531 | p = get_affine_portal(); | |
2532 | dev = p->config->dev; | |
2533 | /* Determine the state of the FQID */ | |
2534 | mcc = qm_mc_start(&p->p); | |
d6753c7e | 2535 | qm_fqid_set(&mcc->fq, fqid); |
c535e923 CM |
2536 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); |
2537 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2538 | dev_err(dev, "QUERYFQ_NP timeout\n"); | |
2539 | ret = -ETIMEDOUT; | |
2540 | goto out; | |
2541 | } | |
2542 | ||
2543 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); | |
2544 | state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; | |
2545 | if (state == QM_MCR_NP_STATE_OOS) | |
2546 | goto out; /* Already OOS, no need to do anymore checks */ | |
2547 | ||
2548 | /* Query which channel the FQ is using */ | |
2549 | mcc = qm_mc_start(&p->p); | |
d6753c7e | 2550 | qm_fqid_set(&mcc->fq, fqid); |
c535e923 CM |
2551 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); |
2552 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2553 | dev_err(dev, "QUERYFQ timeout\n"); | |
2554 | ret = -ETIMEDOUT; | |
2555 | goto out; | |
2556 | } | |
2557 | ||
2558 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); | |
2559 | /* Need to store these since the MCR gets reused */ | |
2560 | channel = qm_fqd_get_chan(&mcr->queryfq.fqd); | |
2561 | wq = qm_fqd_get_wq(&mcr->queryfq.fqd); | |
2562 | ||
2563 | switch (state) { | |
2564 | case QM_MCR_NP_STATE_TEN_SCHED: | |
2565 | case QM_MCR_NP_STATE_TRU_SCHED: | |
2566 | case QM_MCR_NP_STATE_ACTIVE: | |
2567 | case QM_MCR_NP_STATE_PARKED: | |
2568 | orl_empty = 0; | |
2569 | mcc = qm_mc_start(&p->p); | |
d6753c7e | 2570 | qm_fqid_set(&mcc->fq, fqid); |
c535e923 CM |
2571 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); |
2572 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2573 | dev_err(dev, "QUERYFQ_NP timeout\n"); | |
2574 | ret = -ETIMEDOUT; | |
2575 | goto out; | |
2576 | } | |
2577 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2578 | QM_MCR_VERB_ALTER_RETIRE); | |
2579 | res = mcr->result; /* Make a copy as we reuse MCR below */ | |
2580 | ||
2581 | if (res == QM_MCR_RESULT_PENDING) { | |
2582 | /* | |
2583 | * Need to wait for the FQRN in the message ring, which | |
2584 | * will only occur once the FQ has been drained. In | |
2585 | * order for the FQ to drain the portal needs to be set | |
2586 | * to dequeue from the channel the FQ is scheduled on | |
2587 | */ | |
2588 | int found_fqrn = 0; | |
2589 | u16 dequeue_wq = 0; | |
2590 | ||
2591 | /* Flag that we need to drain FQ */ | |
2592 | drain = 1; | |
2593 | ||
2594 | if (channel >= qm_channel_pool1 && | |
2595 | channel < qm_channel_pool1 + 15) { | |
2596 | /* Pool channel, enable the bit in the portal */ | |
2597 | dequeue_wq = (channel - | |
2598 | qm_channel_pool1 + 1)<<4 | wq; | |
2599 | } else if (channel < qm_channel_pool1) { | |
2600 | /* Dedicated channel */ | |
2601 | dequeue_wq = wq; | |
2602 | } else { | |
2603 | dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x", | |
2604 | fqid, channel); | |
2605 | ret = -EBUSY; | |
2606 | goto out; | |
2607 | } | |
2608 | /* Set the sdqcr to drain this channel */ | |
2609 | if (channel < qm_channel_pool1) | |
2610 | qm_dqrr_sdqcr_set(&p->p, | |
2611 | QM_SDQCR_TYPE_ACTIVE | | |
2612 | QM_SDQCR_CHANNELS_DEDICATED); | |
2613 | else | |
2614 | qm_dqrr_sdqcr_set(&p->p, | |
2615 | QM_SDQCR_TYPE_ACTIVE | | |
2616 | QM_SDQCR_CHANNELS_POOL_CONV | |
2617 | (channel)); | |
2618 | do { | |
2619 | /* Keep draining DQRR while checking the MR*/ | |
2620 | qm_dqrr_drain_nomatch(&p->p); | |
2621 | /* Process message ring too */ | |
2622 | found_fqrn = qm_mr_drain(&p->p, FQRN); | |
2623 | cpu_relax(); | |
2624 | } while (!found_fqrn); | |
2625 | ||
2626 | } | |
2627 | if (res != QM_MCR_RESULT_OK && | |
2628 | res != QM_MCR_RESULT_PENDING) { | |
2629 | dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n", | |
2630 | fqid, res); | |
2631 | ret = -EIO; | |
2632 | goto out; | |
2633 | } | |
2634 | if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { | |
2635 | /* | |
2636 | * ORL had no entries, no need to wait until the | |
2637 | * ERNs come in | |
2638 | */ | |
2639 | orl_empty = 1; | |
2640 | } | |
2641 | /* | |
2642 | * Retirement succeeded, check to see if FQ needs | |
2643 | * to be drained | |
2644 | */ | |
2645 | if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { | |
2646 | /* FQ is Not Empty, drain using volatile DQ commands */ | |
2647 | do { | |
2648 | u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); | |
2649 | ||
2650 | qm_dqrr_vdqcr_set(&p->p, vdqcr); | |
2651 | /* | |
2652 | * Wait for a dequeue and process the dequeues, | |
2653 | * making sure to empty the ring completely | |
2654 | */ | |
2655 | } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); | |
2656 | } | |
2657 | qm_dqrr_sdqcr_set(&p->p, 0); | |
2658 | ||
2659 | while (!orl_empty) { | |
2660 | /* Wait for the ORL to have been completely drained */ | |
2661 | orl_empty = qm_mr_drain(&p->p, FQRL); | |
2662 | cpu_relax(); | |
2663 | } | |
2664 | mcc = qm_mc_start(&p->p); | |
d6753c7e | 2665 | qm_fqid_set(&mcc->fq, fqid); |
c535e923 CM |
2666 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); |
2667 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2668 | ret = -ETIMEDOUT; | |
2669 | goto out; | |
2670 | } | |
2671 | ||
2672 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2673 | QM_MCR_VERB_ALTER_OOS); | |
2674 | if (mcr->result != QM_MCR_RESULT_OK) { | |
2675 | dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n", | |
2676 | fqid, mcr->result); | |
2677 | ret = -EIO; | |
2678 | goto out; | |
2679 | } | |
2680 | break; | |
2681 | ||
2682 | case QM_MCR_NP_STATE_RETIRED: | |
2683 | /* Send OOS Command */ | |
2684 | mcc = qm_mc_start(&p->p); | |
d6753c7e | 2685 | qm_fqid_set(&mcc->fq, fqid); |
c535e923 CM |
2686 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); |
2687 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2688 | ret = -ETIMEDOUT; | |
2689 | goto out; | |
2690 | } | |
2691 | ||
2692 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2693 | QM_MCR_VERB_ALTER_OOS); | |
2694 | if (mcr->result) { | |
2695 | dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", | |
2696 | fqid, mcr->result); | |
2697 | ret = -EIO; | |
2698 | goto out; | |
2699 | } | |
2700 | break; | |
2701 | ||
2702 | case QM_MCR_NP_STATE_OOS: | |
2703 | /* Done */ | |
2704 | break; | |
2705 | ||
2706 | default: | |
2707 | ret = -EIO; | |
2708 | } | |
2709 | ||
2710 | out: | |
2711 | put_affine_portal(); | |
2712 | return ret; | |
2713 | } | |
2714 | ||
/*
 * qman_get_qm_portal_config - accessor for a portal's static configuration
 * @portal: the portal being queried
 *
 * Simply returns the portal's ->config pointer; no copy is made and no
 * locking is performed.
 */
const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);
c535e923 CM |
2721 | |
/*
 * ID allocators shared by the qman_alloc_*_range()/qman_release_*()
 * helpers below. NOTE(review): presumably initialised during QMan
 * setup elsewhere in this file — not visible in this chunk.
 */
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */
2725 | ||
2726 | static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt) | |
2727 | { | |
2728 | unsigned long addr; | |
2729 | ||
64e9e22e AB |
2730 | if (!p) |
2731 | return -ENODEV; | |
2732 | ||
c535e923 CM |
2733 | addr = gen_pool_alloc(p, cnt); |
2734 | if (!addr) | |
2735 | return -ENOMEM; | |
2736 | ||
2737 | *result = addr & ~DPAA_GENALLOC_OFF; | |
2738 | ||
2739 | return 0; | |
2740 | } | |
2741 | ||
/**
 * qman_alloc_fqid_range - allocate a contiguous range of FQIDs
 * @result: on success, the base FQID of the allocated range
 * @count: number of FQIDs required
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);
2747 | ||
/**
 * qman_alloc_pool_range - allocate a contiguous range of pool-channel IDs
 * @result: on success, the base pool-channel ID of the allocated range
 * @count: number of pool-channel IDs required
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);
2753 | ||
/**
 * qman_alloc_cgrid_range - allocate a contiguous range of CGR IDs
 * @result: on success, the base CGR ID of the allocated range
 * @count: number of CGR IDs required
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
2759 | ||
2760 | int qman_release_fqid(u32 fqid) | |
2761 | { | |
2762 | int ret = qman_shutdown_fq(fqid); | |
2763 | ||
2764 | if (ret) { | |
2765 | pr_debug("FQID %d leaked\n", fqid); | |
2766 | return ret; | |
2767 | } | |
2768 | ||
2769 | gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1); | |
2770 | return 0; | |
2771 | } | |
2772 | EXPORT_SYMBOL(qman_release_fqid); | |
2773 | ||
2774 | static int qpool_cleanup(u32 qp) | |
2775 | { | |
2776 | /* | |
2777 | * We query all FQDs starting from | |
2778 | * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs | |
2779 | * whose destination channel is the pool-channel being released. | |
2780 | * When a non-OOS FQD is found we attempt to clean it up | |
2781 | */ | |
2782 | struct qman_fq fq = { | |
2783 | .fqid = QM_FQID_RANGE_START | |
2784 | }; | |
2785 | int err; | |
2786 | ||
2787 | do { | |
2788 | struct qm_mcr_queryfq_np np; | |
2789 | ||
2790 | err = qman_query_fq_np(&fq, &np); | |
d95cb0d3 | 2791 | if (err == -ERANGE) |
c535e923 CM |
2792 | /* FQID range exceeded, found no problems */ |
2793 | return 0; | |
d95cb0d3 CM |
2794 | else if (WARN_ON(err)) |
2795 | return err; | |
2796 | ||
c535e923 CM |
2797 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { |
2798 | struct qm_fqd fqd; | |
2799 | ||
2800 | err = qman_query_fq(&fq, &fqd); | |
2801 | if (WARN_ON(err)) | |
d95cb0d3 | 2802 | return err; |
c535e923 CM |
2803 | if (qm_fqd_get_chan(&fqd) == qp) { |
2804 | /* The channel is the FQ's target, clean it */ | |
2805 | err = qman_shutdown_fq(fq.fqid); | |
2806 | if (err) | |
2807 | /* | |
2808 | * Couldn't shut down the FQ | |
2809 | * so the pool must be leaked | |
2810 | */ | |
2811 | return err; | |
2812 | } | |
2813 | } | |
2814 | /* Move to the next FQID */ | |
2815 | fq.fqid++; | |
2816 | } while (1); | |
2817 | } | |
2818 | ||
2819 | int qman_release_pool(u32 qp) | |
2820 | { | |
2821 | int ret; | |
2822 | ||
2823 | ret = qpool_cleanup(qp); | |
2824 | if (ret) { | |
2825 | pr_debug("CHID %d leaked\n", qp); | |
2826 | return ret; | |
2827 | } | |
2828 | ||
2829 | gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1); | |
2830 | return 0; | |
2831 | } | |
2832 | EXPORT_SYMBOL(qman_release_pool); | |
2833 | ||
2834 | static int cgr_cleanup(u32 cgrid) | |
2835 | { | |
2836 | /* | |
2837 | * query all FQDs starting from FQID 1 until we get an "invalid FQID" | |
2838 | * error, looking for non-OOS FQDs whose CGR is the CGR being released | |
2839 | */ | |
2840 | struct qman_fq fq = { | |
d95cb0d3 | 2841 | .fqid = QM_FQID_RANGE_START |
c535e923 CM |
2842 | }; |
2843 | int err; | |
2844 | ||
2845 | do { | |
2846 | struct qm_mcr_queryfq_np np; | |
2847 | ||
2848 | err = qman_query_fq_np(&fq, &np); | |
d95cb0d3 | 2849 | if (err == -ERANGE) |
c535e923 CM |
2850 | /* FQID range exceeded, found no problems */ |
2851 | return 0; | |
d95cb0d3 CM |
2852 | else if (WARN_ON(err)) |
2853 | return err; | |
2854 | ||
c535e923 CM |
2855 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { |
2856 | struct qm_fqd fqd; | |
2857 | ||
2858 | err = qman_query_fq(&fq, &fqd); | |
2859 | if (WARN_ON(err)) | |
d95cb0d3 | 2860 | return err; |
18058822 | 2861 | if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE && |
c535e923 CM |
2862 | fqd.cgid == cgrid) { |
2863 | pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", | |
2864 | cgrid, fq.fqid); | |
2865 | return -EIO; | |
2866 | } | |
2867 | } | |
2868 | /* Move to the next FQID */ | |
2869 | fq.fqid++; | |
2870 | } while (1); | |
2871 | } | |
2872 | ||
2873 | int qman_release_cgrid(u32 cgrid) | |
2874 | { | |
2875 | int ret; | |
2876 | ||
2877 | ret = cgr_cleanup(cgrid); | |
2878 | if (ret) { | |
2879 | pr_debug("CGRID %d leaked\n", cgrid); | |
2880 | return ret; | |
2881 | } | |
2882 | ||
2883 | gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1); | |
2884 | return 0; | |
2885 | } | |
2886 | EXPORT_SYMBOL(qman_release_cgrid); |