/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT		32
#define QMAN_PIRQ_DQRR_ITHRESH	12
#define QMAN_DQRR_IT_MAX	15
#define QMAN_ITP_MAX		0xFFF
#define QMAN_PIRQ_MR_ITHRESH	4
#define QMAN_PIRQ_IPERIOD	100

/* Portal register assists */

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH	0x3000
#define QM_REG_EQCR_CI_CINH	0x3040
#define QM_REG_EQCR_ITR		0x3080
#define QM_REG_DQRR_PI_CINH	0x3100
#define QM_REG_DQRR_CI_CINH	0x3140
#define QM_REG_DQRR_ITR		0x3180
#define QM_REG_DQRR_DCAP	0x31C0
#define QM_REG_DQRR_SDQCR	0x3200
#define QM_REG_DQRR_VDQCR	0x3240
#define QM_REG_DQRR_PDQCR	0x3280
#define QM_REG_MR_PI_CINH	0x3300
#define QM_REG_MR_CI_CINH	0x3340
#define QM_REG_MR_ITR		0x3380
#define QM_REG_CFG		0x3500
#define QM_REG_ISR		0x3600
#define QM_REG_IER		0x3640
#define QM_REG_ISDR		0x3680
#define QM_REG_IIR		0x36C0
#define QM_REG_ITPR		0x3740

/* Cache-enabled register offsets */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3040
#define QM_CL_DQRR_PI_CENA	0x3100
#define QM_CL_DQRR_CI_CENA	0x3140
#define QM_CL_MR_PI_CENA	0x3300
#define QM_CL_MR_CI_CENA	0x3340
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940

#else
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH	0x0000
#define QM_REG_EQCR_CI_CINH	0x0004
#define QM_REG_EQCR_ITR		0x0008
#define QM_REG_DQRR_PI_CINH	0x0040
#define QM_REG_DQRR_CI_CINH	0x0044
#define QM_REG_DQRR_ITR		0x0048
#define QM_REG_DQRR_DCAP	0x0050
#define QM_REG_DQRR_SDQCR	0x0054
#define QM_REG_DQRR_VDQCR	0x0058
#define QM_REG_DQRR_PDQCR	0x005c
#define QM_REG_MR_PI_CINH	0x0080
#define QM_REG_MR_CI_CINH	0x0084
#define QM_REG_MR_ITR		0x0088
#define QM_REG_CFG		0x0100
#define QM_REG_ISR		0x0e00
#define QM_REG_IER		0x0e04
#define QM_REG_ISDR		0x0e08
#define QM_REG_IIR		0x0e0c
#define QM_REG_ITPR		0x0e14

/* Cache-enabled register offsets */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3100
#define QM_CL_DQRR_PI_CENA	0x3200
#define QM_CL_DQRR_CI_CENA	0x3300
#define QM_CL_MR_PI_CENA	0x3400
#define QM_CL_MR_CI_CENA	0x3500
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940
#endif

/*
 * BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
 * or other order-preserving primitives simply degrades performance. Hence the
 * use of the __raw_*() interfaces, which simply ensure that the compiler treats
 * the portal registers as volatile.
 */

/* Cache-enabled ring access */
#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
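/*
 * Each ring entry occupies one 64-byte cacheline (hence the "<< 6"),
 * e.g. qm_cl(ring, 3) resolves to ring + 0xc0, the fourth entry.
 */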

/*
 * Portal modes.
 *   Enum types:
 *     pmode == production mode
 *     cmode == consumption mode
 *     dmode == h/w dequeue mode
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate:
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define QM_EQCR_SIZE		8
#define QM_DQRR_SIZE		16
#define QM_MR_SIZE		8
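/*
 * All ring sizes are powers of two: the "& (SIZE - 1)" index arithmetic
 * and the carry-clear pointer wrapping used throughout depend on it.
 */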

/* "Enqueue Command" */
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	__be16 seqnum;
	u8 __reserved[4];
	__be32 fqid;	/* 24-bit */
	__be32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */

struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};

struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};

/* MC (Management Command) command */
/* "FQ" command layout */
struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	__be32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;

/* "CGR" command layout */
struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};

/* MC (Management Command) result */
/* "Query FQ" */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;			/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};

struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

struct qm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* same value as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return ioread32be(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}

/* --- EQCR API --- */

#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
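/*
 * sizeof(struct qm_eqcr_entry) is 64, so EQCR_CARRY is QM_EQCR_SIZE * 64
 * (0x200). The ring base is aligned to the ring size, so walking one
 * entry past the end sets only this "carry" bit, and masking it off
 * (below) wraps the pointer back to the ring base.
 */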

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;

	return (struct qm_eqcr_entry *)addr;
}

/* Bit-wise logic to convert a ring pointer to a ring index */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
	struct qm_eqcr_entry *partial = eqcr->cursor + 1;

	eqcr->cursor = eqcr_carryclear(partial);
	if (partial != eqcr->cursor)
		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
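	/*
	 * One EQCR entry is always held in reserve so that a full ring is
	 * never confused with an empty one (pi == ci in both cases), hence
	 * the "- 1" below.
	 */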
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |		/* QCSP_CFG: EST */
	      (eq_stash_prio << 26) |		/* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24);		/* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
{
	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
}

static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}

static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
}

static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}
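
/*
 * Sketch of how an enqueue is built from the helpers above (cf. the
 * qman_enqueue() path later in this driver):
 *
 *	eq = qm_eqcr_start_no_stash(portal);	(or qm_eqcr_start_stash())
 *	if (!eq)
 *		return -EBUSY;			(ring full)
 *	qm_fqid_set(eq, fqid);
 *	eq->fd = *fd;
 *	qm_eqcr_pvb_commit(portal, QM_EQCR_VERB_CMD_ENQUEUE);
 */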

static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}

static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}

/* --- DQRR API --- */

#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)

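/* Same carry-clear wrapping trick as eqcr_carryclear() above */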
static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
	return dqrr_carryclear(e + 1);
}

static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
	       ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
			QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) |	/* DQRR_MF */
	      ((dmode & 1) << 18) |			/* DP */
	      ((cmode & 3) << 16) |			/* DCM */
	      0xa0 |					/* RE+SE */
	      (0 ? 0x40 : 0) |				/* Ignore RP */
	      (0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}

static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (!dqrr->fill)
		return NULL;
	return dqrr->cursor;
}

static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}

static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
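	/*
	 * A newly-produced entry is one whose valid-bit matches the
	 * polarity we expect for this lap of the ring; h/w inverts VB
	 * each time the ring wraps, and dqrr->vbit mirrors that below.
	 */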
	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}

static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |			/* DQRR_DCAP::PK */
	       idx);					/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DQRR_DCAP::S */
	       (bitmask << 16));			/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	if (ithresh > QMAN_DQRR_IT_MAX)
		return -EINVAL;

	qm_out(portal, QM_REG_DQRR_ITR, ithresh);

	return 0;
}

/* --- MR API --- */

#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)

static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
	return mr_carryclear(e + 1);
}

static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}

static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (!mr->fill)
		return NULL;
	return mr->cursor;
}

static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);

	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
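	/* invalidate the slot we will inspect next, so it is re-read from h/w */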
	dpaa_invalidate_touch_ro(res);
}

static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}

/* --- Management command API --- */

static inline int qm_mc_init(struct qm_portal *portal)
{
	u8 rr0, rr1;
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	/*
	 * The expected valid bit polarity for the next CR command is 0
	 * if RR1 contains a valid response, and is 1 if RR0 contains a
	 * valid response. If both RR contain all 0, no command has been
	 * executed since reset, in which case the expected valid bit
	 * polarity is 1.
	 */
	rr0 = mc->rr->verb;
	rr1 = (mc->rr+1)->verb;
	if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
		mc->rridx = 1;
	else
		mc->rridx = 0;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}

static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}

static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}

static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{
	int timeout = QM_MCR_TIMEOUT;

	do {
		*mcr = qm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}
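
/*
 * Sketch of the canonical MC command sequence built from the helpers
 * above (the QUERYFQ/ALTER_* users later in this driver follow it):
 *
 *	mcc = qm_mc_start(&p->p);
 *	qm_fqid_set(&mcc->fq, fqid);
 *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 *	if (!qm_mc_result_timeout(&p->p, &mcr))
 *		(timed out; the MC interface is wedged)
 *	(then check mcr->verb & QM_MCR_VERB_MASK and mcr->result
 *	 against QM_MCR_RESULT_OK)
 */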

static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	fq->flags |= mask;
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	fq->flags &= ~mask;
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}

struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}

static struct workqueue_struct *qm_portal_wq;

int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
{
	int res;

	if (!portal)
		return -EINVAL;

	res = qm_dqrr_set_ithresh(&portal->p, ithresh);
	if (res)
		return res;

	portal->p.dqrr.ithresh = ithresh;

	return 0;
}
EXPORT_SYMBOL(qman_dqrr_set_ithresh);

void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
{
	if (portal && ithresh)
		*ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
}
EXPORT_SYMBOL(qman_dqrr_get_ithresh);

void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
{
	if (portal && iperiod)
		*iperiod = qm_in(&portal->p, QM_REG_ITPR);
}
EXPORT_SYMBOL(qman_portal_get_iperiod);

int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
{
	if (!portal || iperiod > QMAN_ITP_MAX)
		return -EINVAL;

	qm_out(&portal->p, QM_REG_ITPR, iperiod);

	return 0;
}
EXPORT_SYMBOL(qman_portal_set_iperiod);

int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}

/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

static struct qman_fq **fq_table;
static u32 num_fqids;

int qman_alloc_fq_table(u32 _num_fqids)
{
	num_fqids = _num_fqids;

	fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
				       num_fqids, 2));
	if (!fq_table)
		return -ENOMEM;

	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
		 fq_table, num_fqids * 2);
	return 0;
}

static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}

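/*
 * The DQRR/MR "tag" (context_b) is only 32 bits wide, so on 64-bit
 * kernels it carries a fq_table index rather than a pointer; on 32-bit
 * kernels the qman_fq pointer itself fits in the tag and no table
 * lookup is needed.
 */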
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
	u32 clear = 0;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI) {
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
	}
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}

static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		msleep(1);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}

static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
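	/*
	 * Mask all interrupt sources for now (ISDR all-ones); bits are
	 * cleared from the mask below, group by group, once EQCR, DQRR
	 * and MR have each been verified to be quiescent.
	 */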
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	qm_out(p, QM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_err(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISDR, 0);
	qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}

struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}

static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;
}

const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}

/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}

static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}

static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	qm_mr_cci_consume(&p->p, num);
	qman_p_irqsource_add(p, QM_PIRQ_MRI);
	preempt_enable();
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}
1539 | ||
1540 | /* | |
1541 | * remove some slowish-path stuff from the "fast path" and make sure it isn't | |
1542 | * inlined. | |
1543 | */ | |
1544 | static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) | |
1545 | { | |
1546 | p->vdqcr_owned = NULL; | |
1547 | fq_clear(fq, QMAN_FQ_STATE_VDQCR); | |
1548 | wake_up(&affine_queue); | |
1549 | } | |
1550 | ||
1551 | /* | |
1552 | * The only states that would conflict with other things if they ran at the | |
1553 | * same time on the same cpu are: | |
1554 | * | |
1555 | * (i) setting/clearing vdqcr_owned, and | |
1556 | * (ii) clearing the NE (Not Empty) flag. | |
1557 | * | |
1558 | * Both are safe. Because; | |
1559 | * | |
1560 | * (i) this clearing can only occur after qman_volatile_dequeue() has set the | |
1561 | * vdqcr_owned field (which it does before setting VDQCR), and | |
1562 | * qman_volatile_dequeue() blocks interrupts and preemption while this is | |
1563 | * done so that we can't interfere. | |
1564 | * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as | |
1565 | * with (i) that API prevents us from interfering until it's safe. | |
1566 | * | |
1567 | * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far | |
 * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net
1569 | * advantage comes from this function not having to "lock" anything at all. | |
1570 | * | |
1571 | * Note also that the callbacks are invoked at points which are safe against the | |
1572 | * above potential conflicts, but that this function itself is not re-entrant | |
1573 | * (this is because the function tracks one end of each FIFO in the portal and | |
1574 | * we do *not* want to lock that). So the consequence is that it is safe for | |
1575 | * user callbacks to call into any QMan API. | |
1576 | */ | |
1577 | static inline unsigned int __poll_portal_fast(struct qman_portal *p, | |
1578 | unsigned int poll_limit) | |
1579 | { | |
1580 | const struct qm_dqrr_entry *dq; | |
1581 | struct qman_fq *fq; | |
1582 | enum qman_cb_dqrr_result res; | |
1583 | unsigned int limit = 0; | |
1584 | ||
1585 | do { | |
1586 | qm_dqrr_pvb_update(&p->p); | |
1587 | dq = qm_dqrr_current(&p->p); | |
1588 | if (!dq) | |
1589 | break; | |
1590 | ||
1591 | if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { | |
1592 | /* | |
1593 | * VDQCR: don't trust context_b as the FQ may have | |
1594 | * been configured for h/w consumption and we're | |
1595 | * draining it post-retirement. | |
1596 | */ | |
1597 | fq = p->vdqcr_owned; | |
1598 | /* | |
1599 | * We only set QMAN_FQ_STATE_NE when retiring, so we | |
1600 | * only need to check for clearing it when doing | |
1601 | * volatile dequeues. It's one less thing to check | |
1602 | * in the critical path (SDQCR). | |
1603 | */ | |
1604 | if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) | |
1605 | fq_clear(fq, QMAN_FQ_STATE_NE); | |
1606 | /* | |
1607 | * This is duplicated from the SDQCR code, but we | |
1608 | * have stuff to do before *and* after this callback, | |
1609 | * and we don't want multiple if()s in the critical | |
1610 | * path (SDQCR). | |
1611 | */ | |
1612 | res = fq->cb.dqrr(p, fq, dq); | |
1613 | if (res == qman_cb_dqrr_stop) | |
1614 | break; | |
1615 | /* Check for VDQCR completion */ | |
1616 | if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) | |
1617 | clear_vdqcr(p, fq); | |
1618 | } else { | |
1619 | /* SDQCR: context_b points to the FQ */ | |
1620 | fq = tag_to_fq(be32_to_cpu(dq->context_b)); | |
1621 | /* Now let the callback do its stuff */ | |
1622 | res = fq->cb.dqrr(p, fq, dq); | |
			/*
			 * The callback can request that we exit without
			 * consuming this entry or advancing.
			 */
1627 | if (res == qman_cb_dqrr_stop) | |
1628 | break; | |
1629 | } | |
1630 | /* Interpret 'dq' from a driver perspective. */ | |
1631 | /* | |
1632 | * Parking isn't possible unless HELDACTIVE was set. NB, | |
1633 | * FORCEELIGIBLE implies HELDACTIVE, so we only need to | |
1634 | * check for HELDACTIVE to cover both. | |
1635 | */ | |
1636 | DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || | |
1637 | (res != qman_cb_dqrr_park)); | |
		/* "defer" just means "skip it, I'll consume it myself later on" */
1639 | if (res != qman_cb_dqrr_defer) | |
1640 | qm_dqrr_cdc_consume_1ptr(&p->p, dq, | |
1641 | res == qman_cb_dqrr_park); | |
1642 | /* Move forward */ | |
1643 | qm_dqrr_next(&p->p); | |
1644 | /* | |
1645 | * Entry processed and consumed, increment our counter. The | |
1646 | * callback can request that we exit after consuming the | |
1647 | * entry, and we also exit if we reach our processing limit, | |
1648 | * so loop back only if neither of these conditions is met. | |
1649 | */ | |
1650 | } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop); | |
1651 | ||
1652 | return limit; | |
1653 | } | |
1654 | ||
1655 | void qman_p_irqsource_add(struct qman_portal *p, u32 bits) | |
1656 | { | |
1657 | unsigned long irqflags; | |
1658 | ||
1659 | local_irq_save(irqflags); | |
1660 | p->irq_sources |= bits & QM_PIRQ_VISIBLE; | |
1661 | qm_out(&p->p, QM_REG_IER, p->irq_sources); | |
1662 | local_irq_restore(irqflags); | |
1663 | } | |
1664 | EXPORT_SYMBOL(qman_p_irqsource_add); | |
1665 | ||
1666 | void qman_p_irqsource_remove(struct qman_portal *p, u32 bits) | |
1667 | { | |
1668 | unsigned long irqflags; | |
1669 | u32 ier; | |
1670 | ||
1671 | /* | |
1672 | * Our interrupt handler only processes+clears status register bits that | |
1673 | * are in p->irq_sources. As we're trimming that mask, if one of them | |
1674 | * were to assert in the status register just before we remove it from | |
1675 | * the enable register, there would be an interrupt-storm when we | |
1676 | * release the IRQ lock. So we wait for the enable register update to | |
1677 | * take effect in h/w (by reading it back) and then clear all other bits | |
	 * in the status register. I.e. we clear them from ISR once it's certain
1679 | * IER won't allow them to reassert. | |
1680 | */ | |
1681 | local_irq_save(irqflags); | |
1682 | bits &= QM_PIRQ_VISIBLE; | |
1683 | p->irq_sources &= ~bits; | |
1684 | qm_out(&p->p, QM_REG_IER, p->irq_sources); | |
1685 | ier = qm_in(&p->p, QM_REG_IER); | |
1686 | /* | |
1687 | * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a | |
1688 | * data-dependency, ie. to protect against re-ordering. | |
1689 | */ | |
1690 | qm_out(&p->p, QM_REG_ISR, ~ier); | |
1691 | local_irq_restore(irqflags); | |
1692 | } | |
1693 | EXPORT_SYMBOL(qman_p_irqsource_remove); | |
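
/*
 * Illustrative sketch, not part of the driver: __poll_portal_slow() above
 * uses this add/remove pair to throttle message-ring interrupts - it masks
 * QM_PIRQ_MRI before queueing mr_work, and qm_mr_process_task() re-adds it
 * once the backlog is drained. A hypothetical run-to-completion user could
 * apply the same pattern to the DQRR interrupt source:
 */
static void __maybe_unused example_mask_dqri(struct qman_portal *p)
{
	/* Stop DQRI from firing while frames are consumed by polling */
	qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
}

static void __maybe_unused example_unmask_dqri(struct qman_portal *p)
{
	/* Resume interrupt-driven dequeue processing */
	qman_p_irqsource_add(p, QM_PIRQ_DQRI);
}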
1694 | ||
1695 | const cpumask_t *qman_affine_cpus(void) | |
1696 | { | |
1697 | return &affine_mask; | |
1698 | } | |
1699 | EXPORT_SYMBOL(qman_affine_cpus); | |
1700 | ||
1701 | u16 qman_affine_channel(int cpu) | |
1702 | { | |
1703 | if (cpu < 0) { | |
1704 | struct qman_portal *portal = get_affine_portal(); | |
1705 | ||
1706 | cpu = portal->config->cpu; | |
1707 | put_affine_portal(); | |
1708 | } | |
1709 | WARN_ON(!cpumask_test_cpu(cpu, &affine_mask)); | |
1710 | return affine_channels[cpu]; | |
1711 | } | |
1712 | EXPORT_SYMBOL(qman_affine_channel); | |
1713 | ||
1714 | struct qman_portal *qman_get_affine_portal(int cpu) | |
1715 | { | |
1716 | return affine_portals[cpu]; | |
1717 | } | |
1718 | EXPORT_SYMBOL(qman_get_affine_portal); | |
1719 | ||
1720 | int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) | |
1721 | { | |
1722 | return __poll_portal_fast(p, limit); | |
1723 | } | |
1724 | EXPORT_SYMBOL(qman_p_poll_dqrr); | |
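
/*
 * Illustrative sketch, not part of the driver: qman_p_poll_dqrr() is the
 * hook for run-to-completion consumers (e.g. a NAPI-style poll loop) that
 * keep DQRI masked and poll instead. "budget" is a hypothetical quota.
 */
static int __maybe_unused example_poll(struct qman_portal *p, int budget)
{
	/*
	 * Consumes up to "budget" DQRR entries, dispatching each to the
	 * owning fq->cb.dqrr callback, and returns the number processed.
	 */
	return qman_p_poll_dqrr(p, budget);
}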
1725 | ||
1726 | void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) | |
1727 | { | |
1728 | unsigned long irqflags; | |
1729 | ||
1730 | local_irq_save(irqflags); | |
1731 | pools &= p->config->pools; | |
1732 | p->sdqcr |= pools; | |
1733 | qm_dqrr_sdqcr_set(&p->p, p->sdqcr); | |
1734 | local_irq_restore(irqflags); | |
1735 | } | |
1736 | EXPORT_SYMBOL(qman_p_static_dequeue_add); | |
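
/*
 * Illustrative sketch, not part of the driver: to make this portal dequeue
 * from a pool channel, the channel number must first be converted to its
 * SDQCR bit; QM_SDQCR_CHANNELS_POOL_CONV() (also used by qman_shutdown_fq()
 * below) does that. "pool_channel" is a hypothetical ID obtained from
 * qman_alloc_pool_range().
 */
static void __maybe_unused example_listen_on_pool(struct qman_portal *p,
						  u32 pool_channel)
{
	qman_p_static_dequeue_add(p,
				  QM_SDQCR_CHANNELS_POOL_CONV(pool_channel));
}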
1737 | ||
1738 | /* Frame queue API */ | |
1739 | ||
1740 | static const char *mcr_result_str(u8 result) | |
1741 | { | |
1742 | switch (result) { | |
1743 | case QM_MCR_RESULT_NULL: | |
1744 | return "QM_MCR_RESULT_NULL"; | |
1745 | case QM_MCR_RESULT_OK: | |
1746 | return "QM_MCR_RESULT_OK"; | |
1747 | case QM_MCR_RESULT_ERR_FQID: | |
1748 | return "QM_MCR_RESULT_ERR_FQID"; | |
1749 | case QM_MCR_RESULT_ERR_FQSTATE: | |
1750 | return "QM_MCR_RESULT_ERR_FQSTATE"; | |
1751 | case QM_MCR_RESULT_ERR_NOTEMPTY: | |
1752 | return "QM_MCR_RESULT_ERR_NOTEMPTY"; | |
1753 | case QM_MCR_RESULT_PENDING: | |
1754 | return "QM_MCR_RESULT_PENDING"; | |
1755 | case QM_MCR_RESULT_ERR_BADCOMMAND: | |
1756 | return "QM_MCR_RESULT_ERR_BADCOMMAND"; | |
1757 | } | |
1758 | return "<unknown MCR result>"; | |
1759 | } | |
1760 | ||
1761 | int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) | |
1762 | { | |
1763 | if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { | |
1764 | int ret = qman_alloc_fqid(&fqid); | |
1765 | ||
1766 | if (ret) | |
1767 | return ret; | |
1768 | } | |
1769 | fq->fqid = fqid; | |
1770 | fq->flags = flags; | |
1771 | fq->state = qman_fq_state_oos; | |
1772 | fq->cgr_groupid = 0; | |
1773 | ||
1774 | /* A context_b of 0 is allegedly special, so don't use that fqid */ | |
1775 | if (fqid == 0 || fqid >= num_fqids) { | |
		WARN(1, "bad fqid %u\n", fqid);
1777 | return -EINVAL; | |
1778 | } | |
1779 | ||
1780 | fq->idx = fqid * 2; | |
1781 | if (flags & QMAN_FQ_FLAG_NO_MODIFY) | |
1782 | fq->idx++; | |
1783 | ||
1784 | WARN_ON(fq_table[fq->idx]); | |
1785 | fq_table[fq->idx] = fq; | |
1786 | ||
1787 | return 0; | |
1788 | } | |
1789 | EXPORT_SYMBOL(qman_create_fq); | |
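
/*
 * Illustrative sketch, not part of the driver: minimal FQ creation with a
 * dynamically allocated FQID. "example_dqrr_cb" and "example_ern_cb" are
 * hypothetical caller-supplied callbacks whose types match the qman_fq_cb
 * fields dispatched by __poll_portal_fast() and qm_mr_process_task() above.
 */
static int __maybe_unused example_new_fq(struct qman_fq *fq,
					 qman_cb_dqrr example_dqrr_cb,
					 qman_cb_mr example_ern_cb)
{
	fq->cb.dqrr = example_dqrr_cb;
	fq->cb.ern = example_ern_cb;
	/* The fqid argument is ignored when DYNAMIC_FQID is set */
	return qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
}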
1790 | ||
1791 | void qman_destroy_fq(struct qman_fq *fq) | |
1792 | { | |
1793 | /* | |
1794 | * We don't need to lock the FQ as it is a pre-condition that the FQ be | |
1795 | * quiesced. Instead, run some checks. | |
1796 | */ | |
1797 | switch (fq->state) { | |
1798 | case qman_fq_state_parked: | |
1799 | case qman_fq_state_oos: | |
1800 | if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) | |
1801 | qman_release_fqid(fq->fqid); | |
1802 | ||
1803 | DPAA_ASSERT(fq_table[fq->idx]); | |
1804 | fq_table[fq->idx] = NULL; | |
1805 | return; | |
1806 | default: | |
1807 | break; | |
1808 | } | |
	DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
1810 | } | |
1811 | EXPORT_SYMBOL(qman_destroy_fq); | |
1812 | ||
1813 | u32 qman_fq_fqid(struct qman_fq *fq) | |
1814 | { | |
1815 | return fq->fqid; | |
1816 | } | |
1817 | EXPORT_SYMBOL(qman_fq_fqid); | |
1818 | ||
1819 | int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) | |
1820 | { | |
1821 | union qm_mc_command *mcc; | |
1822 | union qm_mc_result *mcr; | |
1823 | struct qman_portal *p; | |
1824 | u8 res, myverb; | |
1825 | int ret = 0; | |
1826 | ||
1827 | myverb = (flags & QMAN_INITFQ_FLAG_SCHED) | |
1828 | ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; | |
1829 | ||
1830 | if (fq->state != qman_fq_state_oos && | |
1831 | fq->state != qman_fq_state_parked) | |
1832 | return -EINVAL; | |
1833 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
1834 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | |
1835 | return -EINVAL; | |
1836 | #endif | |
1837 | if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) { | |
		/* OAC can't be set at the same time as TDTHRESH */
1839 | if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH) | |
1840 | return -EINVAL; | |
1841 | } | |
1842 | /* Issue an INITFQ_[PARKED|SCHED] management command */ | |
1843 | p = get_affine_portal(); | |
1844 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | |
1845 | (fq->state != qman_fq_state_oos && | |
1846 | fq->state != qman_fq_state_parked)) { | |
1847 | ret = -EBUSY; | |
1848 | goto out; | |
1849 | } | |
1850 | mcc = qm_mc_start(&p->p); | |
1851 | if (opts) | |
1852 | mcc->initfq = *opts; | |
1853 | qm_fqid_set(&mcc->fq, fq->fqid); | |
1854 | mcc->initfq.count = 0; | |
1855 | /* | |
1856 | * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a | |
1857 | * demux pointer. Otherwise, the caller-provided value is allowed to | |
1858 | * stand, don't overwrite it. | |
1859 | */ | |
1860 | if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { | |
1861 | dma_addr_t phys_fq; | |
1862 | ||
1863 | mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB); | |
1864 | mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq)); | |
1865 | /* | |
1866 | * and the physical address - NB, if the user wasn't trying to | |
1867 | * set CONTEXTA, clear the stashing settings. | |
1868 | */ | |
1869 | if (!(be16_to_cpu(mcc->initfq.we_mask) & | |
1870 | QM_INITFQ_WE_CONTEXTA)) { | |
1871 | mcc->initfq.we_mask |= | |
1872 | cpu_to_be16(QM_INITFQ_WE_CONTEXTA); | |
1873 | memset(&mcc->initfq.fqd.context_a, 0, | |
1874 | sizeof(mcc->initfq.fqd.context_a)); | |
1875 | } else { | |
1876 | struct qman_portal *p = qman_dma_portal; | |
1877 | ||
1878 | phys_fq = dma_map_single(p->config->dev, fq, | |
1879 | sizeof(*fq), DMA_TO_DEVICE); | |
1880 | if (dma_mapping_error(p->config->dev, phys_fq)) { | |
1881 | dev_err(p->config->dev, "dma_mapping failed\n"); | |
1882 | ret = -EIO; | |
1883 | goto out; | |
1884 | } | |
1885 | ||
1886 | qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); | |
1887 | } | |
1888 | } | |
1889 | if (flags & QMAN_INITFQ_FLAG_LOCAL) { | |
1890 | int wq = 0; | |
1891 | ||
1892 | if (!(be16_to_cpu(mcc->initfq.we_mask) & | |
1893 | QM_INITFQ_WE_DESTWQ)) { | |
1894 | mcc->initfq.we_mask |= | |
1895 | cpu_to_be16(QM_INITFQ_WE_DESTWQ); | |
1896 | wq = 4; | |
1897 | } | |
1898 | qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); | |
1899 | } | |
1900 | qm_mc_commit(&p->p, myverb); | |
1901 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
1902 | dev_err(p->config->dev, "MCR timeout\n"); | |
1903 | ret = -ETIMEDOUT; | |
1904 | goto out; | |
1905 | } | |
1906 | ||
1907 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); | |
1908 | res = mcr->result; | |
1909 | if (res != QM_MCR_RESULT_OK) { | |
1910 | ret = -EIO; | |
1911 | goto out; | |
1912 | } | |
1913 | if (opts) { | |
1914 | if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) { | |
1915 | if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE) | |
1916 | fq_set(fq, QMAN_FQ_STATE_CGR_EN); | |
1917 | else | |
1918 | fq_clear(fq, QMAN_FQ_STATE_CGR_EN); | |
1919 | } | |
1920 | if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID) | |
1921 | fq->cgr_groupid = opts->fqd.cgid; | |
1922 | } | |
1923 | fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? | |
1924 | qman_fq_state_sched : qman_fq_state_parked; | |
1925 | ||
1926 | out: | |
1927 | put_affine_portal(); | |
1928 | return ret; | |
1929 | } | |
1930 | EXPORT_SYMBOL(qman_init_fq); | |
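
/*
 * Illustrative sketch, not part of the driver: scheduling an FQ onto an
 * explicit destination work queue. The write-enable mask is built the same
 * way qman_init_fq() itself extends it; "channel" is a hypothetical target
 * (e.g. the result of qman_affine_channel()) and work queue 3 is arbitrary.
 */
static int __maybe_unused example_init_fq(struct qman_fq *fq, u16 channel)
{
	struct qm_mcc_initfq opts;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ);
	qm_fqd_set_destwq(&opts.fqd, channel, 3);
	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
}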
1931 | ||
1932 | int qman_schedule_fq(struct qman_fq *fq) | |
1933 | { | |
1934 | union qm_mc_command *mcc; | |
1935 | union qm_mc_result *mcr; | |
1936 | struct qman_portal *p; | |
1937 | int ret = 0; | |
1938 | ||
1939 | if (fq->state != qman_fq_state_parked) | |
1940 | return -EINVAL; | |
1941 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
1942 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | |
1943 | return -EINVAL; | |
1944 | #endif | |
	/* Issue an ALTERFQ_SCHED management command */
1946 | p = get_affine_portal(); | |
1947 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | |
1948 | fq->state != qman_fq_state_parked) { | |
1949 | ret = -EBUSY; | |
1950 | goto out; | |
1951 | } | |
1952 | mcc = qm_mc_start(&p->p); | |
1953 | qm_fqid_set(&mcc->fq, fq->fqid); | |
1954 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); | |
1955 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
1956 | dev_err(p->config->dev, "ALTER_SCHED timeout\n"); | |
1957 | ret = -ETIMEDOUT; | |
1958 | goto out; | |
1959 | } | |
1960 | ||
1961 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); | |
1962 | if (mcr->result != QM_MCR_RESULT_OK) { | |
1963 | ret = -EIO; | |
1964 | goto out; | |
1965 | } | |
1966 | fq->state = qman_fq_state_sched; | |
1967 | out: | |
1968 | put_affine_portal(); | |
1969 | return ret; | |
1970 | } | |
1971 | EXPORT_SYMBOL(qman_schedule_fq); | |
1972 | ||
1973 | int qman_retire_fq(struct qman_fq *fq, u32 *flags) | |
1974 | { | |
1975 | union qm_mc_command *mcc; | |
1976 | union qm_mc_result *mcr; | |
1977 | struct qman_portal *p; | |
1978 | int ret; | |
1979 | u8 res; | |
1980 | ||
1981 | if (fq->state != qman_fq_state_parked && | |
1982 | fq->state != qman_fq_state_sched) | |
1983 | return -EINVAL; | |
1984 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
1985 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | |
1986 | return -EINVAL; | |
1987 | #endif | |
1988 | p = get_affine_portal(); | |
1989 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | |
1990 | fq->state == qman_fq_state_retired || | |
1991 | fq->state == qman_fq_state_oos) { | |
1992 | ret = -EBUSY; | |
1993 | goto out; | |
1994 | } | |
1995 | mcc = qm_mc_start(&p->p); | |
1996 | qm_fqid_set(&mcc->fq, fq->fqid); | |
1997 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | |
1998 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
1999 | dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); | |
2000 | ret = -ETIMEDOUT; | |
2001 | goto out; | |
2002 | } | |
2003 | ||
2004 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); | |
2005 | res = mcr->result; | |
2006 | /* | |
2007 | * "Elegant" would be to treat OK/PENDING the same way; set CHANGING, | |
2008 | * and defer the flags until FQRNI or FQRN (respectively) show up. But | |
2009 | * "Friendly" is to process OK immediately, and not set CHANGING. We do | |
2010 | * friendly, otherwise the caller doesn't necessarily have a fully | |
2011 | * "retired" FQ on return even if the retirement was immediate. However | |
2012 | * this does mean some code duplication between here and | |
2013 | * fq_state_change(). | |
2014 | */ | |
2015 | if (res == QM_MCR_RESULT_OK) { | |
2016 | ret = 0; | |
2017 | /* Process 'fq' right away, we'll ignore FQRNI */ | |
2018 | if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) | |
2019 | fq_set(fq, QMAN_FQ_STATE_NE); | |
2020 | if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) | |
2021 | fq_set(fq, QMAN_FQ_STATE_ORL); | |
2022 | if (flags) | |
2023 | *flags = fq->flags; | |
2024 | fq->state = qman_fq_state_retired; | |
2025 | if (fq->cb.fqs) { | |
2026 | /* | |
2027 | * Another issue with supporting "immediate" retirement | |
2028 | * is that we're forced to drop FQRNIs, because by the | |
2029 | * time they're seen it may already be "too late" (the | |
2030 | * fq may have been OOS'd and free()'d already). But if | |
2031 | * the upper layer wants a callback whether it's | |
2032 | * immediate or not, we have to fake a "MR" entry to | |
2033 | * look like an FQRNI... | |
2034 | */ | |
2035 | union qm_mr_entry msg; | |
2036 | ||
2037 | msg.verb = QM_MR_VERB_FQRNI; | |
2038 | msg.fq.fqs = mcr->alterfq.fqs; | |
2039 | qm_fqid_set(&msg.fq, fq->fqid); | |
2040 | msg.fq.context_b = cpu_to_be32(fq_to_tag(fq)); | |
2041 | fq->cb.fqs(p, fq, &msg); | |
2042 | } | |
2043 | } else if (res == QM_MCR_RESULT_PENDING) { | |
2044 | ret = 1; | |
2045 | fq_set(fq, QMAN_FQ_STATE_CHANGING); | |
2046 | } else { | |
2047 | ret = -EIO; | |
2048 | } | |
2049 | out: | |
2050 | put_affine_portal(); | |
2051 | return ret; | |
2052 | } | |
2053 | EXPORT_SYMBOL(qman_retire_fq); | |
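
/*
 * Illustrative sketch, not part of the driver: handling the three outcomes
 * documented above - immediate retirement (0), retirement deferred to the
 * cb.fqs callback (1), and failure (<0).
 */
static int __maybe_unused example_retire(struct qman_fq *fq)
{
	u32 flags = 0;
	int ret = qman_retire_fq(fq, &flags);

	if (ret < 0)
		return ret;	/* -EINVAL, -EBUSY, -ETIMEDOUT or -EIO */
	if (ret == 1)
		return 0;	/* FQRN pending; cb.fqs will be invoked */
	if (flags & QMAN_FQ_STATE_NE)
		pr_info("FQ %u retired non-empty, drain before OOS\n",
			qman_fq_fqid(fq));
	return 0;
}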
2054 | ||
2055 | int qman_oos_fq(struct qman_fq *fq) | |
2056 | { | |
2057 | union qm_mc_command *mcc; | |
2058 | union qm_mc_result *mcr; | |
2059 | struct qman_portal *p; | |
2060 | int ret = 0; | |
2061 | ||
2062 | if (fq->state != qman_fq_state_retired) | |
2063 | return -EINVAL; | |
2064 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
2065 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | |
2066 | return -EINVAL; | |
2067 | #endif | |
2068 | p = get_affine_portal(); | |
2069 | if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) || | |
2070 | fq->state != qman_fq_state_retired) { | |
2071 | ret = -EBUSY; | |
2072 | goto out; | |
2073 | } | |
2074 | mcc = qm_mc_start(&p->p); | |
2075 | qm_fqid_set(&mcc->fq, fq->fqid); | |
2076 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | |
2077 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2078 | ret = -ETIMEDOUT; | |
2079 | goto out; | |
2080 | } | |
2081 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); | |
2082 | if (mcr->result != QM_MCR_RESULT_OK) { | |
2083 | ret = -EIO; | |
2084 | goto out; | |
2085 | } | |
2086 | fq->state = qman_fq_state_oos; | |
2087 | out: | |
2088 | put_affine_portal(); | |
2089 | return ret; | |
2090 | } | |
2091 | EXPORT_SYMBOL(qman_oos_fq); | |
2092 | ||
2093 | int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) | |
2094 | { | |
2095 | union qm_mc_command *mcc; | |
2096 | union qm_mc_result *mcr; | |
2097 | struct qman_portal *p = get_affine_portal(); | |
2098 | int ret = 0; | |
2099 | ||
2100 | mcc = qm_mc_start(&p->p); | |
2101 | qm_fqid_set(&mcc->fq, fq->fqid); | |
2102 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); | |
2103 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2104 | ret = -ETIMEDOUT; | |
2105 | goto out; | |
2106 | } | |
2107 | ||
2108 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); | |
2109 | if (mcr->result == QM_MCR_RESULT_OK) | |
2110 | *fqd = mcr->queryfq.fqd; | |
2111 | else | |
2112 | ret = -EIO; | |
2113 | out: | |
2114 | put_affine_portal(); | |
2115 | return ret; | |
2116 | } | |
2117 | ||
2118 | int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) | |
2119 | { | |
2120 | union qm_mc_command *mcc; | |
2121 | union qm_mc_result *mcr; | |
2122 | struct qman_portal *p = get_affine_portal(); | |
2123 | int ret = 0; | |
2124 | ||
2125 | mcc = qm_mc_start(&p->p); | |
2126 | qm_fqid_set(&mcc->fq, fq->fqid); | |
2127 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); | |
2128 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2129 | ret = -ETIMEDOUT; | |
2130 | goto out; | |
2131 | } | |
2132 | ||
2133 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); | |
2134 | if (mcr->result == QM_MCR_RESULT_OK) | |
2135 | *np = mcr->queryfq_np; | |
2136 | else if (mcr->result == QM_MCR_RESULT_ERR_FQID) | |
2137 | ret = -ERANGE; | |
2138 | else | |
2139 | ret = -EIO; | |
2140 | out: | |
2141 | put_affine_portal(); | |
2142 | return ret; | |
2143 | } | |
2144 | EXPORT_SYMBOL(qman_query_fq_np); | |
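
/*
 * Illustrative sketch, not part of the driver: checking whether an FQ is
 * out-of-service, with the same state masking that qpool_cleanup() and
 * cgr_cleanup() below apply.
 */
static bool __maybe_unused example_fq_is_oos(struct qman_fq *fq)
{
	struct qm_mcr_queryfq_np np;

	if (qman_query_fq_np(fq, &np))
		return false;	/* treat query errors as "not known OOS" */
	return (np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS;
}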
2145 | ||
2146 | static int qman_query_cgr(struct qman_cgr *cgr, | |
2147 | struct qm_mcr_querycgr *cgrd) | |
2148 | { | |
2149 | union qm_mc_command *mcc; | |
2150 | union qm_mc_result *mcr; | |
2151 | struct qman_portal *p = get_affine_portal(); | |
2152 | int ret = 0; | |
2153 | ||
2154 | mcc = qm_mc_start(&p->p); | |
2155 | mcc->cgr.cgid = cgr->cgrid; | |
2156 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); | |
2157 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2158 | ret = -ETIMEDOUT; | |
2159 | goto out; | |
2160 | } | |
2161 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); | |
2162 | if (mcr->result == QM_MCR_RESULT_OK) | |
2163 | *cgrd = mcr->querycgr; | |
2164 | else { | |
2165 | dev_err(p->config->dev, "QUERY_CGR failed: %s\n", | |
2166 | mcr_result_str(mcr->result)); | |
2167 | ret = -EIO; | |
2168 | } | |
2169 | out: | |
2170 | put_affine_portal(); | |
2171 | return ret; | |
2172 | } | |
2173 | ||
2174 | int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result) | |
2175 | { | |
2176 | struct qm_mcr_querycgr query_cgr; | |
2177 | int err; | |
2178 | ||
2179 | err = qman_query_cgr(cgr, &query_cgr); | |
2180 | if (err) | |
2181 | return err; | |
2182 | ||
2183 | *result = !!query_cgr.cgr.cs; | |
2184 | return 0; | |
2185 | } | |
2186 | EXPORT_SYMBOL(qman_query_cgr_congested); | |
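
/*
 * Illustrative sketch, not part of the driver: a caller might sample the
 * instantaneous congestion state before enqueuing to FQs attached to this
 * CGR, treating query failures conservatively.
 */
static bool __maybe_unused example_cgr_congested(struct qman_cgr *cgr)
{
	bool congested;

	if (qman_query_cgr_congested(cgr, &congested))
		return false;	/* query failed; report "not congested" */
	return congested;
}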
2187 | ||
2188 | /* internal function used as a wait_event() expression */ | |
2189 | static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) | |
2190 | { | |
2191 | unsigned long irqflags; | |
2192 | int ret = -EBUSY; | |
2193 | ||
2194 | local_irq_save(irqflags); | |
2195 | if (p->vdqcr_owned) | |
2196 | goto out; | |
2197 | if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) | |
2198 | goto out; | |
2199 | ||
2200 | fq_set(fq, QMAN_FQ_STATE_VDQCR); | |
2201 | p->vdqcr_owned = fq; | |
2202 | qm_dqrr_vdqcr_set(&p->p, vdqcr); | |
2203 | ret = 0; | |
2204 | out: | |
2205 | local_irq_restore(irqflags); | |
2206 | return ret; | |
2207 | } | |
2208 | ||
2209 | static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) | |
2210 | { | |
2211 | int ret; | |
2212 | ||
2213 | *p = get_affine_portal(); | |
2214 | ret = set_p_vdqcr(*p, fq, vdqcr); | |
2215 | put_affine_portal(); | |
2216 | return ret; | |
2217 | } | |
2218 | ||
2219 | static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, | |
2220 | u32 vdqcr, u32 flags) | |
2221 | { | |
2222 | int ret = 0; | |
2223 | ||
2224 | if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) | |
2225 | ret = wait_event_interruptible(affine_queue, | |
2226 | !set_vdqcr(p, fq, vdqcr)); | |
2227 | else | |
2228 | wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr)); | |
2229 | return ret; | |
2230 | } | |
2231 | ||
2232 | int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr) | |
2233 | { | |
2234 | struct qman_portal *p; | |
2235 | int ret; | |
2236 | ||
2237 | if (fq->state != qman_fq_state_parked && | |
2238 | fq->state != qman_fq_state_retired) | |
2239 | return -EINVAL; | |
2240 | if (vdqcr & QM_VDQCR_FQID_MASK) | |
2241 | return -EINVAL; | |
2242 | if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) | |
2243 | return -EBUSY; | |
2244 | vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; | |
2245 | if (flags & QMAN_VOLATILE_FLAG_WAIT) | |
2246 | ret = wait_vdqcr_start(&p, fq, vdqcr, flags); | |
2247 | else | |
2248 | ret = set_vdqcr(&p, fq, vdqcr); | |
2249 | if (ret) | |
2250 | return ret; | |
2251 | /* VDQCR is set */ | |
2252 | if (flags & QMAN_VOLATILE_FLAG_FINISH) { | |
2253 | if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) | |
2254 | /* | |
2255 | * NB: don't propagate any error - the caller wouldn't | |
2256 | * know whether the VDQCR was issued or not. A signal | |
2257 | * could arrive after returning anyway, so the caller | |
2258 | * can check signal_pending() if that's an issue. | |
2259 | */ | |
2260 | wait_event_interruptible(affine_queue, | |
2261 | !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); | |
2262 | else | |
2263 | wait_event(affine_queue, | |
2264 | !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); | |
2265 | } | |
2266 | return 0; | |
2267 | } | |
2268 | EXPORT_SYMBOL(qman_volatile_dequeue); | |
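
/*
 * Illustrative sketch, not part of the driver: draining a retired FQ with a
 * single "till empty" volatile dequeue, blocking until the command is both
 * issued and finished. QM_VDQCR_NUMFRAMES_TILLEMPTY is the drain-everything
 * encoding from qman.h; frames arrive via fq->cb.dqrr through the
 * UNSCHEDULED path in __poll_portal_fast().
 */
static int __maybe_unused example_drain_fq(struct qman_fq *fq)
{
	return qman_volatile_dequeue(fq,
				     QMAN_VOLATILE_FLAG_WAIT |
				     QMAN_VOLATILE_FLAG_FINISH,
				     QM_VDQCR_NUMFRAMES_TILLEMPTY);
}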
2269 | ||
2270 | static void update_eqcr_ci(struct qman_portal *p, u8 avail) | |
2271 | { | |
2272 | if (avail) | |
2273 | qm_eqcr_cce_prefetch(&p->p); | |
2274 | else | |
2275 | qm_eqcr_cce_update(&p->p); | |
2276 | } | |
2277 | ||
2278 | int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) | |
2279 | { | |
2280 | struct qman_portal *p; | |
2281 | struct qm_eqcr_entry *eq; | |
2282 | unsigned long irqflags; | |
	u8 avail;
	int ret = 0;
2284 | ||
2285 | p = get_affine_portal(); | |
2286 | local_irq_save(irqflags); | |
2287 | ||
2288 | if (p->use_eqcr_ci_stashing) { | |
2289 | /* | |
2290 | * The stashing case is easy, only update if we need to in | |
2291 | * order to try and liberate ring entries. | |
2292 | */ | |
2293 | eq = qm_eqcr_start_stash(&p->p); | |
2294 | } else { | |
2295 | /* | |
2296 | * The non-stashing case is harder, need to prefetch ahead of | |
2297 | * time. | |
2298 | */ | |
2299 | avail = qm_eqcr_get_avail(&p->p); | |
2300 | if (avail < 2) | |
2301 | update_eqcr_ci(p, avail); | |
2302 | eq = qm_eqcr_start_no_stash(&p->p); | |
2303 | } | |
2304 | ||
	if (unlikely(!eq)) {
		ret = -EBUSY;
		goto out;
	}
2307 | ||
2308 | qm_fqid_set(eq, fq->fqid); | |
2309 | eq->tag = cpu_to_be32(fq_to_tag(fq)); | |
2310 | eq->fd = *fd; | |
2311 | ||
2312 | qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); | |
2313 | out: | |
2314 | local_irq_restore(irqflags); | |
2315 | put_affine_portal(); | |
	return ret;
2317 | } | |
2318 | EXPORT_SYMBOL(qman_enqueue); | |
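
/*
 * Illustrative sketch, not part of the driver: a full EQCR makes
 * qman_enqueue() fail fast with -EBUSY rather than wait, so callers
 * typically retry a bounded number of times. "retries" is a hypothetical
 * bound.
 */
static int __maybe_unused example_enqueue(struct qman_fq *fq,
					  const struct qm_fd *fd, int retries)
{
	int err;

	do {
		err = qman_enqueue(fq, fd);
	} while (err == -EBUSY && --retries > 0);
	return err;
}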
2319 | ||
2320 | static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags, | |
2321 | struct qm_mcc_initcgr *opts) | |
2322 | { | |
2323 | union qm_mc_command *mcc; | |
2324 | union qm_mc_result *mcr; | |
2325 | struct qman_portal *p = get_affine_portal(); | |
2326 | u8 verb = QM_MCC_VERB_MODIFYCGR; | |
2327 | int ret = 0; | |
2328 | ||
2329 | mcc = qm_mc_start(&p->p); | |
2330 | if (opts) | |
2331 | mcc->initcgr = *opts; | |
2332 | mcc->initcgr.cgid = cgr->cgrid; | |
2333 | if (flags & QMAN_CGR_FLAG_USE_INIT) | |
2334 | verb = QM_MCC_VERB_INITCGR; | |
2335 | qm_mc_commit(&p->p, verb); | |
2336 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2337 | ret = -ETIMEDOUT; | |
2338 | goto out; | |
2339 | } | |
2340 | ||
2341 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); | |
2342 | if (mcr->result != QM_MCR_RESULT_OK) | |
2343 | ret = -EIO; | |
2344 | ||
2345 | out: | |
2346 | put_affine_portal(); | |
2347 | return ret; | |
2348 | } | |
2349 | ||
2350 | #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) | |
2351 | ||
2352 | /* congestion state change notification target update control */ | |
2353 | static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val) | |
2354 | { | |
2355 | if (qman_ip_rev >= QMAN_REV30) | |
2356 | cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi | | |
2357 | QM_CGR_TARG_UDP_CTRL_WRITE_BIT); | |
2358 | else | |
2359 | cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi)); | |
2360 | } | |
2361 | ||
2362 | static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val) | |
2363 | { | |
2364 | if (qman_ip_rev >= QMAN_REV30) | |
2365 | cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi); | |
2366 | else | |
2367 | cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi)); | |
2368 | } | |
2369 | ||
2370 | static u8 qman_cgr_cpus[CGR_NUM]; | |
2371 | ||
2372 | void qman_init_cgr_all(void) | |
2373 | { | |
2374 | struct qman_cgr cgr; | |
2375 | int err_cnt = 0; | |
2376 | ||
2377 | for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) { | |
2378 | if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL)) | |
2379 | err_cnt++; | |
2380 | } | |
2381 | ||
2382 | if (err_cnt) | |
2383 | pr_err("Warning: %d error%s while initialising CGR h/w\n", | |
2384 | err_cnt, (err_cnt > 1) ? "s" : ""); | |
2385 | } | |
2386 | ||
2387 | int qman_create_cgr(struct qman_cgr *cgr, u32 flags, | |
2388 | struct qm_mcc_initcgr *opts) | |
2389 | { | |
2390 | struct qm_mcr_querycgr cgr_state; | |
2391 | int ret; | |
2392 | struct qman_portal *p; | |
2393 | ||
2394 | /* | |
2395 | * We have to check that the provided CGRID is within the limits of the | |
2396 | * data-structures, for obvious reasons. However we'll let h/w take | |
2397 | * care of determining whether it's within the limits of what exists on | |
2398 | * the SoC. | |
2399 | */ | |
2400 | if (cgr->cgrid >= CGR_NUM) | |
2401 | return -EINVAL; | |
2402 | ||
2403 | preempt_disable(); | |
2404 | p = get_affine_portal(); | |
2405 | qman_cgr_cpus[cgr->cgrid] = smp_processor_id(); | |
2406 | preempt_enable(); | |
2407 | ||
2408 | cgr->chan = p->config->channel; | |
2409 | spin_lock(&p->cgr_lock); | |
2410 | ||
2411 | if (opts) { | |
2412 | struct qm_mcc_initcgr local_opts = *opts; | |
2413 | ||
2414 | ret = qman_query_cgr(cgr, &cgr_state); | |
2415 | if (ret) | |
2416 | goto out; | |
2417 | ||
2418 | qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p), | |
2419 | be32_to_cpu(cgr_state.cgr.cscn_targ)); | |
2420 | local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG); | |
2421 | ||
2422 | /* send init if flags indicate so */ | |
2423 | if (flags & QMAN_CGR_FLAG_USE_INIT) | |
2424 | ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, | |
2425 | &local_opts); | |
2426 | else | |
2427 | ret = qm_modify_cgr(cgr, 0, &local_opts); | |
2428 | if (ret) | |
2429 | goto out; | |
2430 | } | |
2431 | ||
2432 | list_add(&cgr->node, &p->cgr_cbs); | |
2433 | ||
	/* Determine if the newly added object requires its callback to be called */
2435 | ret = qman_query_cgr(cgr, &cgr_state); | |
2436 | if (ret) { | |
2437 | /* we can't go back, so proceed and return success */ | |
2438 | dev_err(p->config->dev, "CGR HW state partially modified\n"); | |
2439 | ret = 0; | |
2440 | goto out; | |
2441 | } | |
2442 | if (cgr->cb && cgr_state.cgr.cscn_en && | |
2443 | qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) | |
2444 | cgr->cb(p, cgr, 1); | |
2445 | out: | |
2446 | spin_unlock(&p->cgr_lock); | |
2447 | put_affine_portal(); | |
2448 | return ret; | |
2449 | } | |
2450 | EXPORT_SYMBOL(qman_create_cgr); | |
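
/*
 * Illustrative sketch, not part of the driver: creating a CGR with
 * congestion-state notifications enabled and a congestion threshold.
 * "example_cscn" and the threshold value of 64 are hypothetical;
 * qm_cgr_cs_thres_set64() is the threshold encoder from qman.h.
 */
static void __maybe_unused example_cscn(struct qman_portal *qm,
					struct qman_cgr *cgr, int congested)
{
	pr_info("CGR %u %s congestion\n", cgr->cgrid,
		congested ? "entered" : "exited");
}

static int __maybe_unused example_new_cgr(struct qman_cgr *cgr, u32 cgrid)
{
	struct qm_mcc_initcgr opts;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES | QM_CGR_WE_CSCN_EN);
	opts.cgr.cscn_en = QM_CGR_EN;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 64, 0);

	cgr->cgrid = cgrid;
	cgr->cb = example_cscn;
	return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
}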
2451 | ||
2452 | int qman_delete_cgr(struct qman_cgr *cgr) | |
2453 | { | |
2454 | unsigned long irqflags; | |
2455 | struct qm_mcr_querycgr cgr_state; | |
2456 | struct qm_mcc_initcgr local_opts; | |
2457 | int ret = 0; | |
2458 | struct qman_cgr *i; | |
2459 | struct qman_portal *p = get_affine_portal(); | |
2460 | ||
2461 | if (cgr->chan != p->config->channel) { | |
		/* attempted delete from a portal other than the creator */
		dev_err(p->config->dev, "CGR not owned by current portal\n");
		dev_dbg(p->config->dev, "create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
2466 | ||
2467 | ret = -EINVAL; | |
2468 | goto put_portal; | |
2469 | } | |
2470 | memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); | |
2471 | spin_lock_irqsave(&p->cgr_lock, irqflags); | |
2472 | list_del(&cgr->node); | |
2473 | /* | |
2474 | * If there are no other CGR objects for this CGRID in the list, | |
2475 | * update CSCN_TARG accordingly | |
2476 | */ | |
2477 | list_for_each_entry(i, &p->cgr_cbs, node) | |
2478 | if (i->cgrid == cgr->cgrid && i->cb) | |
2479 | goto release_lock; | |
2480 | ret = qman_query_cgr(cgr, &cgr_state); | |
2481 | if (ret) { | |
2482 | /* add back to the list */ | |
2483 | list_add(&cgr->node, &p->cgr_cbs); | |
2484 | goto release_lock; | |
2485 | } | |
2486 | ||
2487 | local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG); | |
2488 | qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p), | |
2489 | be32_to_cpu(cgr_state.cgr.cscn_targ)); | |
2490 | ||
2491 | ret = qm_modify_cgr(cgr, 0, &local_opts); | |
2492 | if (ret) | |
2493 | /* add back to the list */ | |
2494 | list_add(&cgr->node, &p->cgr_cbs); | |
2495 | release_lock: | |
2496 | spin_unlock_irqrestore(&p->cgr_lock, irqflags); | |
2497 | put_portal: | |
2498 | put_affine_portal(); | |
2499 | return ret; | |
2500 | } | |
2501 | EXPORT_SYMBOL(qman_delete_cgr); | |
2502 | ||
2508 | static void qman_delete_cgr_smp_call(void *p) | |
2509 | { | |
2510 | qman_delete_cgr((struct qman_cgr *)p); | |
2511 | } | |
2512 | ||
2513 | void qman_delete_cgr_safe(struct qman_cgr *cgr) | |
2514 | { | |
2515 | preempt_disable(); | |
2516 | if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { | |
2517 | smp_call_function_single(qman_cgr_cpus[cgr->cgrid], | |
2518 | qman_delete_cgr_smp_call, cgr, true); | |
2519 | preempt_enable(); | |
2520 | return; | |
2521 | } | |
2522 | ||
2523 | qman_delete_cgr(cgr); | |
2524 | preempt_enable(); | |
2525 | } | |
2526 | EXPORT_SYMBOL(qman_delete_cgr_safe); | |
2527 | ||
2528 | /* Cleanup FQs */ | |
2529 | ||
2530 | static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v) | |
2531 | { | |
2532 | const union qm_mr_entry *msg; | |
2533 | int found = 0; | |
2534 | ||
2535 | qm_mr_pvb_update(p); | |
2536 | msg = qm_mr_current(p); | |
2537 | while (msg) { | |
2538 | if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v) | |
2539 | found = 1; | |
2540 | qm_mr_next(p); | |
2541 | qm_mr_cci_consume_to_current(p); | |
2542 | qm_mr_pvb_update(p); | |
2543 | msg = qm_mr_current(p); | |
2544 | } | |
2545 | return found; | |
2546 | } | |
2547 | ||
2548 | static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, | |
2549 | bool wait) | |
2550 | { | |
2551 | const struct qm_dqrr_entry *dqrr; | |
2552 | int found = 0; | |
2553 | ||
2554 | do { | |
2555 | qm_dqrr_pvb_update(p); | |
2556 | dqrr = qm_dqrr_current(p); | |
2557 | if (!dqrr) | |
2558 | cpu_relax(); | |
2559 | } while (wait && !dqrr); | |
2560 | ||
2561 | while (dqrr) { | |
2562 | if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s)) | |
2563 | found = 1; | |
2564 | qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); | |
2565 | qm_dqrr_pvb_update(p); | |
2566 | qm_dqrr_next(p); | |
2567 | dqrr = qm_dqrr_current(p); | |
2568 | } | |
2569 | return found; | |
2570 | } | |
2571 | ||
2572 | #define qm_mr_drain(p, V) \ | |
2573 | _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V) | |
2574 | ||
2575 | #define qm_dqrr_drain(p, f, S) \ | |
2576 | _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false) | |
2577 | ||
2578 | #define qm_dqrr_drain_wait(p, f, S) \ | |
2579 | _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true) | |
2580 | ||
2581 | #define qm_dqrr_drain_nomatch(p) \ | |
2582 | _qm_dqrr_consume_and_match(p, 0, 0, false) | |
2583 | ||
2584 | static int qman_shutdown_fq(u32 fqid) | |
2585 | { | |
2586 | struct qman_portal *p; | |
2587 | struct device *dev; | |
2588 | union qm_mc_command *mcc; | |
2589 | union qm_mc_result *mcr; | |
2590 | int orl_empty, drain = 0, ret = 0; | |
2591 | u32 channel, wq, res; | |
2592 | u8 state; | |
2593 | ||
2594 | p = get_affine_portal(); | |
2595 | dev = p->config->dev; | |
2596 | /* Determine the state of the FQID */ | |
2597 | mcc = qm_mc_start(&p->p); | |
2598 | qm_fqid_set(&mcc->fq, fqid); | |
2599 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); | |
2600 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2601 | dev_err(dev, "QUERYFQ_NP timeout\n"); | |
2602 | ret = -ETIMEDOUT; | |
2603 | goto out; | |
2604 | } | |
2605 | ||
2606 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); | |
2607 | state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; | |
2608 | if (state == QM_MCR_NP_STATE_OOS) | |
2609 | goto out; /* Already OOS, no need to do anymore checks */ | |
2610 | ||
2611 | /* Query which channel the FQ is using */ | |
2612 | mcc = qm_mc_start(&p->p); | |
2613 | qm_fqid_set(&mcc->fq, fqid); | |
2614 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); | |
2615 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2616 | dev_err(dev, "QUERYFQ timeout\n"); | |
2617 | ret = -ETIMEDOUT; | |
2618 | goto out; | |
2619 | } | |
2620 | ||
2621 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); | |
2622 | /* Need to store these since the MCR gets reused */ | |
2623 | channel = qm_fqd_get_chan(&mcr->queryfq.fqd); | |
2624 | wq = qm_fqd_get_wq(&mcr->queryfq.fqd); | |
2625 | ||
2626 | switch (state) { | |
2627 | case QM_MCR_NP_STATE_TEN_SCHED: | |
2628 | case QM_MCR_NP_STATE_TRU_SCHED: | |
2629 | case QM_MCR_NP_STATE_ACTIVE: | |
2630 | case QM_MCR_NP_STATE_PARKED: | |
2631 | orl_empty = 0; | |
2632 | mcc = qm_mc_start(&p->p); | |
2633 | qm_fqid_set(&mcc->fq, fqid); | |
2634 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | |
2635 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2636 | dev_err(dev, "QUERYFQ_NP timeout\n"); | |
2637 | ret = -ETIMEDOUT; | |
2638 | goto out; | |
2639 | } | |
2640 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2641 | QM_MCR_VERB_ALTER_RETIRE); | |
2642 | res = mcr->result; /* Make a copy as we reuse MCR below */ | |
2643 | ||
2644 | if (res == QM_MCR_RESULT_PENDING) { | |
2645 | /* | |
2646 | * Need to wait for the FQRN in the message ring, which | |
2647 | * will only occur once the FQ has been drained. In | |
2648 | * order for the FQ to drain the portal needs to be set | |
2649 | * to dequeue from the channel the FQ is scheduled on | |
2650 | */ | |
2651 | int found_fqrn = 0; | |
2652 | u16 dequeue_wq = 0; | |
2653 | ||
2654 | /* Flag that we need to drain FQ */ | |
2655 | drain = 1; | |
2656 | ||
2657 | if (channel >= qm_channel_pool1 && | |
2658 | channel < qm_channel_pool1 + 15) { | |
2659 | /* Pool channel, enable the bit in the portal */ | |
2660 | dequeue_wq = (channel - | |
2661 | qm_channel_pool1 + 1)<<4 | wq; | |
2662 | } else if (channel < qm_channel_pool1) { | |
2663 | /* Dedicated channel */ | |
2664 | dequeue_wq = wq; | |
2665 | } else { | |
2666 | dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x", | |
2667 | fqid, channel); | |
2668 | ret = -EBUSY; | |
2669 | goto out; | |
2670 | } | |
2671 | /* Set the sdqcr to drain this channel */ | |
2672 | if (channel < qm_channel_pool1) | |
2673 | qm_dqrr_sdqcr_set(&p->p, | |
2674 | QM_SDQCR_TYPE_ACTIVE | | |
2675 | QM_SDQCR_CHANNELS_DEDICATED); | |
2676 | else | |
2677 | qm_dqrr_sdqcr_set(&p->p, | |
2678 | QM_SDQCR_TYPE_ACTIVE | | |
2679 | QM_SDQCR_CHANNELS_POOL_CONV | |
2680 | (channel)); | |
2681 | do { | |
				/* Keep draining DQRR while checking the MR */
2683 | qm_dqrr_drain_nomatch(&p->p); | |
2684 | /* Process message ring too */ | |
2685 | found_fqrn = qm_mr_drain(&p->p, FQRN); | |
2686 | cpu_relax(); | |
2687 | } while (!found_fqrn); | |
2688 | ||
2689 | } | |
2690 | if (res != QM_MCR_RESULT_OK && | |
2691 | res != QM_MCR_RESULT_PENDING) { | |
2692 | dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n", | |
2693 | fqid, res); | |
2694 | ret = -EIO; | |
2695 | goto out; | |
2696 | } | |
2697 | if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { | |
2698 | /* | |
2699 | * ORL had no entries, no need to wait until the | |
2700 | * ERNs come in | |
2701 | */ | |
2702 | orl_empty = 1; | |
2703 | } | |
2704 | /* | |
2705 | * Retirement succeeded, check to see if FQ needs | |
2706 | * to be drained | |
2707 | */ | |
2708 | if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { | |
2709 | /* FQ is Not Empty, drain using volatile DQ commands */ | |
2710 | do { | |
2711 | u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); | |
2712 | ||
2713 | qm_dqrr_vdqcr_set(&p->p, vdqcr); | |
2714 | /* | |
2715 | * Wait for a dequeue and process the dequeues, | |
2716 | * making sure to empty the ring completely | |
2717 | */ | |
2718 | } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); | |
2719 | } | |
2720 | qm_dqrr_sdqcr_set(&p->p, 0); | |
2721 | ||
2722 | while (!orl_empty) { | |
2723 | /* Wait for the ORL to have been completely drained */ | |
2724 | orl_empty = qm_mr_drain(&p->p, FQRL); | |
2725 | cpu_relax(); | |
2726 | } | |
2727 | mcc = qm_mc_start(&p->p); | |
2728 | qm_fqid_set(&mcc->fq, fqid); | |
2729 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | |
2730 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2731 | ret = -ETIMEDOUT; | |
2732 | goto out; | |
2733 | } | |
2734 | ||
2735 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2736 | QM_MCR_VERB_ALTER_OOS); | |
2737 | if (mcr->result != QM_MCR_RESULT_OK) { | |
2738 | dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n", | |
2739 | fqid, mcr->result); | |
2740 | ret = -EIO; | |
2741 | goto out; | |
2742 | } | |
2743 | break; | |
2744 | ||
2745 | case QM_MCR_NP_STATE_RETIRED: | |
2746 | /* Send OOS Command */ | |
2747 | mcc = qm_mc_start(&p->p); | |
2748 | qm_fqid_set(&mcc->fq, fqid); | |
2749 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | |
2750 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2751 | ret = -ETIMEDOUT; | |
2752 | goto out; | |
2753 | } | |
2754 | ||
2755 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2756 | QM_MCR_VERB_ALTER_OOS); | |
2757 | if (mcr->result) { | |
2758 | dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", | |
2759 | fqid, mcr->result); | |
2760 | ret = -EIO; | |
2761 | goto out; | |
2762 | } | |
2763 | break; | |
2764 | ||
2765 | case QM_MCR_NP_STATE_OOS: | |
2766 | /* Done */ | |
2767 | break; | |
2768 | ||
2769 | default: | |
2770 | ret = -EIO; | |
2771 | } | |
2772 | ||
2773 | out: | |
2774 | put_affine_portal(); | |
2775 | return ret; | |
2776 | } | |
2777 | ||
2778 | const struct qm_portal_config *qman_get_qm_portal_config( | |
2779 | struct qman_portal *portal) | |
2780 | { | |
2781 | return portal->config; | |
2782 | } | |
2783 | EXPORT_SYMBOL(qman_get_qm_portal_config); | |
2784 | ||
2785 | struct gen_pool *qm_fqalloc; /* FQID allocator */ | |
2786 | struct gen_pool *qm_qpalloc; /* pool-channel allocator */ | |
2787 | struct gen_pool *qm_cgralloc; /* CGR ID allocator */ | |
2788 | ||
2789 | static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt) | |
2790 | { | |
2791 | unsigned long addr; | |
2792 | ||
2793 | if (!p) | |
2794 | return -ENODEV; | |
2795 | ||
2796 | addr = gen_pool_alloc(p, cnt); | |
2797 | if (!addr) | |
2798 | return -ENOMEM; | |
2799 | ||
2800 | *result = addr & ~DPAA_GENALLOC_OFF; | |
2801 | ||
2802 | return 0; | |
2803 | } | |
2804 | ||
2805 | int qman_alloc_fqid_range(u32 *result, u32 count) | |
2806 | { | |
2807 | return qman_alloc_range(qm_fqalloc, result, count); | |
2808 | } | |
2809 | EXPORT_SYMBOL(qman_alloc_fqid_range); | |
2810 | ||
2811 | int qman_alloc_pool_range(u32 *result, u32 count) | |
2812 | { | |
2813 | return qman_alloc_range(qm_qpalloc, result, count); | |
2814 | } | |
2815 | EXPORT_SYMBOL(qman_alloc_pool_range); | |
2816 | ||
2817 | int qman_alloc_cgrid_range(u32 *result, u32 count) | |
2818 | { | |
2819 | return qman_alloc_range(qm_cgralloc, result, count); | |
2820 | } | |
2821 | EXPORT_SYMBOL(qman_alloc_cgrid_range); | |
2822 | ||
2823 | int qman_release_fqid(u32 fqid) | |
2824 | { | |
2825 | int ret = qman_shutdown_fq(fqid); | |
2826 | ||
2827 | if (ret) { | |
2828 | pr_debug("FQID %d leaked\n", fqid); | |
2829 | return ret; | |
2830 | } | |
2831 | ||
2832 | gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1); | |
2833 | return 0; | |
2834 | } | |
2835 | EXPORT_SYMBOL(qman_release_fqid); | |
2836 | ||
2837 | static int qpool_cleanup(u32 qp) | |
2838 | { | |
2839 | /* | |
	 * We query all FQDs starting from FQID 1 until we get an "invalid
	 * FQID" error, looking for non-OOS FQDs whose destination channel is
	 * the pool-channel being released. When a non-OOS FQD is found, we
	 * attempt to clean it up.
2844 | */ | |
2845 | struct qman_fq fq = { | |
2846 | .fqid = QM_FQID_RANGE_START | |
2847 | }; | |
2848 | int err; | |
2849 | ||
2850 | do { | |
2851 | struct qm_mcr_queryfq_np np; | |
2852 | ||
2853 | err = qman_query_fq_np(&fq, &np); | |
2854 | if (err == -ERANGE) | |
2855 | /* FQID range exceeded, found no problems */ | |
2856 | return 0; | |
2857 | else if (WARN_ON(err)) | |
2858 | return err; | |
2859 | ||
2860 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { | |
2861 | struct qm_fqd fqd; | |
2862 | ||
2863 | err = qman_query_fq(&fq, &fqd); | |
2864 | if (WARN_ON(err)) | |
2865 | return err; | |
2866 | if (qm_fqd_get_chan(&fqd) == qp) { | |
2867 | /* The channel is the FQ's target, clean it */ | |
2868 | err = qman_shutdown_fq(fq.fqid); | |
2869 | if (err) | |
2870 | /* | |
2871 | * Couldn't shut down the FQ | |
2872 | * so the pool must be leaked | |
2873 | */ | |
2874 | return err; | |
2875 | } | |
2876 | } | |
2877 | /* Move to the next FQID */ | |
2878 | fq.fqid++; | |
2879 | } while (1); | |
2880 | } | |
2881 | ||
2882 | int qman_release_pool(u32 qp) | |
2883 | { | |
2884 | int ret; | |
2885 | ||
2886 | ret = qpool_cleanup(qp); | |
2887 | if (ret) { | |
2888 | pr_debug("CHID %d leaked\n", qp); | |
2889 | return ret; | |
2890 | } | |
2891 | ||
2892 | gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1); | |
2893 | return 0; | |
2894 | } | |
2895 | EXPORT_SYMBOL(qman_release_pool); | |
2896 | ||
2897 | static int cgr_cleanup(u32 cgrid) | |
2898 | { | |
2899 | /* | |
	 * Query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released.
2902 | */ | |
2903 | struct qman_fq fq = { | |
2904 | .fqid = QM_FQID_RANGE_START | |
2905 | }; | |
2906 | int err; | |
2907 | ||
2908 | do { | |
2909 | struct qm_mcr_queryfq_np np; | |
2910 | ||
2911 | err = qman_query_fq_np(&fq, &np); | |
2912 | if (err == -ERANGE) | |
2913 | /* FQID range exceeded, found no problems */ | |
2914 | return 0; | |
2915 | else if (WARN_ON(err)) | |
2916 | return err; | |
2917 | ||
2918 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { | |
2919 | struct qm_fqd fqd; | |
2920 | ||
2921 | err = qman_query_fq(&fq, &fqd); | |
2922 | if (WARN_ON(err)) | |
2923 | return err; | |
2924 | if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE && | |
2925 | fqd.cgid == cgrid) { | |
2926 | pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", | |
2927 | cgrid, fq.fqid); | |
2928 | return -EIO; | |
2929 | } | |
2930 | } | |
2931 | /* Move to the next FQID */ | |
2932 | fq.fqid++; | |
2933 | } while (1); | |
2934 | } | |
2935 | ||
2936 | int qman_release_cgrid(u32 cgrid) | |
2937 | { | |
2938 | int ret; | |
2939 | ||
2940 | ret = cgr_cleanup(cgrid); | |
2941 | if (ret) { | |
2942 | pr_debug("CGRID %d leaked\n", cgrid); | |
2943 | return ret; | |
2944 | } | |
2945 | ||
2946 | gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1); | |
2947 | return 0; | |
2948 | } | |
2949 | EXPORT_SYMBOL(qman_release_cgrid); |