/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT	32
#define QMAN_PIRQ_DQRR_ITHRESH	12
#define QMAN_PIRQ_MR_ITHRESH	4
#define QMAN_PIRQ_IPERIOD	100

/* Portal register assists */

/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH	0x0000
#define QM_REG_EQCR_CI_CINH	0x0004
#define QM_REG_EQCR_ITR		0x0008
#define QM_REG_DQRR_PI_CINH	0x0040
#define QM_REG_DQRR_CI_CINH	0x0044
#define QM_REG_DQRR_ITR		0x0048
#define QM_REG_DQRR_DCAP	0x0050
#define QM_REG_DQRR_SDQCR	0x0054
#define QM_REG_DQRR_VDQCR	0x0058
#define QM_REG_DQRR_PDQCR	0x005c
#define QM_REG_MR_PI_CINH	0x0080
#define QM_REG_MR_CI_CINH	0x0084
#define QM_REG_MR_ITR		0x0088
#define QM_REG_CFG		0x0100
#define QM_REG_ISR		0x0e00
#define QM_REG_IER		0x0e04
#define QM_REG_ISDR		0x0e08
#define QM_REG_IIR		0x0e0c
#define QM_REG_ITPR		0x0e14

/* Cache-enabled register offsets */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3100
#define QM_CL_DQRR_PI_CENA	0x3200
#define QM_CL_DQRR_CI_CENA	0x3300
#define QM_CL_MR_PI_CENA	0x3400
#define QM_CL_MR_CI_CENA	0x3500
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940

/*
 * The drivers (and the h/w programming model) already obtain the required
 * synchronisation for portal accesses and data dependencies. Use of barrier()s
 * or other order-preserving primitives simply degrades performance. Hence the
 * use of the __raw_*() interfaces, which simply ensure that the compiler
 * treats the portal registers as volatile.
 */

/* Cache-enabled ring access */
#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
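
/*
 * A worked example of the macro above (illustrative only): ring entries sit
 * one 64-byte cacheline apart, so qm_cl(ring, 3) yields ring + 0xc0, i.e. the
 * start of the cacheline holding entry index 3.
 */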

/*
 * Portal modes.
 * Enum types:
 *   pmode == production mode
 *   cmode == consumption mode
 *   dmode == h/w dequeue mode
 * Enum values use 3 letter codes. First letter matches the portal mode,
 * remaining two letters indicate:
 *   ci == cache-inhibited portal register
 *   ce == cache-enabled portal register
 *   vb == in-band valid-bit (cache-enabled)
 *   dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 * As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define QM_EQCR_SIZE		8
#define QM_DQRR_SIZE		16
#define QM_MR_SIZE		8

/* "Enqueue Command" */
struct qm_eqcr_entry {
	u8 _ncw_verb;	/* writes to this are non-coherent */
	u8 dca;
	u16 seqnum;
	u32 orp;	/* 24-bit */
	u32 fqid;	/* 24-bit */
	u32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value: */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */

struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};

struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};

/* MC (Management Command) command */
/* "Query FQ" */
struct qm_mcc_queryfq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;
/* "Alter FQ State Commands" */
struct qm_mcc_alterfq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2;
	u8 count;	/* number of consecutive FQID */
	u8 __reserved3[10];
	u32 context_b;	/* frame queue context b */
	u8 __reserved4[40];
} __packed;

/* "Query CGR" */
struct qm_mcc_querycgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

struct qm_mcc_querywq {
	u8 _ncw_verb;
	u8 __reserved;
	/* select channel if verb != QUERYWQ_DEDICATED */
	u16 channel_wq;	/* ignores wq (3 lsbits): _res[0-2] */
	u8 __reserved2[60];
} __packed;

#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains: */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
union qm_mc_command {
	struct {
		u8 _ncw_verb;	/* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_queryfq queryfq;
	struct qm_mcc_alterfq alterfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_querycgr querycgr;
	struct qm_mcc_querywq querywq;
	struct qm_mcc_queryfq_np queryfq_np;
};

/* MC (Management Command) result */
/* "Query FQ" */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;			/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};

struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

struct qm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	__raw_writel(val, p->addr.ci + offset);
}

/* Cache-enabled portal access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ce + offset);
}

/* --- EQCR API --- */

#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
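/*
 * A worked example (illustrative only): with 8 entries of 64 bytes each, the
 * ring occupies a naturally-aligned 512-byte region, so EQCR_CARRY is bit 9
 * of the entry address. Incrementing the cursor past the last entry sets that
 * bit; clearing it wraps the pointer back to the start of the ring.
 */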
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;

	return (struct qm_eqcr_entry *)addr;
}

/* Bit-wise logic to convert a ring pointer to a ring index */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
	struct qm_eqcr_entry *partial = eqcr->cursor + 1;

	eqcr->cursor = eqcr_carryclear(partial);
	if (partial != eqcr->cursor)
		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
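	/*
	 * Note: at most QM_EQCR_SIZE - 1 entries are ever in flight; one slot
	 * stays in reserve so the producer index cannot wrap fully around
	 * onto the consumer index (a full ring would otherwise be
	 * indistinguishable from an empty one by the indices alone).
	 */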
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |	/* QCSP_CFG: EST */
	      (eq_stash_prio << 26) |	/* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
{
	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
}

static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}

static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

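/*
 * Unlike the variant above, this one refreshes the consumer index from the
 * cache-enabled shadow of EQCR_CI when no space appears to be available: the
 * hardware may have consumed entries since the last look, so 'available' is
 * topped up lazily before giving up.
 */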
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
							      *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
	DPAA_ASSERT(eqcr->available >= 1);
}

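/*
 * Commit ordering matters here: dma_wmb() makes the filled-in command body
 * visible before the verb write, and the verb (with the valid-bit folded in)
 * is what hands ownership of the entry to the hardware. dpaa_flush() then
 * pushes the completed cacheline out to the portal.
 */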
static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}

static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}

static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}

/* --- DQRR API --- */

#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)

static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
	return dqrr_carryclear(e + 1);
}

static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) |	/* DQRR_MF */
	      ((dmode & 1) << 18) |			/* DP */
	      ((cmode & 3) << 16) |			/* DCM */
	      0xa0 |					/* RE+SE */
	      (0 ? 0x40 : 0) |				/* Ignore RP */
	      (0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}

static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (!dqrr->fill)
		return NULL;
	return dqrr->cursor;
}

static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}

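/*
 * Valid-bit production model: the hardware flips the VB bit in each entry's
 * verb on every lap around the ring, so a new entry is recognised when the
 * bit at the software producer index matches the expected 'vbit'; the
 * expectation is toggled each time the index wraps.
 */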
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stashing.
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}

static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |			/* DQRR_DCAP::PK */
	       idx);					/* DQRR_DCAP::DCAP_CI */
}

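/*
 * Multi-entry variant: with DQRR_DCAP::S set, the upper bits carry a bitmask
 * of ring indices to acknowledge rather than a single index. For example
 * (illustrative only), a bitmask of 0x9 would acknowledge entries 0 and 3 in
 * one register write.
 */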
static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DQRR_DCAP::S */
	       (bitmask << 16));			/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
}

/* --- MR API --- */

#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)

static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
	return mr_carryclear(e + 1);
}

static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}

static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (!mr->fill)
		return NULL;
	return mr->cursor;
}

static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}

static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}

/* --- Management command API --- */

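/*
 * The management-command interface is a single command register (CR) paired
 * with two response registers (RR0/RR1): responses alternate between the two,
 * and 'rridx' tracks which one the next result will land in. The valid-bit in
 * the committed verb is what tells hardware a fresh command is present, and
 * both rridx and vbit flip each time a result is consumed.
 */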
static inline int qm_mc_init(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
		    ? 0 : 1;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}

static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}

static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}

static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{
	int timeout = QM_MCR_TIMEOUT;

	do {
		*mcr = qm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}
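
/*
 * A minimal usage sketch of the MC API (illustrative only; it mirrors the
 * pattern used by qm_congestion_task() further down): start a command, fill
 * it in, commit it with a verb, then poll for the result with a timeout.
 * 'some_fqid' below is a hypothetical value, not part of this driver.
 *
 *	union qm_mc_command *mcc;
 *	union qm_mc_result *mcr;
 *
 *	mcc = qm_mc_start(&p->p);
 *	mcc->queryfq.fqid = some_fqid;
 *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 *	if (!qm_mc_result_timeout(&p->p, &mcr))
 *		return -ETIMEDOUT;   // returns 0 on timeout
 */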

static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	set_bits(mask, &fq->flags);
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	clear_bits(mask, &fq->flags);
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}

struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* needed for providing a non-NULL device to dma_map_***() */
	struct platform_device *pdev;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}

static struct workqueue_struct *qm_portal_wq;

int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}

/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * from the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

static struct qman_fq **fq_table;
static u32 num_fqids;

int qman_alloc_fq_table(u32 _num_fqids)
{
	num_fqids = _num_fqids;

	fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
	if (!fq_table)
		return -ENOMEM;

	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
		 fq_table, num_fqids * 2);
	return 0;
}

static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY). The lookup table holds two slots per
 * FQID, and the even slot (fqid * 2) is where the full-service object lives.
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}

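/*
 * DQRR/MR entries identify their FQ via a 32-bit tag (contextB). On 64-bit
 * builds a pointer doesn't fit in the tag, so the fq_table index is used
 * instead and mapped back through idx_to_fq(); on 32-bit builds the tag
 * simply is the pointer.
 */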
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;

	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}

static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		u64 now, then = jiffies;

		do {
			now = jiffies;
		} while ((then + 10000) > now);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}

static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	char buf[16];
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	sprintf(buf, "qportal-%d", c->channel);
	portal->pdev = platform_device_alloc(buf, -1);
	if (!portal->pdev)
		goto fail_devalloc;
	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
		goto fail_devadd;
	ret = platform_device_add(portal->pdev);
	if (ret)
		goto fail_devadd;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	qm_out(p, QM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_err(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%x\n",
			e->verb, e->ern.rc, e->ern.fd.addr_lo);
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISDR, 0);
	qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	platform_device_del(portal->pdev);
fail_devadd:
	platform_device_put(portal->pdev);
fail_devalloc:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}

struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}

static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	platform_device_del(qm->pdev);
	platform_device_put(qm->pdev);

	qm->config = NULL;
}

const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}

/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}

static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
}

static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(msg->fq.fqid);
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(msg->fq.contextB);
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(msg->ern.tag);
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	qm_mr_cci_consume(&p->p, num);
	preempt_enable();
}
1464 | ||
1465 | static u32 __poll_portal_slow(struct qman_portal *p, u32 is) | |
1466 | { | |
1467 | if (is & QM_PIRQ_CSCI) { | |
1468 | queue_work_on(smp_processor_id(), qm_portal_wq, | |
1469 | &p->congestion_work); | |
1470 | } | |
1471 | ||
1472 | if (is & QM_PIRQ_EQRI) { | |
1473 | qm_eqcr_cce_update(&p->p); | |
1474 | qm_eqcr_set_ithresh(&p->p, 0); | |
1475 | wake_up(&affine_queue); | |
1476 | } | |
1477 | ||
1478 | if (is & QM_PIRQ_MRI) { | |
1479 | queue_work_on(smp_processor_id(), qm_portal_wq, | |
1480 | &p->mr_work); | |
1481 | } | |
1482 | ||
1483 | return is; | |
1484 | } | |
1485 | ||
1486 | /* | |
1487 | * remove some slowish-path stuff from the "fast path" and make sure it isn't | |
1488 | * inlined. | |
1489 | */ | |
1490 | static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) | |
1491 | { | |
1492 | p->vdqcr_owned = NULL; | |
1493 | fq_clear(fq, QMAN_FQ_STATE_VDQCR); | |
1494 | wake_up(&affine_queue); | |
1495 | } | |
1496 | ||
1497 | /* | |
1498 | * The only states that would conflict with other things if they ran at the | |
1499 | * same time on the same cpu are: | |
1500 | * | |
1501 | * (i) setting/clearing vdqcr_owned, and | |
1502 | * (ii) clearing the NE (Not Empty) flag. | |
1503 | * | |
1504 | * Both are safe. Because; | |
1505 | * | |
1506 | * (i) this clearing can only occur after qman_volatile_dequeue() has set the | |
1507 | * vdqcr_owned field (which it does before setting VDQCR), and | |
1508 | * qman_volatile_dequeue() blocks interrupts and preemption while this is | |
1509 | * done so that we can't interfere. | |
1510 | * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as | |
1511 | * with (i) that API prevents us from interfering until it's safe. | |
1512 | * | |
1513 | * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far | |
1514 | * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett | |
1515 | * advantage comes from this function not having to "lock" anything at all. | |
1516 | * | |
1517 | * Note also that the callbacks are invoked at points which are safe against the | |
1518 | * above potential conflicts, but that this function itself is not re-entrant | |
1519 | * (this is because the function tracks one end of each FIFO in the portal and | |
1520 | * we do *not* want to lock that). So the consequence is that it is safe for | |
1521 | * user callbacks to call into any QMan API. | |
1522 | */ | |
1523 | static inline unsigned int __poll_portal_fast(struct qman_portal *p, | |
1524 | unsigned int poll_limit) | |
1525 | { | |
1526 | const struct qm_dqrr_entry *dq; | |
1527 | struct qman_fq *fq; | |
1528 | enum qman_cb_dqrr_result res; | |
1529 | unsigned int limit = 0; | |
1530 | ||
1531 | do { | |
1532 | qm_dqrr_pvb_update(&p->p); | |
1533 | dq = qm_dqrr_current(&p->p); | |
1534 | if (!dq) | |
1535 | break; | |
1536 | ||
1537 | if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { | |
1538 | /* | |
1539 | * VDQCR: don't trust contextB as the FQ may have | |
1540 | * been configured for h/w consumption and we're | |
1541 | * draining it post-retirement. | |
1542 | */ | |
1543 | fq = p->vdqcr_owned; | |
1544 | /* | |
1545 | * We only set QMAN_FQ_STATE_NE when retiring, so we | |
1546 | * only need to check for clearing it when doing | |
1547 | * volatile dequeues. It's one less thing to check | |
1548 | * in the critical path (SDQCR). | |
1549 | */ | |
1550 | if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) | |
1551 | fq_clear(fq, QMAN_FQ_STATE_NE); | |
1552 | /* | |
1553 | * This is duplicated from the SDQCR code, but we | |
1554 | * have stuff to do before *and* after this callback, | |
1555 | * and we don't want multiple if()s in the critical | |
1556 | * path (SDQCR). | |
1557 | */ | |
1558 | res = fq->cb.dqrr(p, fq, dq); | |
1559 | if (res == qman_cb_dqrr_stop) | |
1560 | break; | |
1561 | /* Check for VDQCR completion */ | |
1562 | if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) | |
1563 | clear_vdqcr(p, fq); | |
1564 | } else { | |
1565 | /* SDQCR: contextB points to the FQ */ | |
1566 | fq = tag_to_fq(dq->contextB); | |
1567 | /* Now let the callback do its stuff */ | |
1568 | res = fq->cb.dqrr(p, fq, dq); | |
1569 | /* | |
1570 | * The callback can request that we exit without | |
1571 | * consuming this entry nor advancing; | |
1572 | */ | |
1573 | if (res == qman_cb_dqrr_stop) | |
1574 | break; | |
1575 | } | |
1576 | /* Interpret 'dq' from a driver perspective. */ | |
1577 | /* | |
1578 | * Parking isn't possible unless HELDACTIVE was set. NB, | |
1579 | * FORCEELIGIBLE implies HELDACTIVE, so we only need to | |
1580 | * check for HELDACTIVE to cover both. | |
1581 | */ | |
1582 | DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || | |
1583 | (res != qman_cb_dqrr_park)); | |
1584 | /* just means "skip it, I'll consume it myself later on" */ | |
1585 | if (res != qman_cb_dqrr_defer) | |
1586 | qm_dqrr_cdc_consume_1ptr(&p->p, dq, | |
1587 | res == qman_cb_dqrr_park); | |
1588 | /* Move forward */ | |
1589 | qm_dqrr_next(&p->p); | |
1590 | /* | |
1591 | * Entry processed and consumed, increment our counter. The | |
1592 | * callback can request that we exit after consuming the | |
1593 | * entry, and we also exit if we reach our processing limit, | |
1594 | * so loop back only if neither of these conditions is met. | |
1595 | */ | |
1596 | } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop); | |
1597 | ||
1598 | return limit; | |
1599 | } | |
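For illustration, a minimal DQRR callback written against this API might look
like the sketch below; my_rx_dqrr and my_handle_frame are hypothetical names,
and the result codes map onto the branches of __poll_portal_fast() above:

	/* hypothetical consumer of received frame descriptors */
	static void my_handle_frame(const struct qm_fd *fd);

	static enum qman_cb_dqrr_result my_rx_dqrr(struct qman_portal *portal,
						   struct qman_fq *fq,
						   const struct qm_dqrr_entry *dq)
	{
		my_handle_frame(&dq->fd);
		/*
		 * qman_cb_dqrr_consume: consume the entry and keep polling.
		 * qman_cb_dqrr_defer: skip consumption (caller handles it).
		 * qman_cb_dqrr_park: park the FQ (needs HELDACTIVE, see above).
		 * The *_stop variants end the poll loop early.
		 */
		return qman_cb_dqrr_consume;
	}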
1600 | ||
1601 | void qman_p_irqsource_add(struct qman_portal *p, u32 bits) | |
1602 | { | |
1603 | unsigned long irqflags; | |
1604 | ||
1605 | local_irq_save(irqflags); | |
1606 | set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); | |
1607 | qm_out(&p->p, QM_REG_IER, p->irq_sources); | |
1608 | local_irq_restore(irqflags); | |
1609 | } | |
1610 | EXPORT_SYMBOL(qman_p_irqsource_add); | |
1611 | ||
1612 | void qman_p_irqsource_remove(struct qman_portal *p, u32 bits) | |
1613 | { | |
1614 | unsigned long irqflags; | |
1615 | u32 ier; | |
1616 | ||
1617 | /* | |
1618 | * Our interrupt handler only processes and clears status register bits that | |
1619 | * are in p->irq_sources. As we're trimming that mask, if one of them | |
1620 | * were to assert in the status register just before we remove it from | |
1621 | * the enable register, there would be an interrupt-storm when we | |
1622 | * release the IRQ lock. So we wait for the enable register update to | |
1623 | * take effect in h/w (by reading it back) and then clear all other bits | |
1624 | * in the status register. Ie. we clear them from ISR once it's certain | |
1625 | * IER won't allow them to reassert. | |
1626 | */ | |
1627 | local_irq_save(irqflags); | |
1628 | bits &= QM_PIRQ_VISIBLE; | |
1629 | clear_bits(bits, &p->irq_sources); | |
1630 | qm_out(&p->p, QM_REG_IER, p->irq_sources); | |
1631 | ier = qm_in(&p->p, QM_REG_IER); | |
1632 | /* | |
1633 | * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a | |
1634 | * data-dependency, ie. to protect against re-ordering. | |
1635 | */ | |
1636 | qm_out(&p->p, QM_REG_ISR, ~ier); | |
1637 | local_irq_restore(irqflags); | |
1638 | } | |
1639 | EXPORT_SYMBOL(qman_p_irqsource_remove); | |
1640 | ||
1641 | const cpumask_t *qman_affine_cpus(void) | |
1642 | { | |
1643 | return &affine_mask; | |
1644 | } | |
1645 | EXPORT_SYMBOL(qman_affine_cpus); | |
1646 | ||
1647 | u16 qman_affine_channel(int cpu) | |
1648 | { | |
1649 | if (cpu < 0) { | |
1650 | struct qman_portal *portal = get_affine_portal(); | |
1651 | ||
1652 | cpu = portal->config->cpu; | |
1653 | put_affine_portal(); | |
1654 | } | |
1655 | WARN_ON(!cpumask_test_cpu(cpu, &affine_mask)); | |
1656 | return affine_channels[cpu]; | |
1657 | } | |
1658 | EXPORT_SYMBOL(qman_affine_channel); | |
1659 | ||
1660 | struct qman_portal *qman_get_affine_portal(int cpu) | |
1661 | { | |
1662 | return affine_portals[cpu]; | |
1663 | } | |
1664 | EXPORT_SYMBOL(qman_get_affine_portal); | |
1665 | ||
1666 | int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) | |
1667 | { | |
1668 | return __poll_portal_fast(p, limit); | |
1669 | } | |
1670 | EXPORT_SYMBOL(qman_p_poll_dqrr); | |
1671 | ||
1672 | void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) | |
1673 | { | |
1674 | unsigned long irqflags; | |
1675 | ||
1676 | local_irq_save(irqflags); | |
1677 | pools &= p->config->pools; | |
1678 | p->sdqcr |= pools; | |
1679 | qm_dqrr_sdqcr_set(&p->p, p->sdqcr); | |
1680 | local_irq_restore(irqflags); | |
1681 | } | |
1682 | EXPORT_SYMBOL(qman_p_static_dequeue_add); | |
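Putting the portal APIs together, a polled receive path subscribes the affine
portal to a pool channel and then drives __poll_portal_fast() through
qman_p_poll_dqrr(). A sketch, assuming the caller owns pool channel 'ch' and
runs pinned to a single CPU (the 16-entry budget is an arbitrary choice):

	static void my_poll_rx(u16 ch)
	{
		struct qman_portal *p;

		p = qman_get_affine_portal(smp_processor_id());
		/* dequeue from our pool channel via SDQCR */
		qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL_CONV(ch));

		for (;;) {
			/* process up to 16 DQRR entries per pass */
			qman_p_poll_dqrr(p, 16);
			cpu_relax();
		}
	}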
1683 | ||
1684 | /* Frame queue API */ | |
1685 | ||
1686 | static const char *mcr_result_str(u8 result) | |
1687 | { | |
1688 | switch (result) { | |
1689 | case QM_MCR_RESULT_NULL: | |
1690 | return "QM_MCR_RESULT_NULL"; | |
1691 | case QM_MCR_RESULT_OK: | |
1692 | return "QM_MCR_RESULT_OK"; | |
1693 | case QM_MCR_RESULT_ERR_FQID: | |
1694 | return "QM_MCR_RESULT_ERR_FQID"; | |
1695 | case QM_MCR_RESULT_ERR_FQSTATE: | |
1696 | return "QM_MCR_RESULT_ERR_FQSTATE"; | |
1697 | case QM_MCR_RESULT_ERR_NOTEMPTY: | |
1698 | return "QM_MCR_RESULT_ERR_NOTEMPTY"; | |
1699 | case QM_MCR_RESULT_PENDING: | |
1700 | return "QM_MCR_RESULT_PENDING"; | |
1701 | case QM_MCR_RESULT_ERR_BADCOMMAND: | |
1702 | return "QM_MCR_RESULT_ERR_BADCOMMAND"; | |
1703 | } | |
1704 | return "<unknown MCR result>"; | |
1705 | } | |
1706 | ||
1707 | int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) | |
1708 | { | |
1709 | if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { | |
1710 | int ret = qman_alloc_fqid(&fqid); | |
1711 | ||
1712 | if (ret) | |
1713 | return ret; | |
1714 | } | |
1715 | fq->fqid = fqid; | |
1716 | fq->flags = flags; | |
1717 | fq->state = qman_fq_state_oos; | |
1718 | fq->cgr_groupid = 0; | |
1719 | ||
1720 | /* A context_b of 0 is allegedly special, so don't use that fqid */ | |
1721 | if (fqid == 0 || fqid >= num_fqids) { | |
1722 | WARN(1, "bad fqid %d\n", fqid); | |
1723 | return -EINVAL; | |
1724 | } | |
1725 | ||
1726 | fq->idx = fqid * 2; | |
1727 | if (flags & QMAN_FQ_FLAG_NO_MODIFY) | |
1728 | fq->idx++; | |
1729 | ||
1730 | WARN_ON(fq_table[fq->idx]); | |
1731 | fq_table[fq->idx] = fq; | |
1732 | ||
1733 | return 0; | |
1734 | } | |
1735 | EXPORT_SYMBOL(qman_create_fq); | |
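Typical usage pairs qman_create_fq() with the callbacks the tag will be
demuxed to; a sketch using a dynamically allocated FQID (my_rx_dqrr and
my_ern_cb are hypothetical):

	static struct qman_fq my_fq = {
		.cb = {
			.dqrr = my_rx_dqrr,	/* dequeued frames */
			.ern = my_ern_cb,	/* software-enqueue rejections */
		},
	};

	static int my_fq_create(void)
	{
		/* fqid 0 + DYNAMIC_FQID: let the allocator pick an FQID */
		return qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);
	}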
1736 | ||
1737 | void qman_destroy_fq(struct qman_fq *fq) | |
1738 | { | |
1739 | /* | |
1740 | * We don't need to lock the FQ as it is a pre-condition that the FQ be | |
1741 | * quiesced. Instead, run some checks. | |
1742 | */ | |
1743 | switch (fq->state) { | |
1744 | case qman_fq_state_parked: | |
1745 | case qman_fq_state_oos: | |
1746 | if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) | |
1747 | qman_release_fqid(fq->fqid); | |
1748 | ||
1749 | DPAA_ASSERT(fq_table[fq->idx]); | |
1750 | fq_table[fq->idx] = NULL; | |
1751 | return; | |
1752 | default: | |
1753 | break; | |
1754 | } | |
1755 | DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!"); | |
1756 | } | |
1757 | EXPORT_SYMBOL(qman_destroy_fq); | |
1758 | ||
1759 | u32 qman_fq_fqid(struct qman_fq *fq) | |
1760 | { | |
1761 | return fq->fqid; | |
1762 | } | |
1763 | EXPORT_SYMBOL(qman_fq_fqid); | |
1764 | ||
1765 | int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) | |
1766 | { | |
1767 | union qm_mc_command *mcc; | |
1768 | union qm_mc_result *mcr; | |
1769 | struct qman_portal *p; | |
1770 | u8 res, myverb; | |
1771 | int ret = 0; | |
1772 | ||
1773 | myverb = (flags & QMAN_INITFQ_FLAG_SCHED) | |
1774 | ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; | |
1775 | ||
1776 | if (fq->state != qman_fq_state_oos && | |
1777 | fq->state != qman_fq_state_parked) | |
1778 | return -EINVAL; | |
1779 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
1780 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | |
1781 | return -EINVAL; | |
1782 | #endif | |
1783 | if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { | |
1784 | /* OAC can't be set at the same time as TDTHRESH */ | |
1785 | if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) | |
1786 | return -EINVAL; | |
1787 | } | |
1788 | /* Issue an INITFQ_[PARKED|SCHED] management command */ | |
1789 | p = get_affine_portal(); | |
1790 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | |
1791 | (fq->state != qman_fq_state_oos && | |
1792 | fq->state != qman_fq_state_parked)) { | |
1793 | ret = -EBUSY; | |
1794 | goto out; | |
1795 | } | |
1796 | mcc = qm_mc_start(&p->p); | |
1797 | if (opts) | |
1798 | mcc->initfq = *opts; | |
1799 | mcc->initfq.fqid = fq->fqid; | |
1800 | mcc->initfq.count = 0; | |
1801 | /* | |
1802 | * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a | |
1803 | * demux pointer. Otherwise, the caller-provided value is allowed to | |
1804 | * stand; don't overwrite it. | |
1805 | */ | |
1806 | if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { | |
1807 | dma_addr_t phys_fq; | |
1808 | ||
1809 | mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; | |
1810 | mcc->initfq.fqd.context_b = fq_to_tag(fq); | |
1811 | /* | |
1812 | * and the physical address - NB, if the user wasn't trying to | |
1813 | * set CONTEXTA, clear the stashing settings. | |
1814 | */ | |
1815 | if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { | |
1816 | mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; | |
1817 | memset(&mcc->initfq.fqd.context_a, 0, | |
1818 | sizeof(mcc->initfq.fqd.context_a)); | |
1819 | } else { | |
1820 | phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), | |
1821 | DMA_TO_DEVICE); | |
1822 | qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); | |
1823 | } | |
1824 | } | |
1825 | if (flags & QMAN_INITFQ_FLAG_LOCAL) { | |
1826 | int wq = 0; | |
1827 | ||
1828 | if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { | |
1829 | mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; | |
1830 | wq = 4; | |
1831 | } | |
1832 | qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); | |
1833 | } | |
1834 | qm_mc_commit(&p->p, myverb); | |
1835 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
1836 | dev_err(p->config->dev, "MCR timeout\n"); | |
1837 | ret = -ETIMEDOUT; | |
1838 | goto out; | |
1839 | } | |
1840 | ||
1841 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); | |
1842 | res = mcr->result; | |
1843 | if (res != QM_MCR_RESULT_OK) { | |
1844 | ret = -EIO; | |
1845 | goto out; | |
1846 | } | |
1847 | if (opts) { | |
1848 | if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { | |
1849 | if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) | |
1850 | fq_set(fq, QMAN_FQ_STATE_CGR_EN); | |
1851 | else | |
1852 | fq_clear(fq, QMAN_FQ_STATE_CGR_EN); | |
1853 | } | |
1854 | if (opts->we_mask & QM_INITFQ_WE_CGID) | |
1855 | fq->cgr_groupid = opts->fqd.cgid; | |
1856 | } | |
1857 | fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? | |
1858 | qman_fq_state_sched : qman_fq_state_parked; | |
1859 | ||
1860 | out: | |
1861 | put_affine_portal(); | |
1862 | return ret; | |
1863 | } | |
1864 | EXPORT_SYMBOL(qman_init_fq); | |
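Continuing the sketch, initialising and scheduling such an FQ to the current
CPU's dedicated channel might look like this (work queue 3 and HOLDACTIVE are
arbitrary illustrative choices):

	static int my_fq_init(struct qman_fq *fq)
	{
		struct qm_mcc_initfq opts = {};

		opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
		/* target this CPU's channel, work queue 3 */
		qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(-1), 3);
		/* HELDACTIVE allows qman_cb_dqrr_park from the callback */
		opts.fqd.fq_ctrl = QM_FQCTRL_HOLDACTIVE;

		return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	}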
1865 | ||
1866 | int qman_schedule_fq(struct qman_fq *fq) | |
1867 | { | |
1868 | union qm_mc_command *mcc; | |
1869 | union qm_mc_result *mcr; | |
1870 | struct qman_portal *p; | |
1871 | int ret = 0; | |
1872 | ||
1873 | if (fq->state != qman_fq_state_parked) | |
1874 | return -EINVAL; | |
1875 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
1876 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | |
1877 | return -EINVAL; | |
1878 | #endif | |
1879 | /* Issue an ALTERFQ_SCHED management command */ | |
1880 | p = get_affine_portal(); | |
1881 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | |
1882 | fq->state != qman_fq_state_parked) { | |
1883 | ret = -EBUSY; | |
1884 | goto out; | |
1885 | } | |
1886 | mcc = qm_mc_start(&p->p); | |
1887 | mcc->alterfq.fqid = fq->fqid; | |
1888 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); | |
1889 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
1890 | dev_err(p->config->dev, "ALTER_SCHED timeout\n"); | |
1891 | ret = -ETIMEDOUT; | |
1892 | goto out; | |
1893 | } | |
1894 | ||
1895 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); | |
1896 | if (mcr->result != QM_MCR_RESULT_OK) { | |
1897 | ret = -EIO; | |
1898 | goto out; | |
1899 | } | |
1900 | fq->state = qman_fq_state_sched; | |
1901 | out: | |
1902 | put_affine_portal(); | |
1903 | return ret; | |
1904 | } | |
1905 | EXPORT_SYMBOL(qman_schedule_fq); | |
1906 | ||
1907 | int qman_retire_fq(struct qman_fq *fq, u32 *flags) | |
1908 | { | |
1909 | union qm_mc_command *mcc; | |
1910 | union qm_mc_result *mcr; | |
1911 | struct qman_portal *p; | |
1912 | int ret; | |
1913 | u8 res; | |
1914 | ||
1915 | if (fq->state != qman_fq_state_parked && | |
1916 | fq->state != qman_fq_state_sched) | |
1917 | return -EINVAL; | |
1918 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
1919 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | |
1920 | return -EINVAL; | |
1921 | #endif | |
1922 | p = get_affine_portal(); | |
1923 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | |
1924 | fq->state == qman_fq_state_retired || | |
1925 | fq->state == qman_fq_state_oos) { | |
1926 | ret = -EBUSY; | |
1927 | goto out; | |
1928 | } | |
1929 | mcc = qm_mc_start(&p->p); | |
1930 | mcc->alterfq.fqid = fq->fqid; | |
1931 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | |
1932 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
1933 | dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); | |
1934 | ret = -ETIMEDOUT; | |
1935 | goto out; | |
1936 | } | |
1937 | ||
1938 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); | |
1939 | res = mcr->result; | |
1940 | /* | |
1941 | * "Elegant" would be to treat OK/PENDING the same way; set CHANGING, | |
1942 | * and defer the flags until FQRNI or FQRN (respectively) show up. But | |
1943 | * "Friendly" is to process OK immediately, and not set CHANGING. We do | |
1944 | * friendly, otherwise the caller doesn't necessarily have a fully | |
1945 | * "retired" FQ on return even if the retirement was immediate. However | |
1946 | * this does mean some code duplication between here and | |
1947 | * fq_state_change(). | |
1948 | */ | |
1949 | if (res == QM_MCR_RESULT_OK) { | |
1950 | ret = 0; | |
1951 | /* Process 'fq' right away, we'll ignore FQRNI */ | |
1952 | if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) | |
1953 | fq_set(fq, QMAN_FQ_STATE_NE); | |
1954 | if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) | |
1955 | fq_set(fq, QMAN_FQ_STATE_ORL); | |
1956 | if (flags) | |
1957 | *flags = fq->flags; | |
1958 | fq->state = qman_fq_state_retired; | |
1959 | if (fq->cb.fqs) { | |
1960 | /* | |
1961 | * Another issue with supporting "immediate" retirement | |
1962 | * is that we're forced to drop FQRNIs, because by the | |
1963 | * time they're seen it may already be "too late" (the | |
1964 | * fq may have been OOS'd and free()'d already). But if | |
1965 | * the upper layer wants a callback whether it's | |
1966 | * immediate or not, we have to fake a "MR" entry to | |
1967 | * look like an FQRNI... | |
1968 | */ | |
1969 | union qm_mr_entry msg; | |
1970 | ||
1971 | msg.verb = QM_MR_VERB_FQRNI; | |
1972 | msg.fq.fqs = mcr->alterfq.fqs; | |
1973 | msg.fq.fqid = fq->fqid; | |
1974 | msg.fq.contextB = fq_to_tag(fq); | |
1975 | fq->cb.fqs(p, fq, &msg); | |
1976 | } | |
1977 | } else if (res == QM_MCR_RESULT_PENDING) { | |
1978 | ret = 1; | |
1979 | fq_set(fq, QMAN_FQ_STATE_CHANGING); | |
1980 | } else { | |
1981 | ret = -EIO; | |
1982 | } | |
1983 | out: | |
1984 | put_affine_portal(); | |
1985 | return ret; | |
1986 | } | |
1987 | EXPORT_SYMBOL(qman_retire_fq); | |
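The OK-vs-PENDING split above is what a teardown path has to cope with; a
simplified sketch follows (the wait for the FQRN message, normally observed
through fq->cb.fqs, is elided):

	static int my_fq_teardown(struct qman_fq *fq)
	{
		u32 flags;
		int ret;

		ret = qman_retire_fq(fq, &flags);
		if (ret < 0)
			return ret;
		if (ret == 1) {
			/*
			 * QM_MCR_RESULT_PENDING: retirement completes
			 * asynchronously via an FQRN; wait for it here
			 * (elided) before moving on.
			 */
		}
		ret = qman_oos_fq(fq);	/* retired -> out of service */
		if (ret)
			return ret;
		qman_destroy_fq(fq);	/* drops the fq_table entry */
		return 0;
	}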
1988 | ||
1989 | int qman_oos_fq(struct qman_fq *fq) | |
1990 | { | |
1991 | union qm_mc_command *mcc; | |
1992 | union qm_mc_result *mcr; | |
1993 | struct qman_portal *p; | |
1994 | int ret = 0; | |
1995 | ||
1996 | if (fq->state != qman_fq_state_retired) | |
1997 | return -EINVAL; | |
1998 | #ifdef CONFIG_FSL_DPAA_CHECKING | |
1999 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | |
2000 | return -EINVAL; | |
2001 | #endif | |
2002 | p = get_affine_portal(); | |
2003 | if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) || | |
2004 | fq->state != qman_fq_state_retired) { | |
2005 | ret = -EBUSY; | |
2006 | goto out; | |
2007 | } | |
2008 | mcc = qm_mc_start(&p->p); | |
2009 | mcc->alterfq.fqid = fq->fqid; | |
2010 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | |
2011 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2012 | ret = -ETIMEDOUT; | |
2013 | goto out; | |
2014 | } | |
2015 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); | |
2016 | if (mcr->result != QM_MCR_RESULT_OK) { | |
2017 | ret = -EIO; | |
2018 | goto out; | |
2019 | } | |
2020 | fq->state = qman_fq_state_oos; | |
2021 | out: | |
2022 | put_affine_portal(); | |
2023 | return ret; | |
2024 | } | |
2025 | EXPORT_SYMBOL(qman_oos_fq); | |
2026 | ||
2027 | int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) | |
2028 | { | |
2029 | union qm_mc_command *mcc; | |
2030 | union qm_mc_result *mcr; | |
2031 | struct qman_portal *p = get_affine_portal(); | |
2032 | int ret = 0; | |
2033 | ||
2034 | mcc = qm_mc_start(&p->p); | |
2035 | mcc->queryfq.fqid = fq->fqid; | |
2036 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); | |
2037 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2038 | ret = -ETIMEDOUT; | |
2039 | goto out; | |
2040 | } | |
2041 | ||
2042 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); | |
2043 | if (mcr->result == QM_MCR_RESULT_OK) | |
2044 | *fqd = mcr->queryfq.fqd; | |
2045 | else | |
2046 | ret = -EIO; | |
2047 | out: | |
2048 | put_affine_portal(); | |
2049 | return ret; | |
2050 | } | |
2051 | ||
2052 | static int qman_query_fq_np(struct qman_fq *fq, | |
2053 | struct qm_mcr_queryfq_np *np) | |
2054 | { | |
2055 | union qm_mc_command *mcc; | |
2056 | union qm_mc_result *mcr; | |
2057 | struct qman_portal *p = get_affine_portal(); | |
2058 | int ret = 0; | |
2059 | ||
2060 | mcc = qm_mc_start(&p->p); | |
2061 | mcc->queryfq.fqid = fq->fqid; | |
2062 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); | |
2063 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2064 | ret = -ETIMEDOUT; | |
2065 | goto out; | |
2066 | } | |
2067 | ||
2068 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); | |
2069 | if (mcr->result == QM_MCR_RESULT_OK) | |
2070 | *np = mcr->queryfq_np; | |
2071 | else if (mcr->result == QM_MCR_RESULT_ERR_FQID) | |
2072 | ret = -ERANGE; | |
2073 | else | |
2074 | ret = -EIO; | |
2075 | out: | |
2076 | put_affine_portal(); | |
2077 | return ret; | |
2078 | } | |
2079 | ||
2080 | static int qman_query_cgr(struct qman_cgr *cgr, | |
2081 | struct qm_mcr_querycgr *cgrd) | |
2082 | { | |
2083 | union qm_mc_command *mcc; | |
2084 | union qm_mc_result *mcr; | |
2085 | struct qman_portal *p = get_affine_portal(); | |
2086 | int ret = 0; | |
2087 | ||
2088 | mcc = qm_mc_start(&p->p); | |
2089 | mcc->querycgr.cgid = cgr->cgrid; | |
2090 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); | |
2091 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2092 | ret = -ETIMEDOUT; | |
2093 | goto out; | |
2094 | } | |
2095 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); | |
2096 | if (mcr->result == QM_MCR_RESULT_OK) | |
2097 | *cgrd = mcr->querycgr; | |
2098 | else { | |
2099 | dev_err(p->config->dev, "QUERY_CGR failed: %s\n", | |
2100 | mcr_result_str(mcr->result)); | |
2101 | ret = -EIO; | |
2102 | } | |
2103 | out: | |
2104 | put_affine_portal(); | |
2105 | return ret; | |
2106 | } | |
2107 | ||
2108 | int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result) | |
2109 | { | |
2110 | struct qm_mcr_querycgr query_cgr; | |
2111 | int err; | |
2112 | ||
2113 | err = qman_query_cgr(cgr, &query_cgr); | |
2114 | if (err) | |
2115 | return err; | |
2116 | ||
2117 | *result = !!query_cgr.cgr.cs; | |
2118 | return 0; | |
2119 | } | |
2120 | EXPORT_SYMBOL(qman_query_cgr_congested); | |
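A sketch of how a transmit path might consult this before enqueuing (my_cgr
is a previously created CGR object):

	static bool my_should_drop(struct qman_cgr *my_cgr)
	{
		bool congested = false;

		/* reads the instantaneous congestion state (CS) bit */
		if (qman_query_cgr_congested(my_cgr, &congested))
			return false;	/* query failed: don't drop */
		return congested;
	}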
2121 | ||
2122 | /* internal function used as a wait_event() expression */ | |
2123 | static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) | |
2124 | { | |
2125 | unsigned long irqflags; | |
2126 | int ret = -EBUSY; | |
2127 | ||
2128 | local_irq_save(irqflags); | |
2129 | if (p->vdqcr_owned) | |
2130 | goto out; | |
2131 | if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) | |
2132 | goto out; | |
2133 | ||
2134 | fq_set(fq, QMAN_FQ_STATE_VDQCR); | |
2135 | p->vdqcr_owned = fq; | |
2136 | qm_dqrr_vdqcr_set(&p->p, vdqcr); | |
2137 | ret = 0; | |
2138 | out: | |
2139 | local_irq_restore(irqflags); | |
2140 | return ret; | |
2141 | } | |
2142 | ||
2143 | static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) | |
2144 | { | |
2145 | int ret; | |
2146 | ||
2147 | *p = get_affine_portal(); | |
2148 | ret = set_p_vdqcr(*p, fq, vdqcr); | |
2149 | put_affine_portal(); | |
2150 | return ret; | |
2151 | } | |
2152 | ||
2153 | static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, | |
2154 | u32 vdqcr, u32 flags) | |
2155 | { | |
2156 | int ret = 0; | |
2157 | ||
2158 | if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) | |
2159 | ret = wait_event_interruptible(affine_queue, | |
2160 | !set_vdqcr(p, fq, vdqcr)); | |
2161 | else | |
2162 | wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr)); | |
2163 | return ret; | |
2164 | } | |
2165 | ||
2166 | int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr) | |
2167 | { | |
2168 | struct qman_portal *p; | |
2169 | int ret; | |
2170 | ||
2171 | if (fq->state != qman_fq_state_parked && | |
2172 | fq->state != qman_fq_state_retired) | |
2173 | return -EINVAL; | |
2174 | if (vdqcr & QM_VDQCR_FQID_MASK) | |
2175 | return -EINVAL; | |
2176 | if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) | |
2177 | return -EBUSY; | |
2178 | vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; | |
2179 | if (flags & QMAN_VOLATILE_FLAG_WAIT) | |
2180 | ret = wait_vdqcr_start(&p, fq, vdqcr, flags); | |
2181 | else | |
2182 | ret = set_vdqcr(&p, fq, vdqcr); | |
2183 | if (ret) | |
2184 | return ret; | |
2185 | /* VDQCR is set */ | |
2186 | if (flags & QMAN_VOLATILE_FLAG_FINISH) { | |
2187 | if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) | |
2188 | /* | |
2189 | * NB: don't propagate any error - the caller wouldn't | |
2190 | * know whether the VDQCR was issued or not. A signal | |
2191 | * could arrive after returning anyway, so the caller | |
2192 | * can check signal_pending() if that's an issue. | |
2193 | */ | |
2194 | wait_event_interruptible(affine_queue, | |
2195 | !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); | |
2196 | else | |
2197 | wait_event(affine_queue, | |
2198 | !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); | |
2199 | } | |
2200 | return 0; | |
2201 | } | |
2202 | EXPORT_SYMBOL(qman_volatile_dequeue); | |
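Draining a parked or retired FQ then reduces to one call; a sketch using the
till-empty encoding:

	static int my_drain_fq(struct qman_fq *fq)
	{
		/* FQID bits must be zero; qman_volatile_dequeue() fills them in */
		return qman_volatile_dequeue(fq,
					     QMAN_VOLATILE_FLAG_WAIT |
					     QMAN_VOLATILE_FLAG_FINISH,
					     QM_VDQCR_NUMFRAMES_TILLEMPTY);
	}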
2203 | ||
2204 | static void update_eqcr_ci(struct qman_portal *p, u8 avail) | |
2205 | { | |
2206 | if (avail) | |
2207 | qm_eqcr_cce_prefetch(&p->p); | |
2208 | else | |
2209 | qm_eqcr_cce_update(&p->p); | |
2210 | } | |
2211 | ||
2212 | int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) | |
2213 | { | |
2214 | struct qman_portal *p; | |
2215 | struct qm_eqcr_entry *eq; | |
2216 | unsigned long irqflags; | |
2217 | u8 avail; | |
| int ret = 0; | |
2218 | ||
2219 | p = get_affine_portal(); | |
2220 | local_irq_save(irqflags); | |
2221 | ||
2222 | if (p->use_eqcr_ci_stashing) { | |
2223 | /* | |
2224 | * The stashing case is easy, only update if we need to in | |
2225 | * order to try to liberate ring entries. | |
2226 | */ | |
2227 | eq = qm_eqcr_start_stash(&p->p); | |
2228 | } else { | |
2229 | /* | |
2230 | * The non-stashing case is harder, need to prefetch ahead of | |
2231 | * time. | |
2232 | */ | |
2233 | avail = qm_eqcr_get_avail(&p->p); | |
2234 | if (avail < 2) | |
2235 | update_eqcr_ci(p, avail); | |
2236 | eq = qm_eqcr_start_no_stash(&p->p); | |
2237 | } | |
2238 | ||
2239 | if (unlikely(!eq)) { | |
| /* EQCR ring full: report it rather than silently dropping */ | |
2240 | ret = -EBUSY; | |
| goto out; | |
| } | |
2241 | ||
2242 | eq->fqid = fq->fqid; | |
2243 | eq->tag = fq_to_tag(fq); | |
2244 | eq->fd = *fd; | |
2245 | ||
2246 | qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); | |
2247 | out: | |
2248 | local_irq_restore(irqflags); | |
2249 | put_affine_portal(); | |
2250 | return ret; | |
2251 | } | |
2252 | EXPORT_SYMBOL(qman_enqueue); | |
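With the -EBUSY return above, a caller can spin briefly on a full EQCR; a
sketch (the frame descriptor is assumed to be already built):

	static int my_tx(struct qman_fq *fq, const struct qm_fd *fd)
	{
		int i;

		for (i = 0; i < 10; i++) {	/* arbitrary retry budget */
			if (!qman_enqueue(fq, fd))
				return 0;
			cpu_relax();	/* EQCR full, let CI advance */
		}
		return -EBUSY;
	}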
2253 | ||
2254 | static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags, | |
2255 | struct qm_mcc_initcgr *opts) | |
2256 | { | |
2257 | union qm_mc_command *mcc; | |
2258 | union qm_mc_result *mcr; | |
2259 | struct qman_portal *p = get_affine_portal(); | |
2260 | u8 verb = QM_MCC_VERB_MODIFYCGR; | |
2261 | int ret = 0; | |
2262 | ||
2263 | mcc = qm_mc_start(&p->p); | |
2264 | if (opts) | |
2265 | mcc->initcgr = *opts; | |
2266 | mcc->initcgr.cgid = cgr->cgrid; | |
2267 | if (flags & QMAN_CGR_FLAG_USE_INIT) | |
2268 | verb = QM_MCC_VERB_INITCGR; | |
2269 | qm_mc_commit(&p->p, verb); | |
2270 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2271 | ret = -ETIMEDOUT; | |
2272 | goto out; | |
2273 | } | |
2274 | ||
2275 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); | |
2276 | if (mcr->result != QM_MCR_RESULT_OK) | |
2277 | ret = -EIO; | |
2278 | ||
2279 | out: | |
2280 | put_affine_portal(); | |
2281 | return ret; | |
2282 | } | |
2283 | ||
2284 | #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) | |
2285 | #define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n)) | |
2286 | ||
2287 | static u8 qman_cgr_cpus[CGR_NUM]; | |
2288 | ||
2289 | void qman_init_cgr_all(void) | |
2290 | { | |
2291 | struct qman_cgr cgr; | |
2292 | int err_cnt = 0; | |
2293 | ||
2294 | for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) { | |
2295 | if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL)) | |
2296 | err_cnt++; | |
2297 | } | |
2298 | ||
2299 | if (err_cnt) | |
2300 | pr_err("Warning: %d error%s while initialising CGR h/w\n", | |
2301 | err_cnt, (err_cnt > 1) ? "s" : ""); | |
2302 | } | |
2303 | ||
2304 | int qman_create_cgr(struct qman_cgr *cgr, u32 flags, | |
2305 | struct qm_mcc_initcgr *opts) | |
2306 | { | |
2307 | struct qm_mcr_querycgr cgr_state; | |
2308 | struct qm_mcc_initcgr local_opts = {}; | |
2309 | int ret; | |
2310 | struct qman_portal *p; | |
2311 | ||
2312 | /* | |
2313 | * We have to check that the provided CGRID is within the limits of the | |
2314 | * data-structures, for obvious reasons. However we'll let h/w take | |
2315 | * care of determining whether it's within the limits of what exists on | |
2316 | * the SoC. | |
2317 | */ | |
2318 | if (cgr->cgrid >= CGR_NUM) | |
2319 | return -EINVAL; | |
2320 | ||
2321 | preempt_disable(); | |
2322 | p = get_affine_portal(); | |
2323 | qman_cgr_cpus[cgr->cgrid] = smp_processor_id(); | |
2324 | preempt_enable(); | |
2325 | ||
2326 | cgr->chan = p->config->channel; | |
2327 | spin_lock(&p->cgr_lock); | |
2328 | ||
2329 | if (opts) { | |
2330 | ret = qman_query_cgr(cgr, &cgr_state); | |
2331 | if (ret) | |
2332 | goto out; | |
2333 | /* start from the caller's template */ | |
2334 | local_opts = *opts; | |
2335 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | |
2336 | local_opts.cgr.cscn_targ_upd_ctrl = | |
2337 | QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); | |
2338 | else | |
2339 | /* Overwrite TARG */ | |
2340 | local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | | |
2341 | TARG_MASK(p); | |
2342 | local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; | |
2343 | ||
2344 | /* send init if the flags say so; opts is known non-NULL here */ | |
2345 | if (flags & QMAN_CGR_FLAG_USE_INIT) | |
2346 | ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, | |
2347 | &local_opts); | |
2348 | else | |
2349 | ret = qm_modify_cgr(cgr, 0, &local_opts); | |
2350 | if (ret) | |
2351 | goto out; | |
2352 | } | |
2353 | ||
2354 | list_add(&cgr->node, &p->cgr_cbs); | |
2355 | ||
2356 | /* Determine if newly added object requires its callback to be called */ | |
2357 | ret = qman_query_cgr(cgr, &cgr_state); | |
2358 | if (ret) { | |
2359 | /* we can't go back, so proceed and return success */ | |
2360 | dev_err(p->config->dev, "CGR HW state partially modified\n"); | |
2361 | ret = 0; | |
2362 | goto out; | |
2363 | } | |
2364 | if (cgr->cb && cgr_state.cgr.cscn_en && | |
2365 | qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) | |
2366 | cgr->cb(p, cgr, 1); | |
2367 | out: | |
2368 | spin_unlock(&p->cgr_lock); | |
2369 | put_affine_portal(); | |
2370 | return ret; | |
2371 | } | |
2372 | EXPORT_SYMBOL(qman_create_cgr); | |
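Creating a CGR that generates congestion state change notifications might
look like the sketch below (my_cscn_cb is hypothetical; the we_mask/cscn_en
usage follows the struct definitions in qman.h):

	static void my_cscn_cb(struct qman_portal *qm, struct qman_cgr *cgr,
			       int congested)
	{
		/* congested == 1 on entry to congestion, 0 on exit */
	}

	static int my_cgr_setup(struct qman_cgr *cgr, u32 cgrid)
	{
		struct qm_mcc_initcgr opts = {};

		cgr->cgrid = cgrid;
		cgr->cb = my_cscn_cb;

		/* enable CSCN generation; create() adds this portal as target */
		opts.we_mask = QM_CGR_WE_CSCN_EN;
		opts.cgr.cscn_en = QM_CGR_EN;

		return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	}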
2373 | ||
2374 | int qman_delete_cgr(struct qman_cgr *cgr) | |
2375 | { | |
2376 | unsigned long irqflags; | |
2377 | struct qm_mcr_querycgr cgr_state; | |
2378 | struct qm_mcc_initcgr local_opts; | |
2379 | int ret = 0; | |
2380 | struct qman_cgr *i; | |
2381 | struct qman_portal *p = get_affine_portal(); | |
2382 | ||
2383 | if (cgr->chan != p->config->channel) { | |
2384 | /* attempt to delete from other portal than creator */ | |
2385 | dev_err(p->config->dev, "CGR not owned by current portal"); | |
2386 | dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n", | |
2387 | cgr->chan, p->config->channel); | |
2388 | ||
2389 | ret = -EINVAL; | |
2390 | goto put_portal; | |
2391 | } | |
2392 | memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); | |
2393 | spin_lock_irqsave(&p->cgr_lock, irqflags); | |
2394 | list_del(&cgr->node); | |
2395 | /* | |
2396 | * If there are no other CGR objects for this CGRID in the list, | |
2397 | * update CSCN_TARG accordingly | |
2398 | */ | |
2399 | list_for_each_entry(i, &p->cgr_cbs, node) | |
2400 | if (i->cgrid == cgr->cgrid && i->cb) | |
2401 | goto release_lock; | |
2402 | ret = qman_query_cgr(cgr, &cgr_state); | |
2403 | if (ret) { | |
2404 | /* add back to the list */ | |
2405 | list_add(&cgr->node, &p->cgr_cbs); | |
2406 | goto release_lock; | |
2407 | } | |
2408 | /* Overwrite TARG */ | |
2409 | local_opts.we_mask = QM_CGR_WE_CSCN_TARG; | |
2410 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | |
2411 | local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); | |
2412 | else | |
2413 | local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & | |
2414 | ~(TARG_MASK(p)); | |
2415 | ret = qm_modify_cgr(cgr, 0, &local_opts); | |
2416 | if (ret) | |
2417 | /* add back to the list */ | |
2418 | list_add(&cgr->node, &p->cgr_cbs); | |
2419 | release_lock: | |
2420 | spin_unlock_irqrestore(&p->cgr_lock, irqflags); | |
2421 | put_portal: | |
2422 | put_affine_portal(); | |
2423 | return ret; | |
2424 | } | |
2425 | EXPORT_SYMBOL(qman_delete_cgr); | |
2426 | ||
2427 | struct cgr_comp { | |
2428 | struct qman_cgr *cgr; | |
2429 | struct completion completion; | |
2430 | }; | |
2431 | ||
2432 | static int qman_delete_cgr_thread(void *p) | |
2433 | { | |
2434 | struct cgr_comp *cgr_comp = (struct cgr_comp *)p; | |
2435 | int ret; | |
2436 | ||
2437 | ret = qman_delete_cgr(cgr_comp->cgr); | |
2438 | complete(&cgr_comp->completion); | |
2439 | ||
2440 | return ret; | |
2441 | } | |
2442 | ||
2443 | void qman_delete_cgr_safe(struct qman_cgr *cgr) | |
2444 | { | |
2445 | struct task_struct *thread; | |
2446 | struct cgr_comp cgr_comp; | |
2447 | ||
2448 | preempt_disable(); | |
2449 | if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { | |
2450 | init_completion(&cgr_comp.completion); | |
2451 | cgr_comp.cgr = cgr; | |
2452 | thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, | |
2453 | "cgr_del"); | |
2454 | ||
2455 | if (IS_ERR(thread)) | |
2456 | goto out; | |
2457 | ||
2458 | kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); | |
2459 | wake_up_process(thread); | |
2460 | wait_for_completion(&cgr_comp.completion); | |
2461 | preempt_enable(); | |
2462 | return; | |
2463 | } | |
2464 | out: | |
2465 | qman_delete_cgr(cgr); | |
2466 | preempt_enable(); | |
2467 | } | |
2468 | EXPORT_SYMBOL(qman_delete_cgr_safe); | |
2469 | ||
2470 | /* Cleanup FQs */ | |
2471 | ||
2472 | static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v) | |
2473 | { | |
2474 | const union qm_mr_entry *msg; | |
2475 | int found = 0; | |
2476 | ||
2477 | qm_mr_pvb_update(p); | |
2478 | msg = qm_mr_current(p); | |
2479 | while (msg) { | |
2480 | if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v) | |
2481 | found = 1; | |
2482 | qm_mr_next(p); | |
2483 | qm_mr_cci_consume_to_current(p); | |
2484 | qm_mr_pvb_update(p); | |
2485 | msg = qm_mr_current(p); | |
2486 | } | |
2487 | return found; | |
2488 | } | |
2489 | ||
2490 | static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, | |
2491 | bool wait) | |
2492 | { | |
2493 | const struct qm_dqrr_entry *dqrr; | |
2494 | int found = 0; | |
2495 | ||
2496 | do { | |
2497 | qm_dqrr_pvb_update(p); | |
2498 | dqrr = qm_dqrr_current(p); | |
2499 | if (!dqrr) | |
2500 | cpu_relax(); | |
2501 | } while (wait && !dqrr); | |
2502 | ||
2503 | while (dqrr) { | |
2504 | if (dqrr->fqid == fqid && (dqrr->stat & s)) | |
2505 | found = 1; | |
2506 | qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); | |
2507 | qm_dqrr_pvb_update(p); | |
2508 | qm_dqrr_next(p); | |
2509 | dqrr = qm_dqrr_current(p); | |
2510 | } | |
2511 | return found; | |
2512 | } | |
2513 | ||
2514 | #define qm_mr_drain(p, V) \ | |
2515 | _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V) | |
2516 | ||
2517 | #define qm_dqrr_drain(p, f, S) \ | |
2518 | _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false) | |
2519 | ||
2520 | #define qm_dqrr_drain_wait(p, f, S) \ | |
2521 | _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true) | |
2522 | ||
2523 | #define qm_dqrr_drain_nomatch(p) \ | |
2524 | _qm_dqrr_consume_and_match(p, 0, 0, false) | |
2525 | ||
2526 | static int qman_shutdown_fq(u32 fqid) | |
2527 | { | |
2528 | struct qman_portal *p; | |
2529 | struct device *dev; | |
2530 | union qm_mc_command *mcc; | |
2531 | union qm_mc_result *mcr; | |
2532 | int orl_empty, drain = 0, ret = 0; | |
2533 | u32 channel, wq, res; | |
2534 | u8 state; | |
2535 | ||
2536 | p = get_affine_portal(); | |
2537 | dev = p->config->dev; | |
2538 | /* Determine the state of the FQID */ | |
2539 | mcc = qm_mc_start(&p->p); | |
2540 | mcc->queryfq_np.fqid = fqid; | |
2541 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); | |
2542 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2543 | dev_err(dev, "QUERYFQ_NP timeout\n"); | |
2544 | ret = -ETIMEDOUT; | |
2545 | goto out; | |
2546 | } | |
2547 | ||
2548 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); | |
2549 | state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; | |
2550 | if (state == QM_MCR_NP_STATE_OOS) | |
2551 | goto out; /* Already OOS, no need to do any more checks */ | |
2552 | ||
2553 | /* Query which channel the FQ is using */ | |
2554 | mcc = qm_mc_start(&p->p); | |
2555 | mcc->queryfq.fqid = fqid; | |
2556 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); | |
2557 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2558 | dev_err(dev, "QUERYFQ timeout\n"); | |
2559 | ret = -ETIMEDOUT; | |
2560 | goto out; | |
2561 | } | |
2562 | ||
2563 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); | |
2564 | /* Need to store these since the MCR gets reused */ | |
2565 | channel = qm_fqd_get_chan(&mcr->queryfq.fqd); | |
2566 | wq = qm_fqd_get_wq(&mcr->queryfq.fqd); | |
2567 | ||
2568 | switch (state) { | |
2569 | case QM_MCR_NP_STATE_TEN_SCHED: | |
2570 | case QM_MCR_NP_STATE_TRU_SCHED: | |
2571 | case QM_MCR_NP_STATE_ACTIVE: | |
2572 | case QM_MCR_NP_STATE_PARKED: | |
2573 | orl_empty = 0; | |
2574 | mcc = qm_mc_start(&p->p); | |
2575 | mcc->alterfq.fqid = fqid; | |
2576 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | |
2577 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2578 | dev_err(dev, "QUERYFQ_NP timeout\n"); | |
2579 | ret = -ETIMEDOUT; | |
2580 | goto out; | |
2581 | } | |
2582 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2583 | QM_MCR_VERB_ALTER_RETIRE); | |
2584 | res = mcr->result; /* Make a copy as we reuse MCR below */ | |
2585 | ||
2586 | if (res == QM_MCR_RESULT_PENDING) { | |
2587 | /* | |
2588 | * Need to wait for the FQRN in the message ring, which | |
2589 | * will only occur once the FQ has been drained. In | |
2590 | * order for the FQ to drain, the portal needs to be set | |
2591 | * to dequeue from the channel the FQ is scheduled on | |
2592 | */ | |
2593 | int found_fqrn = 0; | |
2594 | u16 dequeue_wq = 0; | |
2595 | ||
2596 | /* Flag that we need to drain FQ */ | |
2597 | drain = 1; | |
2598 | ||
2599 | if (channel >= qm_channel_pool1 && | |
2600 | channel < qm_channel_pool1 + 15) { | |
2601 | /* Pool channel, enable the bit in the portal */ | |
2602 | dequeue_wq = (channel - | |
2603 | qm_channel_pool1 + 1)<<4 | wq; | |
2604 | } else if (channel < qm_channel_pool1) { | |
2605 | /* Dedicated channel */ | |
2606 | dequeue_wq = wq; | |
2607 | } else { | |
2608 | dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x", | |
2609 | fqid, channel); | |
2610 | ret = -EBUSY; | |
2611 | goto out; | |
2612 | } | |
2613 | /* Set the sdqcr to drain this channel */ | |
2614 | if (channel < qm_channel_pool1) | |
2615 | qm_dqrr_sdqcr_set(&p->p, | |
2616 | QM_SDQCR_TYPE_ACTIVE | | |
2617 | QM_SDQCR_CHANNELS_DEDICATED); | |
2618 | else | |
2619 | qm_dqrr_sdqcr_set(&p->p, | |
2620 | QM_SDQCR_TYPE_ACTIVE | | |
2621 | QM_SDQCR_CHANNELS_POOL_CONV | |
2622 | (channel)); | |
2623 | do { | |
2624 | /* Keep draining DQRR while checking the MR */ | |
2625 | qm_dqrr_drain_nomatch(&p->p); | |
2626 | /* Process message ring too */ | |
2627 | found_fqrn = qm_mr_drain(&p->p, FQRN); | |
2628 | cpu_relax(); | |
2629 | } while (!found_fqrn); | |
2630 | ||
2631 | } | |
2632 | if (res != QM_MCR_RESULT_OK && | |
2633 | res != QM_MCR_RESULT_PENDING) { | |
2634 | dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n", | |
2635 | fqid, res); | |
2636 | ret = -EIO; | |
2637 | goto out; | |
2638 | } | |
2639 | if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { | |
2640 | /* | |
2641 | * ORL had no entries, no need to wait until the | |
2642 | * ERNs come in | |
2643 | */ | |
2644 | orl_empty = 1; | |
2645 | } | |
2646 | /* | |
2647 | * Retirement succeeded, check to see if FQ needs | |
2648 | * to be drained | |
2649 | */ | |
2650 | if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { | |
2651 | /* FQ is Not Empty, drain using volatile DQ commands */ | |
2652 | do { | |
2653 | u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); | |
2654 | ||
2655 | qm_dqrr_vdqcr_set(&p->p, vdqcr); | |
2656 | /* | |
2657 | * Wait for a dequeue and process the dequeues, | |
2658 | * making sure to empty the ring completely | |
2659 | */ | |
2660 | } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); | |
2661 | } | |
2662 | qm_dqrr_sdqcr_set(&p->p, 0); | |
2663 | ||
2664 | while (!orl_empty) { | |
2665 | /* Wait for the ORL to have been completely drained */ | |
2666 | orl_empty = qm_mr_drain(&p->p, FQRL); | |
2667 | cpu_relax(); | |
2668 | } | |
2669 | mcc = qm_mc_start(&p->p); | |
2670 | mcc->alterfq.fqid = fqid; | |
2671 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | |
2672 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2673 | ret = -ETIMEDOUT; | |
2674 | goto out; | |
2675 | } | |
2676 | ||
2677 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2678 | QM_MCR_VERB_ALTER_OOS); | |
2679 | if (mcr->result != QM_MCR_RESULT_OK) { | |
2680 | dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n", | |
2681 | fqid, mcr->result); | |
2682 | ret = -EIO; | |
2683 | goto out; | |
2684 | } | |
2685 | break; | |
2686 | ||
2687 | case QM_MCR_NP_STATE_RETIRED: | |
2688 | /* Send OOS Command */ | |
2689 | mcc = qm_mc_start(&p->p); | |
2690 | mcc->alterfq.fqid = fqid; | |
2691 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | |
2692 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | |
2693 | ret = -ETIMEDOUT; | |
2694 | goto out; | |
2695 | } | |
2696 | ||
2697 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | |
2698 | QM_MCR_VERB_ALTER_OOS); | |
2699 | if (mcr->result) { | |
2700 | dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", | |
2701 | fqid, mcr->result); | |
2702 | ret = -EIO; | |
2703 | goto out; | |
2704 | } | |
2705 | break; | |
2706 | ||
2707 | case QM_MCR_NP_STATE_OOS: | |
2708 | /* Done */ | |
2709 | break; | |
2710 | ||
2711 | default: | |
2712 | ret = -EIO; | |
2713 | } | |
2714 | ||
2715 | out: | |
2716 | put_affine_portal(); | |
2717 | return ret; | |
2718 | } | |
2719 | ||
2720 | const struct qm_portal_config *qman_get_qm_portal_config( | |
2721 | struct qman_portal *portal) | |
2722 | { | |
2723 | return portal->config; | |
2724 | } | |
2725 | ||
2726 | struct gen_pool *qm_fqalloc; /* FQID allocator */ | |
2727 | struct gen_pool *qm_qpalloc; /* pool-channel allocator */ | |
2728 | struct gen_pool *qm_cgralloc; /* CGR ID allocator */ | |
2729 | ||
2730 | static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt) | |
2731 | { | |
2732 | unsigned long addr; | |
2733 | ||
2734 | addr = gen_pool_alloc(p, cnt); | |
2735 | if (!addr) | |
2736 | return -ENOMEM; | |
2737 | ||
2738 | *result = addr & ~DPAA_GENALLOC_OFF; | |
2739 | ||
2740 | return 0; | |
2741 | } | |
2742 | ||
2743 | int qman_alloc_fqid_range(u32 *result, u32 count) | |
2744 | { | |
2745 | return qman_alloc_range(qm_fqalloc, result, count); | |
2746 | } | |
2747 | EXPORT_SYMBOL(qman_alloc_fqid_range); | |
2748 | ||
2749 | int qman_alloc_pool_range(u32 *result, u32 count) | |
2750 | { | |
2751 | return qman_alloc_range(qm_qpalloc, result, count); | |
2752 | } | |
2753 | EXPORT_SYMBOL(qman_alloc_pool_range); | |
2754 | ||
2755 | int qman_alloc_cgrid_range(u32 *result, u32 count) | |
2756 | { | |
2757 | return qman_alloc_range(qm_cgralloc, result, count); | |
2758 | } | |
2759 | EXPORT_SYMBOL(qman_alloc_cgrid_range); | |
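These are usually consumed one ID at a time through the qman.h wrappers; a
sketch of paired allocation with cleanup on failure:

	static int my_alloc_ids(u32 *fqid, u32 *cgrid)
	{
		int ret;

		ret = qman_alloc_fqid_range(fqid, 1);	/* one FQID */
		if (ret)
			return ret;

		ret = qman_alloc_cgrid_range(cgrid, 1);	/* one CGR ID */
		if (ret)
			qman_release_fqid(*fqid);	/* shuts the FQ down, then frees */
		return ret;
	}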
2760 | ||
2761 | int qman_release_fqid(u32 fqid) | |
2762 | { | |
2763 | int ret = qman_shutdown_fq(fqid); | |
2764 | ||
2765 | if (ret) { | |
2766 | pr_debug("FQID %d leaked\n", fqid); | |
2767 | return ret; | |
2768 | } | |
2769 | ||
2770 | gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1); | |
2771 | return 0; | |
2772 | } | |
2773 | EXPORT_SYMBOL(qman_release_fqid); | |
2774 | ||
2775 | static int qpool_cleanup(u32 qp) | |
2776 | { | |
2777 | /* | |
2778 | * We query all FQDs starting from FQID 1 until we get an "invalid FQID" | |
2779 | * error, looking for non-OOS FQDs whose destination channel is the | |
2780 | * pool-channel being released. When a non-OOS FQD is found, we attempt | |
2781 | * to clean it up. | |
2782 | */ | |
2783 | struct qman_fq fq = { | |
2784 | .fqid = QM_FQID_RANGE_START | |
2785 | }; | |
2786 | int err; | |
2787 | ||
2788 | do { | |
2789 | struct qm_mcr_queryfq_np np; | |
2790 | ||
2791 | err = qman_query_fq_np(&fq, &np); | |
2792 | if (err == -ERANGE) | |
2793 | /* FQID range exceeded, found no problems */ | |
2794 | return 0; | |
2795 | else if (WARN_ON(err)) | |
2796 | return err; | |
2797 | ||
2798 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { | |
2799 | struct qm_fqd fqd; | |
2800 | ||
2801 | err = qman_query_fq(&fq, &fqd); | |
2802 | if (WARN_ON(err)) | |
2803 | return err; | |
2804 | if (qm_fqd_get_chan(&fqd) == qp) { |
2805 | /* The channel is the FQ's target, clean it */ | |
2806 | err = qman_shutdown_fq(fq.fqid); | |
2807 | if (err) | |
2808 | /* | |
2809 | * Couldn't shut down the FQ | |
2810 | * so the pool must be leaked | |
2811 | */ | |
2812 | return err; | |
2813 | } | |
2814 | } | |
2815 | /* Move to the next FQID */ | |
2816 | fq.fqid++; | |
2817 | } while (1); | |
2818 | } | |
2819 | ||
2820 | int qman_release_pool(u32 qp) | |
2821 | { | |
2822 | int ret; | |
2823 | ||
2824 | ret = qpool_cleanup(qp); | |
2825 | if (ret) { | |
2826 | pr_debug("CHID %d leaked\n", qp); | |
2827 | return ret; | |
2828 | } | |
2829 | ||
2830 | gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1); | |
2831 | return 0; | |
2832 | } | |
2833 | EXPORT_SYMBOL(qman_release_pool); | |
2834 | ||
2835 | static int cgr_cleanup(u32 cgrid) | |
2836 | { | |
2837 | /* | |
2838 | * query all FQDs starting from FQID 1 until we get an "invalid FQID" | |
2839 | * error, looking for non-OOS FQDs whose CGR is the CGR being released | |
2840 | */ | |
2841 | struct qman_fq fq = { | |
2842 | .fqid = QM_FQID_RANGE_START | |
2843 | }; |
2844 | int err; | |
2845 | ||
2846 | do { | |
2847 | struct qm_mcr_queryfq_np np; | |
2848 | ||
2849 | err = qman_query_fq_np(&fq, &np); | |
2850 | if (err == -ERANGE) | |
2851 | /* FQID range exceeded, found no problems */ | |
2852 | return 0; | |
2853 | else if (WARN_ON(err)) | |
2854 | return err; | |
2855 | ||
2856 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { |
2857 | struct qm_fqd fqd; | |
2858 | ||
2859 | err = qman_query_fq(&fq, &fqd); | |
2860 | if (WARN_ON(err)) | |
2861 | return err; | |
2862 | if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && |
2863 | fqd.cgid == cgrid) { | |
2864 | pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", | |
2865 | cgrid, fq.fqid); | |
2866 | return -EIO; | |
2867 | } | |
2868 | } | |
2869 | /* Move to the next FQID */ | |
2870 | fq.fqid++; | |
2871 | } while (1); | |
2872 | } | |
2873 | ||
2874 | int qman_release_cgrid(u32 cgrid) | |
2875 | { | |
2876 | int ret; | |
2877 | ||
2878 | ret = cgr_cleanup(cgrid); | |
2879 | if (ret) { | |
2880 | pr_debug("CGRID %d leaked\n", cgrid); | |
2881 | return ret; | |
2882 | } | |
2883 | ||
2884 | gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1); | |
2885 | return 0; | |
2886 | } | |
2887 | EXPORT_SYMBOL(qman_release_cgrid); |