// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 *        --------             ---------
 *        |      |             |       |
 *        |  AP  +<---.   .----+ Modem |
 *        |      +--. |   | .->+       |
 *        |      |  | |   | |  |       |
 *        --------  | |   | |  ---------
 *                  v |   v |
 *                --+-+---+-+--
 *                |    GSI    |
 *                |-----------|
 *                |           |
 *                |    IPA    |
 *                |           |
 *                -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data, also located in DRAM. After writing one
 * or more TREs to a channel, the writer (either the IPA or an EE) writes a
 * doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
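
/* A rough sketch of one AP transmit, traced through the code below: the AP
 * fills one or more TREs in a channel ring and rings the channel doorbell
 * (gsi_channel_doorbell()); when the transfer completes, the GSI appends a
 * completion event to the channel's event ring and raises an IEOB
 * interrupt, which gsi_isr_ieob() turns into a NAPI poll; the poll loop
 * (gsi_channel_poll()) consults the event ring, retires completed
 * transactions, and rings the event ring doorbell to return the consumed
 * event entries to the hardware.
 */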

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};
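
/* The field sizes above sum to 16 bytes (8 + 2 + 1 + 1 + 2 + 1 + 1), which
 * gsi_validate_build() below verifies is exactly GSI_RING_ELEMENT_SIZE, so
 * event ring and channel ring elements share a common size.
 */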

/* Hardware values from the error log register error code field */
enum gsi_err_code {
	GSI_INVALID_TRE_ERR			= 0x1,
	GSI_OUT_OF_BUFFERS_ERR			= 0x2,
	GSI_OUT_OF_RESOURCES_ERR		= 0x3,
	GSI_UNSUPPORTED_INTER_EE_OP_ERR		= 0x4,
	GSI_EVT_RING_EMPTY_ERR			= 0x5,
	GSI_NON_ALLOCATED_EVT_ACCESS_ERR	= 0x6,
	GSI_HWO_1_ERR				= 0x8,
};

/* Hardware values from the error log register error type field */
enum gsi_err_type {
	GSI_ERR_TYPE_GLOB	= 0x1,
	GSI_ERR_TYPE_CHAN	= 0x2,
	GSI_ERR_TYPE_EVT	= 0x3,
};

/* Hardware values used when programming an event ring */
enum gsi_evt_chtype {
	GSI_EVT_CHTYPE_MHI_EV	= 0x0,
	GSI_EVT_CHTYPE_XHCI_EV	= 0x1,
	GSI_EVT_CHTYPE_GPI_EV	= 0x2,
	GSI_EVT_CHTYPE_XDCI_EV	= 0x3,
};

/* Hardware values used when programming a channel */
enum gsi_channel_protocol {
	GSI_CHANNEL_PROTOCOL_MHI	= 0x0,
	GSI_CHANNEL_PROTOCOL_XHCI	= 0x1,
	GSI_CHANNEL_PROTOCOL_GPI	= 0x2,
	GSI_CHANNEL_PROTOCOL_XDCI	= 0x3,
};

/* Hardware values representing an event ring immediate command opcode */
enum gsi_evt_cmd_opcode {
	GSI_EVT_ALLOCATE	= 0x0,
	GSI_EVT_RESET		= 0x9,
	GSI_EVT_DE_ALLOC	= 0xa,
};

/* Hardware values representing a generic immediate command opcode */
enum gsi_generic_cmd_opcode {
	GSI_GENERIC_HALT_CHANNEL	= 0x1,
	GSI_GENERIC_ALLOCATE_CHANNEL	= 0x2,
};

/* Hardware values representing a channel immediate command opcode */
enum gsi_ch_cmd_opcode {
	GSI_CH_ALLOCATE	= 0x0,
	GSI_CH_START	= 0x1,
	GSI_CH_STOP	= 0x2,
	GSI_CH_RESET	= 0x9,
	GSI_CH_DE_ALLOC	= 0xa,
};

/** struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of outstanding TREs allowed in a single
 *	transaction on a channel, expressed in bytes (TRE count times the
 *	element size). This determines the amount of prefetch performed by
 *	the hardware. We configure this to equal the size of the TLV FIFO
 *	for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell. We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the channel and event ring elements are the same
	 * size (and that the size is fixed). Make sure the size of an
	 * event ring element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size. We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->event_enable_bitmap |= BIT(evt_ring_id);
	val = gsi->event_enable_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
	val = gsi->event_enable_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* We don't use inter-EE channel or event interrupts */
	val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
	val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
	val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);

	val = GENMASK(gsi->channel_count - 1, 0);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	val = GENMASK(gsi->evt_ring_count - 1, 0);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	/* Each IEOB interrupt is enabled (later) as needed by channels */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	val = GSI_CNTXT_GLOB_IRQ_ALL;
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* Never enable GSI_BREAK_POINT */
	val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
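
/* Illustration of the arithmetic above, assuming the 16-byte element size
 * defined by GSI_RING_ELEMENT_SIZE in gsi_private.h: in a ring with
 * count = 8, index 10 maps to virtual address ring->virt + (10 % 8) * 16 =
 * ring->virt + 32, and a 32-bit ring offset of ring base + 32 maps back to
 * ring index 2. Note that gsi_ring_addr() does not wrap its index; callers
 * that can pass index >= count reduce it modulo the count first.
 */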

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	u32 val;

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u before alloc\n",
			evt_ring->state);
		return -EINVAL;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u after alloc\n",
			evt_ring->state);
		ret = -EIO;
	}

	return ret;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "bad event ring state %u before reset\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state %u after reset\n",
			evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
			evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	u32 val;

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);

	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "bad channel state %u before alloc\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "bad channel state %u after alloc\n", state);
		ret = -EIO;
	}

	return ret;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "bad channel state %u before start\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
		dev_err(dev, "bad channel state %u after start\n", state);
		ret = -EIO;
	}

	return ret;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out. If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "bad channel state %u before stop\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
		return ret;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "bad channel state %u after stop\n", state);

	return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		dev_err(dev, "bad channel state %u before reset\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "bad channel state %u after reset\n", state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "bad channel state %u before dealloc\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "bad channel state %u after dealloc\n", state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
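
/* Note on the (index - 1) % ring->count wrap above: because the ring count
 * is a power of 2 (and therefore divides 2^32 evenly), passing index == 0
 * yields (u32)-1 % count == count - 1, so the doorbell correctly points at
 * the last element of the ring.
 */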

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data; the MSI high and low address registers are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity. Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels to use GPI protocol */
	val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* Enable the doorbell engine if requested */
	if (doorbell)
		val |= USE_DB_ENG_FMASK;

	if (!channel->use_prefetch)
		val |= USE_ESCAPE_BUF_ONLY_FMASK;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
		GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the low-order 16 bits of the last scratch
	 * register; only the upper 16 bits are ours to write. The next
	 * sequence assumes the preserved bits remain unchanged between
	 * the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (legacy && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, legacy);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel: Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call. This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent. We also track what those values are
 * each time this function is called. Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel: Channel that has completed transmitting packets
 * @trans: Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed. Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}
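
/* Worked example of the delta computation above (illustrative numbers
 * only): if the channel had committed 4096 bytes in 3 transactions when
 * @trans was committed, and @trans itself is 1500 bytes, byte_count starts
 * as 4096 + 1500 = 5596. With compl_byte_count at 4096 and
 * compl_trans_count at 3 from the previous completion, we report 1500
 * bytes and 1 transaction, and the compl_* counters advance to 5596 and 4.
 */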

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES_ERR) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES_ERR) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
	if (result != GENERIC_EE_SUCCESS_FVAL)
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);

	complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & ERROR_INT_FMASK)
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~ERROR_INT_FMASK;

	if (val & EN_GP_INT1_FMASK) {
		val ^= EN_GP_INT1_FMASK;
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}
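
/* The IEOB handler above follows the usual NAPI pattern: the interrupt for
 * an event ring is masked via gsi_irq_ieob_disable() before the channel's
 * poll function is scheduled, and gsi_channel_poll() re-enables it only
 * once a poll pass consumes less than its full budget.
 */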

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	if (val)
		dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq: Interrupt number (ignored)
 * @dev_id: GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case CH_CTRL_FMASK:
				gsi_isr_chan_ctrl(gsi);
				break;
			case EV_CTRL_FMASK:
				gsi_isr_evt_ctrl(gsi);
				break;
			case GLOB_EE_FMASK:
				gsi_isr_glob_ee(gsi);
				break;
			case IEOB_FMASK:
				gsi_isr_ieob(gsi);
				break;
			case GENERAL_FMASK:
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring: Event ring associated with channel that received packets
 * @index: Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer. Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked. The ring's index field
 * identifies the first entry in need of processing; the index provided
 * here is the first *unfilled* event in the ring (following the last
 * filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event. RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction. Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}
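
/* Illustration of the alignment constraint above, assuming the 16-byte
 * GSI_RING_ELEMENT_SIZE: a 64-element ring occupies 64 * 16 = 1024 bytes,
 * so its DMA address must be 1024-byte aligned; an allocation that is not
 * naturally aligned to the ring size is freed and rejected.
 */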

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done. Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event. Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received. For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel: Channel to be polled
 *
 * Return: Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list. If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed. If so, they're moved to the
 * completed list and the new first entry is returned. If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi: NAPI structure for the channel
 * @budget: Budget supplied by NAPI core
 *
 * Return: Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used. This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
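
/* Worked example (illustrative only): with evt_ring_max = 20, the GENMASK
 * above sets bits 20 and up (truncated to bits 20..31 in the u32 bitmap),
 * and the MHI reservation sets bits 10..16. That leaves event ring ids
 * 0..9 and 17..19 free for gsi_evt_ring_id_alloc() to hand out.
 */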
1441
1442 /* Setup function for event rings */
1443 static void gsi_evt_ring_setup(struct gsi *gsi)
1444 {
1445 /* Nothing to do */
1446 }
1447
1448 /* Inverse of gsi_evt_ring_setup() */
1449 static void gsi_evt_ring_teardown(struct gsi *gsi)
1450 {
1451 /* Nothing to do */
1452 }
1453
1454 /* Setup function for a single channel */
1455 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
1456 bool legacy)
1457 {
1458 struct gsi_channel *channel = &gsi->channel[channel_id];
1459 u32 evt_ring_id = channel->evt_ring_id;
1460 int ret;
1461
1462 if (!channel->gsi)
1463 return 0; /* Ignore uninitialized channels */
1464
1465 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1466 if (ret)
1467 return ret;
1468
1469 gsi_evt_ring_program(gsi, evt_ring_id);
1470
1471 ret = gsi_channel_alloc_command(gsi, channel_id);
1472 if (ret)
1473 goto err_evt_ring_de_alloc;
1474
1475 gsi_channel_program(channel, legacy);
1476
1477 if (channel->toward_ipa)
1478 netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
1479 gsi_channel_poll, NAPI_POLL_WEIGHT);
1480 else
1481 netif_napi_add(&gsi->dummy_dev, &channel->napi,
1482 gsi_channel_poll, NAPI_POLL_WEIGHT);
1483
1484 return 0;
1485
1486 err_evt_ring_de_alloc:
1487 /* We've done nothing with the event ring yet so don't reset */
1488 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1489
1490 return ret;
1491 }
1492
1493 /* Inverse of gsi_channel_setup_one() */
1494 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1495 {
1496 struct gsi_channel *channel = &gsi->channel[channel_id];
1497 u32 evt_ring_id = channel->evt_ring_id;
1498
1499 if (!channel->gsi)
1500 return; /* Ignore uninitialized channels */
1501
1502 netif_napi_del(&channel->napi);
1503
1504 gsi_channel_deprogram(channel);
1505 gsi_channel_de_alloc_command(gsi, channel_id);
1506 gsi_evt_ring_reset_command(gsi, evt_ring_id);
1507 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1508 }
1509
1510 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1511 enum gsi_generic_cmd_opcode opcode)
1512 {
1513 struct completion *completion = &gsi->completion;
1514 u32 val;
1515
1516 /* First zero the result code field */
1517 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1518 val &= ~GENERIC_EE_RESULT_FMASK;
1519 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1520
1521 /* Now issue the command */
1522 val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1523 val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1524 val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1525
1526 if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
1527 return 0; /* Success! */
1528
1529 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1530 opcode, channel_id);
1531
1532 return -ETIMEDOUT;
1533 }
1534
1535 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1536 {
1537 return gsi_generic_command(gsi, channel_id,
1538 GSI_GENERIC_ALLOCATE_CHANNEL);
1539 }
1540
1541 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1542 {
1543 int ret;
1544
1545 ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
1546 if (ret)
1547 dev_err(gsi->dev, "error %d halting modem channel %u\n",
1548 ret, channel_id);
1549 }
1550
1551 /* Setup function for channels */
1552 static int gsi_channel_setup(struct gsi *gsi, bool legacy)
1553 {
1554 u32 channel_id = 0;
1555 u32 mask;
1556 int ret;
1557
1558 gsi_evt_ring_setup(gsi);
1559 gsi_irq_enable(gsi);
1560
1561 mutex_lock(&gsi->mutex);
1562
1563 do {
1564 ret = gsi_channel_setup_one(gsi, channel_id, legacy);
1565 if (ret)
1566 goto err_unwind;
1567 } while (++channel_id < gsi->channel_count);
1568
1569 /* Make sure no channels were defined that hardware does not support */
1570 while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1571 struct gsi_channel *channel = &gsi->channel[channel_id++];
1572
1573 if (!channel->gsi)
1574 continue; /* Ignore uninitialized channels */
1575
1576 dev_err(gsi->dev, "channel %u not supported by hardware\n",
1577 channel_id - 1);
1578 channel_id = gsi->channel_count;
1579 goto err_unwind;
1580 }
1581
1582 /* Allocate modem channels if necessary */
1583 mask = gsi->modem_channel_bitmap;
1584 while (mask) {
1585 u32 modem_channel_id = __ffs(mask);
1586
1587 ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1588 if (ret)
1589 goto err_unwind_modem;
1590
1591 /* Clear bit from mask only after success (for unwind) */
1592 mask ^= BIT(modem_channel_id);
1593 }
1594
1595 mutex_unlock(&gsi->mutex);
1596
1597 return 0;
1598
1599 err_unwind_modem:
1600 /* Compute which modem channels need to be deallocated */
1601 mask ^= gsi->modem_channel_bitmap;
1602 while (mask) {
1603 u32 channel_id = __fls(mask);
1604
1605 mask ^= BIT(channel_id);
1606
1607 gsi_modem_channel_halt(gsi, channel_id);
1608 }
1609
1610 err_unwind:
1611 while (channel_id--)
1612 gsi_channel_teardown_one(gsi, channel_id);
1613
1614 mutex_unlock(&gsi->mutex);
1615
1616 gsi_irq_disable(gsi);
1617 gsi_evt_ring_teardown(gsi);
1618
1619 return ret;
1620 }
1621
1622 /* Inverse of gsi_channel_setup() */
1623 static void gsi_channel_teardown(struct gsi *gsi)
1624 {
1625 u32 mask = gsi->modem_channel_bitmap;
1626 u32 channel_id;
1627
1628 mutex_lock(&gsi->mutex);
1629
1630 while (mask) {
1631 u32 channel_id = __fls(mask);
1632
1633 mask ^= BIT(channel_id);
1634
1635 gsi_modem_channel_halt(gsi, channel_id);
1636 }
1637
1638 channel_id = gsi->channel_count - 1;
1639 do
1640 gsi_channel_teardown_one(gsi, channel_id);
1641 while (channel_id--);
1642
1643 mutex_unlock(&gsi->mutex);
1644
1645 gsi_irq_disable(gsi);
1646 gsi_evt_ring_teardown(gsi);
1647 }
1648
1649 /* Setup function for GSI. GSI firmware must be loaded and initialized */
1650 int gsi_setup(struct gsi *gsi, bool legacy)
1651 {
1652 struct device *dev = gsi->dev;
1653 u32 val;
1654
1655 /* Here is where we first touch the GSI hardware */
1656 val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1657 if (!(val & ENABLED_FMASK)) {
1658 dev_err(dev, "GSI has not been enabled\n");
1659 return -EIO;
1660 }
1661
1662 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1663
1664 gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1665 if (!gsi->channel_count) {
1666 dev_err(dev, "GSI reports zero channels supported\n");
1667 return -EINVAL;
1668 }
1669 if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
1670 dev_warn(dev,
1671 "limiting to %u channels; hardware supports %u\n",
1672 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
1673 gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1674 }
1675
1676 gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1677 if (!gsi->evt_ring_count) {
1678 dev_err(dev, "GSI reports zero event rings supported\n");
1679 return -EINVAL;
1680 }
1681 if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
1682 dev_warn(dev,
1683 "limiting to %u event rings; hardware supports %u\n",
1684 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
1685 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1686 }
1687
1688 /* Initialize the error log */
1689 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1690
1691 /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1692 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1693
1694 return gsi_channel_setup(gsi, legacy);
1695 }
1696
1697 /* Inverse of gsi_setup() */
1698 void gsi_teardown(struct gsi *gsi)
1699 {
1700 gsi_channel_teardown(gsi);
1701 }
1702
1703 /* Initialize a channel's event ring */
1704 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1705 {
1706 struct gsi *gsi = channel->gsi;
1707 struct gsi_evt_ring *evt_ring;
1708 int ret;
1709
1710 ret = gsi_evt_ring_id_alloc(gsi);
1711 if (ret < 0)
1712 return ret;
1713 channel->evt_ring_id = ret;
1714
1715 evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1716 evt_ring->channel = channel;
1717
1718 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1719 if (!ret)
1720 return 0; /* Success! */
1721
1722 dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1723 ret, gsi_channel_id(channel));
1724
1725 gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1726
1727 return ret;
1728 }
1729
1730 /* Inverse of gsi_channel_evt_ring_init() */
1731 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1732 {
1733 u32 evt_ring_id = channel->evt_ring_id;
1734 struct gsi *gsi = channel->gsi;
1735 struct gsi_evt_ring *evt_ring;
1736
1737 evt_ring = &gsi->evt_ring[evt_ring_id];
1738 gsi_ring_free(gsi, &evt_ring->ring);
1739 gsi_evt_ring_id_free(gsi, evt_ring_id);
1740 }
1741
1742 /* Init function for event rings */
1743 static void gsi_evt_ring_init(struct gsi *gsi)
1744 {
1745 u32 evt_ring_id = 0;
1746
1747 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
1748 gsi->event_enable_bitmap = 0;
1749 do
1750 init_completion(&gsi->evt_ring[evt_ring_id].completion);
1751 while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
1752 }
1753
1754 /* Inverse of gsi_evt_ring_init() */
1755 static void gsi_evt_ring_exit(struct gsi *gsi)
1756 {
1757 /* Nothing to do */
1758 }
1759
1760 static bool gsi_channel_data_valid(struct gsi *gsi,
1761 const struct ipa_gsi_endpoint_data *data)
1762 {
1763 #ifdef IPA_VALIDATION
1764 u32 channel_id = data->channel_id;
1765 struct device *dev = gsi->dev;
1766
1767 /* Make sure channel ids are in the range the driver supports */
1768 if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
1769 dev_err(dev, "bad channel id %u; must be less than %u\n",
1770 channel_id, GSI_CHANNEL_COUNT_MAX);
1771 return false;
1772 }
1773
1774 if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
1775 dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
1776 return false;
1777 }
1778
1779 if (!data->channel.tlv_count ||
1780 data->channel.tlv_count > GSI_TLV_MAX) {
1781 dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
1782 channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1783 return false;
1784 }
1785
1786 /* We have to allow at least one maximally-sized transaction to
1787 * be outstanding (which would use tlv_count TREs). Given how
1788 * gsi_channel_tre_max() is computed, tre_count has to be almost
1789 * twice the TLV FIFO size to satisfy this requirement.
1790 */
1791 if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
1792 dev_err(dev, "channel %u TRE count %u too small; must be at least %u\n",
1793 channel_id, data->channel.tre_count,
1794 2 * data->channel.tlv_count - 1);
1795 return false;
1796 }
1797
1798 if (!is_power_of_2(data->channel.tre_count)) {
1799 dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
1800 channel_id, data->channel.tre_count);
1801 return false;
1802 }
1803
1804 if (!is_power_of_2(data->channel.event_count)) {
1805 dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
1806 channel_id, data->channel.event_count);
1807 return false;
1808 }
1809 #endif /* IPA_VALIDATION */
1810
1811 return true;
1812 }
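
/* To make the tre_count check above concrete, a worked example with
 * assumed values (not taken from any platform data): with
 * tlv_count = 10, one maximally-sized transaction consumes 10 TREs,
 * so tre_count must be at least 2 * 10 - 1 = 19; and because
 * tre_count must also be a power of 2, the smallest value that would
 * pass both checks is 32.
 */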
1813
1814 /* Init function for a single channel */
1815 static int gsi_channel_init_one(struct gsi *gsi,
1816 const struct ipa_gsi_endpoint_data *data,
1817 bool command, bool prefetch)
1818 {
1819 struct gsi_channel *channel;
1820 u32 tre_count;
1821 int ret;
1822
1823 if (!gsi_channel_data_valid(gsi, data))
1824 return -EINVAL;
1825
1826 /* Worst case we need an event for every outstanding TRE */
1827 if (data->channel.tre_count > data->channel.event_count) {
1828 tre_count = data->channel.event_count;
1829 dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
1830 data->channel_id, tre_count);
1831 } else {
1832 tre_count = data->channel.tre_count;
1833 }
1834
1835 channel = &gsi->channel[data->channel_id];
1836 memset(channel, 0, sizeof(*channel));
1837
1838 channel->gsi = gsi;
1839 channel->toward_ipa = data->toward_ipa;
1840 channel->command = command;
1841 channel->use_prefetch = command && prefetch;
1842 channel->tlv_count = data->channel.tlv_count;
1843 channel->tre_count = tre_count;
1844 channel->event_count = data->channel.event_count;
1845 init_completion(&channel->completion);
1846
1847 ret = gsi_channel_evt_ring_init(channel);
1848 if (ret)
1849 goto err_clear_gsi;
1850
1851 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
1852 if (ret) {
1853 dev_err(gsi->dev, "error %d allocating channel %u ring\n",
1854 ret, data->channel_id);
1855 goto err_channel_evt_ring_exit;
1856 }
1857
1858 ret = gsi_channel_trans_init(gsi, data->channel_id);
1859 if (ret)
1860 goto err_ring_free;
1861
1862 if (command) {
1863 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
1864
1865 ret = ipa_cmd_pool_init(channel, tre_max);
1866 }
1867 if (!ret)
1868 return 0; /* Success! */
1869
1870 gsi_channel_trans_exit(channel);
1871 err_ring_free:
1872 gsi_ring_free(gsi, &channel->tre_ring);
1873 err_channel_evt_ring_exit:
1874 gsi_channel_evt_ring_exit(channel);
1875 err_clear_gsi:
1876 channel->gsi = NULL; /* Mark it not (fully) initialized */
1877
1878 return ret;
1879 }
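
/* An assumed numeric example of the TRE clamping above (values are
 * illustrative only): with tre_count = 512 and event_count = 256 in
 * the configuration data, the channel is limited to 256 TREs so an
 * event ring slot exists for every outstanding TRE. Note that the
 * TRE ring itself is still allocated using the full configured
 * tre_count, not the clamped value.
 */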
1880
1881 /* Inverse of gsi_channel_init_one() */
1882 static void gsi_channel_exit_one(struct gsi_channel *channel)
1883 {
1884 if (!channel->gsi)
1885 return; /* Ignore uninitialized channels */
1886
1887 if (channel->command)
1888 ipa_cmd_pool_exit(channel);
1889 gsi_channel_trans_exit(channel);
1890 gsi_ring_free(channel->gsi, &channel->tre_ring);
1891 gsi_channel_evt_ring_exit(channel);
1892 }
1893
1894 /* Init function for channels */
1895 static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
1896 const struct ipa_gsi_endpoint_data *data,
1897 bool modem_alloc)
1898 {
1899 int ret = 0;
1900 u32 i;
1901
1902 gsi_evt_ring_init(gsi);
1903
1904 /* The endpoint data array is indexed by endpoint name */
1905 for (i = 0; i < count; i++) {
1906 bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
1907
1908 if (ipa_gsi_endpoint_data_empty(&data[i]))
1909 continue; /* Skip over empty slots */
1910
1911 /* Mark modem channels to be allocated (hardware workaround) */
1912 if (data[i].ee_id == GSI_EE_MODEM) {
1913 if (modem_alloc)
1914 gsi->modem_channel_bitmap |=
1915 BIT(data[i].channel_id);
1916 continue;
1917 }
1918
1919 ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
1920 if (ret)
1921 goto err_unwind;
1922 }
1923
1924 return ret;
1925
1926 err_unwind:
1927 while (i--) {
1928 if (ipa_gsi_endpoint_data_empty(&data[i]))
1929 continue;
1930 if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
1931 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
1932 continue;
1933 }
1934 gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
1935 }
1936 gsi_evt_ring_exit(gsi);
1937
1938 return ret;
1939 }
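
/* An assumed failure scenario to illustrate the unwind above: if
 * gsi_channel_init_one() fails for entry i = 5, the loop revisits
 * entries 4 down to 0 in reverse order (the failing entry already
 * cleaned up after itself). Empty slots are skipped, modem entries
 * only have their bitmap bit cleared, and AP entries are torn down
 * with gsi_channel_exit_one().
 */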
1940
1941 /* Inverse of gsi_channel_init() */
1942 static void gsi_channel_exit(struct gsi *gsi)
1943 {
1944 u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
1945
1946 do
1947 gsi_channel_exit_one(&gsi->channel[channel_id]);
1948 while (channel_id--);
1949 gsi->modem_channel_bitmap = 0;
1950
1951 gsi_evt_ring_exit(gsi);
1952 }
1953
1954 /* Init function for GSI. GSI hardware does not need to be "ready" */
1955 int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
1956 u32 count, const struct ipa_gsi_endpoint_data *data,
1957 bool modem_alloc)
1958 {
1959 struct device *dev = &pdev->dev;
1960 struct resource *res;
1961 resource_size_t size;
1962 unsigned int irq;
1963 int ret;
1964
1965 gsi_validate_build();
1966
1967 gsi->dev = dev;
1968
1969 /* The GSI layer performs NAPI on all endpoints. NAPI requires a
1970 * network device structure, but the GSI layer does not have one,
1971 * so we must create a dummy network device for this purpose.
1972 */
1973 init_dummy_netdev(&gsi->dummy_dev);
1974
1975 /* Get the GSI IRQ, request it, and arrange for it to wake the system */
1976 ret = platform_get_irq_byname(pdev, "gsi");
1977 if (ret <= 0) {
1978 dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
1979 return ret ? : -EINVAL;
1980 }
1981 irq = ret;
1982
1983 ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
1984 if (ret) {
1985 dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
1986 return ret;
1987 }
1988 gsi->irq = irq;
1989
1990 ret = enable_irq_wake(gsi->irq);
1991 if (ret)
1992 dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
1993 gsi->irq_wake_enabled = !ret;
1994
1995 /* Get GSI memory range and map it */
1996 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
1997 if (!res) {
1998 dev_err(dev, "DT error getting \"gsi\" memory property\n");
1999 ret = -ENODEV;
2000 goto err_disable_irq_wake;
2001 }
2002
2003 size = resource_size(res);
2004 if (res->start > U32_MAX || size > U32_MAX - res->start) {
2005 dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2006 ret = -EINVAL;
2007 goto err_disable_irq_wake;
2008 }
2009
2010 gsi->virt = ioremap(res->start, size);
2011 if (!gsi->virt) {
2012 dev_err(dev, "unable to remap \"gsi\" memory\n");
2013 ret = -ENOMEM;
2014 goto err_disable_irq_wake;
2015 }
2016
2017 ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
2018 if (ret)
2019 goto err_iounmap;
2020
2021 mutex_init(&gsi->mutex);
2022 init_completion(&gsi->completion);
2023
2024 return 0;
2025
2026 err_iounmap:
2027 iounmap(gsi->virt);
2028 err_disable_irq_wake:
2029 if (gsi->irq_wake_enabled)
2030 (void)disable_irq_wake(gsi->irq);
2031 free_irq(gsi->irq, gsi);
2032
2033 return ret;
2034 }
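
/* A sketch of the intended lifecycle, with a hypothetical caller and
 * error handling elided (the actual callers are in the IPA probe and
 * remove paths, outside this file):
 *
 *	gsi_init(gsi, pdev, prefetch, count, data, modem_alloc);
 *	...
 *	gsi_setup(gsi, legacy);		// GSI hardware must be ready
 *	...
 *	gsi_teardown(gsi);		// inverse of gsi_setup()
 *	gsi_exit(gsi);			// inverse of gsi_init()
 */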
2035
2036 /* Inverse of gsi_init() */
2037 void gsi_exit(struct gsi *gsi)
2038 {
2039 mutex_destroy(&gsi->mutex);
2040 gsi_channel_exit(gsi);
2041 if (gsi->irq_wake_enabled)
2042 (void)disable_irq_wake(gsi->irq);
2043 free_irq(gsi->irq, gsi);
2044 iounmap(gsi->virt);
2045 }
2046
2047 /* The maximum number of outstanding TREs on a channel. This limits
2048 * a channel's maximum number of transactions outstanding (worst case
2049 * is one TRE per transaction).
2050 *
2051 * The absolute limit is the number of TREs in the channel's TRE ring,
2052 * and in theory we should be able to use all of them. But in practice,
2053 * doing that led to the hardware reporting exhaustion of event ring
2054 * slots for writing completion information. So the hardware limit
2055 * would be (tre_count - 1).
2056 *
2057 * We reduce it a bit further though. Transaction resource pools are
2058 * sized to be a little larger than this maximum, to allow resource
2059 * allocations to always be contiguous. The number of entries in a
2060 * TRE ring buffer is a power of 2, and the extra resources in a pool
2061 * tend to nearly double the memory allocated for it. Reducing the
2062 * maximum number of outstanding TREs allows the number of entries in
2063 * a pool to avoid crossing that power-of-2 boundary, and this can
2064 * substantially reduce pool memory requirements. The number we
2065 * reduce it by matches the number added in gsi_trans_pool_init().
2066 */
2067 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2068 {
2069 struct gsi_channel *channel = &gsi->channel[channel_id];
2070
2071 /* Hardware limit is channel->tre_count - 1 */
2072 return channel->tre_count - (channel->tlv_count - 1);
2073 }
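
/* A worked example of the computation above, using assumed values:
 * with tre_count = 256 and tlv_count = 10, the hardware limit alone
 * would allow 255 outstanding TREs, and a pool sized tlv_count - 1
 * entries larger than that (matching gsi_trans_pool_init()) would
 * need 264 entries, crossing the 256 power-of-2 boundary. Returning
 * 256 - (10 - 1) = 247 instead keeps the pool at exactly
 * 247 + 9 = 256 entries.
 */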
2074
2075 /* Returns the maximum number of TREs in a single transaction for a channel */
2076 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2077 {
2078 struct gsi_channel *channel = &gsi->channel[channel_id];
2079
2080 return channel->tlv_count;
2081 }
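
/* Taken together (an illustrative summary, not additional driver
 * logic): gsi_channel_trans_tre_max() bounds how many TREs a single
 * transaction may use (the TLV FIFO size), while gsi_channel_tre_max()
 * bounds how many TREs may be outstanding across all of a channel's
 * transactions. A caller building a transaction would request at most
 * tlv_count TREs, and roughly tre_max / tlv_count maximally-sized
 * transactions could be in flight at once.
 */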