// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	--------	     ---------
 *	|      |	     |	     |
 *	|  AP  +<---.	.----+ Modem |
 *	|      +--. |	| .->+	     |
 *	|      |  | |	| |  |	     |
 *	--------  | |	| |  ---------
 *		  v |	v |
 *		--+-+---+-+--
 *		|  GSI	    |
 *		|-----------|
 *		|	    |
 *		|    IPA    |
 *		|	    |
 *		-------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
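/* Illustrative example (values chosen for illustration, not taken from any
 * particular configuration): with a 4-element channel ring, an EE that has
 * filled TREs 0..2 rings the doorbell with the address of element 3, the
 * first unfilled slot.  When the event ring later reports completion of
 * TRE 2, the strict in-order rule above means TREs 0 and 1 are known to be
 * complete as well, so a single interrupt can retire all three transfers.
 */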
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */
/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 chid;
	u8 type;
	__le32 flags;
};

/* Hardware values from the error log register error code field */
enum gsi_err_code {
	GSI_INVALID_TRE_ERR			= 0x1,
	GSI_OUT_OF_BUFFERS_ERR			= 0x2,
	GSI_OUT_OF_RESOURCES_ERR		= 0x3,
	GSI_UNSUPPORTED_INTER_EE_OP_ERR		= 0x4,
	GSI_EVT_RING_EMPTY_ERR			= 0x5,
	GSI_NON_ALLOCATED_EVT_ACCESS_ERR	= 0x6,
};
/* Hardware values from the error log register error type field */
enum gsi_err_type {
	GSI_ERR_TYPE_GLOB	= 0x1,
	GSI_ERR_TYPE_CHAN	= 0x2,
	GSI_ERR_TYPE_EVT	= 0x3,
};
/* Hardware values used when programming an event ring */
enum gsi_evt_chtype {
	GSI_EVT_CHTYPE_MHI_EV	= 0x0,
	GSI_EVT_CHTYPE_XHCI_EV	= 0x1,
	GSI_EVT_CHTYPE_GPI_EV	= 0x2,
	GSI_EVT_CHTYPE_XDCI_EV	= 0x3,
};
/* Hardware values used when programming a channel */
enum gsi_channel_protocol {
	GSI_CHANNEL_PROTOCOL_MHI	= 0x0,
	GSI_CHANNEL_PROTOCOL_XHCI	= 0x1,
	GSI_CHANNEL_PROTOCOL_GPI	= 0x2,
	GSI_CHANNEL_PROTOCOL_XDCI	= 0x3,
};
/* Hardware values representing an event ring immediate command opcode */
enum gsi_evt_cmd_opcode {
	GSI_EVT_ALLOCATE	= 0x0,
	GSI_EVT_RESET		= 0x9,
	GSI_EVT_DE_ALLOC	= 0xa,
};
/* Hardware values representing a generic immediate command opcode */
enum gsi_generic_cmd_opcode {
	GSI_GENERIC_HALT_CHANNEL	= 0x1,
	GSI_GENERIC_ALLOCATE_CHANNEL	= 0x2,
};
/* Hardware values representing a channel immediate command opcode */
enum gsi_ch_cmd_opcode {
	GSI_CH_ALLOCATE	= 0x0,
	GSI_CH_START	= 0x1,
	GSI_CH_STOP	= 0x2,
	GSI_CH_RESET	= 0x9,
	GSI_CH_DE_ALLOC	= 0xa,
};
/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel (in bytes).  This determines the amount of prefetch
 *	performed by the hardware.  We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};
/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};
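/* Note on the layout above (assuming a little-endian CPU): the GPI fields
 * overlay the data words such that max_outstanding_tre occupies the upper
 * 16 bits of word3 and outstanding_threshold the upper 16 bits of word4.
 * This is why gsi_channel_program() below writes only the high half of the
 * last scratch register and preserves the low half owned by the hardware.
 */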
/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}
/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}
static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->event_enable_bitmap |= BIT(evt_ring_id);
	val = gsi->event_enable_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
	val = gsi->event_enable_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* We don't use inter-EE channel or event interrupts */
	val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
	val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
	val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);

	val = GENMASK(gsi->channel_count - 1, 0);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	val = GENMASK(gsi->evt_ring_count - 1, 0);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	/* Each IEOB interrupt is enabled (later) as needed by channels */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	val = GSI_CNTXT_GLOB_IRQ_ALL;
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* Never enable GSI_BREAK_POINT */
	val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}
/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}
/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}
/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
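/* These two helpers are inverses over the ring: for any slot i less than
 * ring->count, gsi_ring_index(ring, gsi_ring_addr(ring, i)) == i.  As an
 * illustration, with 16-byte ring elements slot 3 sits at the ring's
 * 32-bit base address plus 48, and dividing that offset back by the
 * element size recovers index 3.
 */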
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}
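/* All of the command issuers below (channel, event ring, and generic)
 * follow the same pattern: encode an id and an opcode into a register
 * value, call gsi_command(), and rely on the corresponding control
 * interrupt handler further down in this file (gsi_isr_chan_ctrl(),
 * gsi_isr_evt_ctrl(), or gsi_isr_gp_int1()) to signal the completion.
 */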
/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}
/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	u32 val;

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u before alloc\n",
			evt_ring->state);
		return -EINVAL;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u after alloc\n",
			evt_ring->state);
		ret = -EIO;
	}

	return ret;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "bad event ring state %u before reset\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state %u after reset\n",
			evt_ring->state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
			evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}
/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	u32 val;

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);

	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}
/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "bad channel state %u before alloc\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "bad channel state %u after alloc\n", state);
		ret = -EIO;
	}

	return ret;
}
/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "bad channel state %u before start\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
		dev_err(dev, "bad channel state %u after start\n", state);
		ret = -EIO;
	}

	return ret;
}
/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "bad channel state %u before stop\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
		return ret;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "bad channel state %u after stop\n", state);

	return -EIO;
}
/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		dev_err(dev, "bad channel state %u before reset\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "bad channel state %u after reset\n", state);
}
/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "bad channel state %u before dealloc\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "bad channel state %u after dealloc\n", state);
}
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
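/* Illustrative arithmetic: for a 128-element ring, calling this with index 0
 * computes (0 - 1) % 128 in unsigned arithmetic, which is 127, so the
 * doorbell points at the last element of the ring--the most recently
 * processed entry.  The wraparound is what makes "one less than the first
 * unfilled entry" work even at the start of the ring.
 */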
/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and MSI address high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}
/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}
/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}
/* Stop channel activity.  Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}
/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}
/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels to use GPI protocol */
	val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* Enable the doorbell engine if requested */
	if (doorbell)
		val |= USE_DB_ENG_FMASK;

	if (!channel->use_prefetch)
		val |= USE_ESCAPE_BUF_ONLY_FMASK;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the upper 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
}
static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}
/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}
/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}
/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (legacy && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, legacy);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}
/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}
/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}
/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call.  This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}
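/* Worked example (hypothetical numbers): if a channel has committed 5000
 * bytes in 12 transactions overall, and the previous call recorded 3000
 * bytes in 8 transactions, this call reports 2000 bytes and 4 transactions
 * to the network stack and updates the queued counts to 5000 and 12.
 */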
/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}
/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}
/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}
/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES_ERR) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}
/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES_ERR) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}
/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}
/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
	if (result != GENERIC_EE_SUCCESS_FVAL)
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);

	complete(&gsi->completion);
}
/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & ERROR_INT_FMASK)
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~ERROR_INT_FMASK;

	if (val & EN_GP_INT1_FMASK) {
		val ^= EN_GP_INT1_FMASK;
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}
/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}
/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ.  Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			if (gsi_intr == CH_CTRL_FMASK)
				gsi_isr_chan_ctrl(gsi);
			else if (gsi_intr == EV_CTRL_FMASK)
				gsi_isr_evt_ctrl(gsi);
			else if (gsi_intr == GLOB_EE_FMASK)
				gsi_isr_glob_ee(gsi);
			else if (gsi_intr == IEOB_FMASK)
				gsi_isr_ieob(gsi);
			else if (gsi_intr == GENERAL_FMASK)
				gsi_isr_general(gsi);
			else
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}
/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}
/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field tells
 * the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}
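/* Sizing sketch (illustrative numbers): a 128-element ring with 16-byte
 * elements needs a 2048-byte buffer, and because the hardware requires
 * alignment equal to the ring size, an allocation at a DMA address that
 * is not a multiple of 2048 is released and reported as an error above.
 */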
/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}
/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}
/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}
/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}
/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}
/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}
/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}
/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
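/* Worked example (illustrative): on a 64-bit build with an evt_ring_max of
 * 16, bits 16..63 start out set (rings not supported by the hardware), and
 * bits 10..16 are then set for the reserved MHI range, leaving only event
 * ring ids 0..9 available for gsi_evt_ring_id_alloc() to hand out.
 */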
/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
				 bool legacy)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!channel->gsi)
		return 0;	/* Ignore uninitialized channels */

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, legacy);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}
/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	netif_napi_del(&channel->napi);

	gsi_channel_deprogram(channel);
	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	u32 val;

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}
static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	int ret;

	ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}
/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi, bool legacy)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_evt_ring_setup(gsi);
	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id, legacy);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!channel->gsi)
			continue;	/* Ignore uninitialized channels */

		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		ret = -EINVAL;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		u32 channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);

	return ret;
}
/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		u32 channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);
}
/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi, bool legacy)
{
	struct device *dev = gsi->dev;
	u32 val;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(dev, "GSI has not been enabled\n");
		return -EIO;
	}

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	return gsi_channel_setup(gsi, legacy);
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
}
/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}
/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}
/* Init function for event rings */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->event_enable_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}
/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
	/* Nothing to do */
}
static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs).  Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
			channel_id, data->channel.tlv_count,
			data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}
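/* Consequence of the checks above (illustrative numbers): a channel with a
 * tlv_count of 8 needs tre_count >= 2 * 8 - 1 = 15, and since tre_count
 * must also be a power of 2, the smallest configuration that validates
 * uses 16 TREs.
 */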
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command, bool prefetch)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->use_prefetch = command && prefetch;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}
/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}
/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
			    const struct ipa_gsi_endpoint_data *data,
			    bool modem_alloc)
{
	int ret = 0;
	u32 i;

	gsi_evt_ring_init(gsi);

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}
	gsi_evt_ring_exit(gsi);

	return ret;
}
/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;

	gsi_evt_ring_exit(gsi);
}
/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
	     u32 count, const struct ipa_gsi_endpoint_data *data,
	     bool modem_alloc)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	unsigned int irq;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;

	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
	 * network device structure, but the GSI layer does not have one,
	 * so we must create a dummy network device for this purpose.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get the GSI IRQ and request for it to wake the system */
	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n",
			ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		ret = -EINVAL;
		goto err_free_irq;
	}

	gsi->virt = ioremap(res->start, size);
	if (!gsi->virt) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		ret = -ENOMEM;
		goto err_free_irq;
	}

	ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
	if (ret)
		goto err_iounmap;

	mutex_init(&gsi->mutex);
	init_completion(&gsi->completion);

	return 0;

err_iounmap:
	iounmap(gsi->virt);
err_free_irq:
	free_irq(gsi->irq, gsi);

	return ret;
}
/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	free_irq(gsi->irq, gsi);
	iounmap(gsi->virt);
}
/* The maximum number of outstanding TREs on a channel.  This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information.  So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though.  Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous.  The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements.  The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}
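/* Worked example (hypothetical sizes): with a tre_count of 256 and a
 * tlv_count of 8, this returns 256 - (8 - 1) = 249.  The transaction pool
 * is then padded back up by a matching amount in gsi_trans_pool_init(),
 * keeping it within the 256-entry power-of-2 boundary described above.
 */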
/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}