// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
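/* Note: atomic_add_unless() returns nonzero only when it performs the
 * addition, so atomic_dec_not_zero() decrements the counter and reports
 * success unless the counter was already zero.
 */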
#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */
/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
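/* This overhead amounts to NET_SKB_PAD bytes of headroom plus the
 * (cache-aligned) struct skb_shared_info that build_skb() places at the
 * end of the buffer -- typically a few hundred bytes on a 64-bit system
 * with 4 KB pages, though the exact value is configuration dependent.
 */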
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		1000	/* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_LOG			= 0x10,
	IPA_STATUS_OPCODE_DCMP			= 0x20,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};
/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
	/* The meaning of the next value depends on the IP version */
	IPA_STATUS_EXCEPTION_NAT		= 0x40,		/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
};
/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};
/* Field masks for struct ipa_status structure fields */

#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
#define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
#define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)

#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)

#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)
#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
					const struct ipa_gsi_endpoint_data *all_data,
					const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}
#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 * 	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 * 	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

/* Returns previous suspend state (true means it was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(!endpoint->toward_ipa); */

	return ipa_endpoint_init_ctrl(endpoint, enable);
}
/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	bool support_suspend;
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	/* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
	support_suspend = ipa->version == IPA_VERSION_3_5_1;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else if (support_suspend)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);

		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
		/* metadata is the 4 byte rmnet_map header itself */
		val |= HDR_OFST_METADATA_VALID_FMASK;
		val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
		/* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
		if (!endpoint->toward_ipa) {
			u32 size_offset = offsetof(struct rmnet_map_header,
						   pkt_len);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(size_offset,
					       HDR_OFST_PKT_SIZE_FMASK);
		}
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
	val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
	/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/**
 * Generate a metadata mask value that will select only the mux_id
 * field in an rmnet_map header structure.  The mux_id is at offset
 * 1 byte from the beginning of the structure, but the metadata
 * value is treated as a 4-byte unit.  So this mask must be computed
 * with endianness in mind.  Note that ipa_endpoint_init_hdr_metadata_mask()
 * will convert this value to the proper byte order.
 *
 * Marked __always_inline because this is really computing a
 * constant value.
 */
static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
{
	size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
	u32 mux_id_mask = 0;
	u8 *bytes;

	bytes = (u8 *)&mux_id_mask;
	bytes[mux_id_offset] = 0xff;	/* mux_id is 1 byte */

	return cpu_to_be32(mux_id_mask);
}
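/* Illustration: with mux_id at byte offset 1 in struct rmnet_map_header,
 * the mask selects only the second byte of the 4-byte metadata value.
 * On a little-endian CPU, for example, mux_id_mask is 0x0000ff00 before
 * the cpu_to_be32() conversion above.
 */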
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	if (!endpoint->toward_ipa && endpoint->data->qmap)
		val = ipa_rmnet_mux_id_metadata_mask();

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (endpoint->toward_ipa && endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* Other bitfields unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
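/* For example, with an 8192 byte receive buffer, an IPA_MTU of 1500
 * bytes, and a typical buffer overhead of a few hundred bytes, roughly
 * 6 KB remain, so the value programmed would be 6.  (Illustrative only;
 * the exact result depends on IPA_MTU and IPA_RX_BUFFER_OVERHEAD.)
 */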
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
			val |= u32_encode_bits(aggr_size,
					       AGGR_BYTE_LIMIT_FMASK);
			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
					       AGGR_TIME_LIMIT_FMASK);
			val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* A return value of 0 indicates an error */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 scale;
	u32 base;
	u32 val;

	if (!microseconds)
		return 0;	/* invalid delay */

	/* Timer is represented in units of clock ticks. */
	if (ipa->version < IPA_VERSION_4_2)
		return microseconds;	/* XXX Needs to be computed */

	/* IPA v4.2 represents the tick count as base * scale */
	scale = 1;			/* XXX Needs to be computed */
	if (scale > field_max(SCALE_FMASK))
		return 0;		/* scale too big */

	base = DIV_ROUND_CLOSEST(microseconds, scale);
	if (base > field_max(BASE_VALUE_FMASK))
		return 0;		/* microseconds too big */

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(base, BASE_VALUE_FMASK);

	return val;
}
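/* Illustration: for IPA v4.2 the programmed tick count is base * scale.
 * With scale fixed at 1 above, a request of 100 microseconds is encoded
 * as base 100, scale 1 (assuming both values fit in their fields); the
 * microsecond-to-tick conversion itself is still marked XXX above.
 */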
static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					     u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* XXX We'll fix this when the register definition is clear */
	if (microseconds) {
		struct device *dev = &ipa->pdev->dev;

		dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
			endpoint_id);
		microseconds = 0;
	}

	if (microseconds) {
		val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
		if (!val)
			return -EINVAL;
	} else {
		val = 0;	/* timeout is immediate */
	}
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	iowrite32(val, ipa->reg_virt + offset);

	return 0;
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		(void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* HPS_REP_SEQ_TYPE is 0 */
	/* DPS_REP_SEQ_TYPE is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
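/* Illustration: the sequencer type is a set of nibbles.  For a
 * (hypothetical) seq_type of 0x21, the HPS field above would be
 * programmed with 0x1 and the DPS field with 0x2.
 */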
/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
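/* Illustration: an skb with a linear area and three page fragments needs
 * 1 + 3 = 4 TREs; if the endpoint's TLV FIFO is smaller than that, the
 * code above linearizes the skb so it can be sent with a single TRE.
 */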
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}
/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to add to the backlog
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
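/* Note on the accounting above: replenish_backlog counts receive buffers
 * the hardware still has room for, and replenish_saved accumulates
 * requests that arrive while replenishing is disabled; enabling simply
 * transfers the saved count back into the backlog.
 */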
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}
/* The format of a packet status element is the same for several status
 * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
 * aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; others we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
1050 static void ipa_endpoint_tx_complete(struct ipa_endpoint
*endpoint
,
1051 struct gsi_trans
*trans
)
1055 /* Complete transaction initiated in ipa_endpoint_replenish_one() */
1056 static void ipa_endpoint_rx_complete(struct ipa_endpoint
*endpoint
,
1057 struct gsi_trans
*trans
)
1061 ipa_endpoint_replenish(endpoint
, 1);
1063 if (trans
->cancelled
)
1066 /* Parse or build a socket buffer using the actual received length */
1068 if (endpoint
->data
->status_enable
)
1069 ipa_endpoint_status_parse(endpoint
, page
, trans
->len
);
1070 else if (ipa_endpoint_skb_build(endpoint
, page
, trans
->len
))
1071 trans
->data
= NULL
; /* Pages have been consumed */
1074 void ipa_endpoint_trans_complete(struct ipa_endpoint
*endpoint
,
1075 struct gsi_trans
*trans
)
1077 if (endpoint
->toward_ipa
)
1078 ipa_endpoint_tx_complete(endpoint
, trans
);
1080 ipa_endpoint_rx_complete(endpoint
, trans
);
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}
/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * @Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	bool legacy;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	if (endpoint->ipa->version == IPA_VERSION_3_5_1)
		suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	gsi_channel_reset(gsi, endpoint->channel_id, legacy);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	bool legacy;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 *
	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	special = !endpoint->toward_ipa && endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, legacy);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		if (endpoint->ipa->version != IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
		ipa_endpoint_init_hdr_ext(endpoint);
		ipa_endpoint_init_aggr(endpoint);
		ipa_endpoint_init_deaggr(endpoint);
		ipa_endpoint_init_seq(endpoint);
	} else {
		if (endpoint->ipa->version == IPA_VERSION_3_5_1)
			(void)ipa_endpoint_program_suspend(endpoint, false);
		ipa_endpoint_init_hdr_ext(endpoint);
		ipa_endpoint_init_aggr(endpoint);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_status(endpoint);
}
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}
/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	/* assert(ipa->version == IPA_VERSION_3_5_1); */

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_disable(endpoint);

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	if (!endpoint->toward_ipa && !stop_channel) {
		/* Due to a hardware bug, a client suspended with an open
		 * aggregation frame will not generate a SUSPEND IPA
		 * interrupt.  We work around this by force-closing the
		 * aggregation frame, then simulating the arrival of such
		 * an interrupt.
		 */
		(void)ipa_endpoint_program_suspend(endpoint, true);
		ipa_endpoint_suspend_aggr(endpoint);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	if (!endpoint->toward_ipa && !start_channel)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}
void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}
void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}
void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}
void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}