1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2020 Linaro Ltd.
5 */
6
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/slab.h>
10 #include <linux/bitfield.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/dma-direction.h>
13
14 #include "gsi.h"
15 #include "gsi_trans.h"
16 #include "ipa.h"
17 #include "ipa_data.h"
18 #include "ipa_endpoint.h"
19 #include "ipa_cmd.h"
20 #include "ipa_mem.h"
21 #include "ipa_modem.h"
22 #include "ipa_table.h"
23 #include "ipa_gsi.h"
24
25 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
26
27 #define IPA_REPLENISH_BATCH 16
28
29 /* RX buffer is 1 page (or a power-of-2 contiguous pages) */
30 #define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */
31
32 /* The amount of RX buffer space consumed by standard skb overhead */
33 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
34
35 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
36 #define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
37
38 /** enum ipa_status_opcode - status element opcode hardware values */
39 enum ipa_status_opcode {
40 IPA_STATUS_OPCODE_PACKET = 0x01,
41 IPA_STATUS_OPCODE_NEW_FRAG_RULE = 0x02,
42 IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
43 IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
44 IPA_STATUS_OPCODE_LOG = 0x10,
45 IPA_STATUS_OPCODE_DCMP = 0x20,
46 IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
47 };
48
49 /** enum ipa_status_exception - status element exception type */
50 enum ipa_status_exception {
51 /* 0 means no exception */
52 IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
53 IPA_STATUS_EXCEPTION_IPTYPE = 0x04,
54 IPA_STATUS_EXCEPTION_PACKET_LENGTH = 0x08,
55 IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
56 IPA_STATUS_EXCEPTION_SW_FILT = 0x20,
57 /* The meaning of the next value depends on the IP version */
58 IPA_STATUS_EXCEPTION_NAT = 0x40, /* IPv4 */
59 IPA_STATUS_EXCEPTION_IPV6CT = IPA_STATUS_EXCEPTION_NAT,
60 };
61
62 /* Status element provided by hardware */
63 struct ipa_status {
64 u8 opcode; /* enum ipa_status_opcode */
65 u8 exception; /* enum ipa_status_exception */
66 __le16 mask;
67 __le16 pkt_len;
68 u8 endp_src_idx;
69 u8 endp_dst_idx;
70 __le32 metadata;
71 __le32 flags1;
72 __le64 flags2;
73 __le32 flags3;
74 __le32 flags4;
75 };
76
77 /* Field masks for struct ipa_status structure fields */
78
79 #define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
80
81 #define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
82
83 #define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK GENMASK(0, 0)
84 #define IPA_STATUS_FLAGS1_FLT_HASH_FMASK GENMASK(1, 1)
85 #define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK GENMASK(2, 2)
86 #define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK GENMASK(3, 3)
87 #define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK GENMASK(13, 4)
88 #define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK GENMASK(14, 14)
89 #define IPA_STATUS_FLAGS1_RT_HASH_FMASK GENMASK(15, 15)
90 #define IPA_STATUS_FLAGS1_UCP_FMASK GENMASK(16, 16)
91 #define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK GENMASK(21, 17)
92 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
93
94 #define IPA_STATUS_FLAGS2_NAT_HIT_FMASK GENMASK_ULL(0, 0)
95 #define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK GENMASK_ULL(13, 1)
96 #define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK GENMASK_ULL(15, 14)
97 #define IPA_STATUS_FLAGS2_TAG_INFO_FMASK GENMASK_ULL(63, 16)
98
99 #define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK GENMASK(7, 0)
100 #define IPA_STATUS_FLAGS3_TOD_CTR_FMASK GENMASK(31, 8)
101
102 #define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK GENMASK(0, 0)
103 #define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK GENMASK(10, 1)
104 #define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK GENMASK(11, 11)
105 #define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK GENMASK(15, 12)
106 #define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK GENMASK(31, 16)
107
108 #ifdef IPA_VALIDATE
109
110 static void ipa_endpoint_validate_build(void)
111 {
112 /* The aggregation byte limit defines the point at which an
113 * aggregation window will close. It is programmed into the
114 * IPA hardware as a number of KB. We don't use "hard byte
115 * limit" aggregation, which means that we need to supply
116 * enough space in a receive buffer to hold a complete MTU
117 * plus normal skb overhead *after* that aggregation byte
118 * limit has been crossed.
119 *
120 * This check just ensures we don't define a receive buffer
121 * size that would exceed what we can represent in the field
122 * that is used to program its size.
123 */
124 BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
125 field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
126 IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
127
128 /* I honestly don't know where this requirement comes from. But
129 * it holds, and if we someday need to loosen the constraint we
130 * can try to track it down.
131 */
132 BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
133 }
134
135 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
136 const struct ipa_gsi_endpoint_data *all_data,
137 const struct ipa_gsi_endpoint_data *data)
138 {
139 const struct ipa_gsi_endpoint_data *other_data;
140 struct device *dev = &ipa->pdev->dev;
141 enum ipa_endpoint_name other_name;
142
143 if (ipa_gsi_endpoint_data_empty(data))
144 return true;
145
146 if (!data->toward_ipa) {
147 if (data->endpoint.filter_support) {
148 dev_err(dev, "filtering not supported for "
149 "RX endpoint %u\n",
150 data->endpoint_id);
151 return false;
152 }
153
154 return true; /* Nothing more to check for RX */
155 }
156
157 if (data->endpoint.config.status_enable) {
158 other_name = data->endpoint.config.tx.status_endpoint;
159 if (other_name >= count) {
160 dev_err(dev, "status endpoint name %u out of range "
161 "for endpoint %u\n",
162 other_name, data->endpoint_id);
163 return false;
164 }
165
166 /* Status endpoint must be defined... */
167 other_data = &all_data[other_name];
168 if (ipa_gsi_endpoint_data_empty(other_data)) {
169 dev_err(dev, "DMA endpoint name %u undefined "
170 "for endpoint %u\n",
171 other_name, data->endpoint_id);
172 return false;
173 }
174
175 /* ...and has to be an RX endpoint... */
176 if (other_data->toward_ipa) {
177 dev_err(dev,
178 "status endpoint for endpoint %u not RX\n",
179 data->endpoint_id);
180 return false;
181 }
182
183 /* ...and if it's to be an AP endpoint... */
184 if (other_data->ee_id == GSI_EE_AP) {
185 /* ...make sure it has status enabled. */
186 if (!other_data->endpoint.config.status_enable) {
187 dev_err(dev,
188 "status not enabled for endpoint %u\n",
189 other_data->endpoint_id);
190 return false;
191 }
192 }
193 }
194
195 if (data->endpoint.config.dma_mode) {
196 other_name = data->endpoint.config.dma_endpoint;
197 if (other_name >= count) {
198 dev_err(dev, "DMA endpoint name %u out of range "
199 "for endpoint %u\n",
200 other_name, data->endpoint_id);
201 return false;
202 }
203
204 other_data = &all_data[other_name];
205 if (ipa_gsi_endpoint_data_empty(other_data)) {
206 dev_err(dev, "DMA endpoint name %u undefined "
207 "for endpoint %u\n",
208 other_name, data->endpoint_id);
209 return false;
210 }
211 }
212
213 return true;
214 }
215
216 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
217 const struct ipa_gsi_endpoint_data *data)
218 {
219 const struct ipa_gsi_endpoint_data *dp = data;
220 struct device *dev = &ipa->pdev->dev;
221 enum ipa_endpoint_name name;
222
223 ipa_endpoint_validate_build();
224
225 if (count > IPA_ENDPOINT_COUNT) {
226 dev_err(dev, "too many endpoints specified (%u > %u)\n",
227 count, IPA_ENDPOINT_COUNT);
228 return false;
229 }
230
231 /* Make sure needed endpoints have defined data */
232 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
233 dev_err(dev, "command TX endpoint not defined\n");
234 return false;
235 }
236 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
237 dev_err(dev, "LAN RX endpoint not defined\n");
238 return false;
239 }
240 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
241 dev_err(dev, "AP->modem TX endpoint not defined\n");
242 return false;
243 }
244 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
245 dev_err(dev, "AP<-modem RX endpoint not defined\n");
246 return false;
247 }
248
249 for (name = 0; name < count; name++, dp++)
250 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
251 return false;
252
253 return true;
254 }
255
256 #else /* !IPA_VALIDATE */
257
258 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
259 const struct ipa_gsi_endpoint_data *data)
260 {
261 return true;
262 }
263
264 #endif /* !IPA_VALIDATE */
265
266 /* Allocate a transaction to use on a non-command endpoint */
267 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
268 u32 tre_count)
269 {
270 struct gsi *gsi = &endpoint->ipa->gsi;
271 u32 channel_id = endpoint->channel_id;
272 enum dma_data_direction direction;
273
274 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
275
276 return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
277 }
278
279 /* suspend_delay represents suspend for RX, delay for TX endpoints.
280 * Note that suspend is not supported starting with IPA v4.0.
281 */
282 static bool
283 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
284 {
285 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
286 struct ipa *ipa = endpoint->ipa;
287 bool state;
288 u32 mask;
289 u32 val;
290
291 /* Suspend is not supported for IPA v4.0+. Delay doesn't work
292 * correctly on IPA v4.2.
293 *
294 * if (endpoint->toward_ipa)
295 * assert(ipa->version != IPA_VERSION_4_2);
296 * else
297 * assert(ipa->version == IPA_VERSION_3_5_1);
298 */
299 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
300
301 val = ioread32(ipa->reg_virt + offset);
302 /* Don't bother if it's already in the requested state */
303 state = !!(val & mask);
304 if (suspend_delay != state) {
305 val ^= mask;
306 iowrite32(val, ipa->reg_virt + offset);
307 }
308
309 return state;
310 }
311
312 /* We currently don't care what the previous state was for delay mode */
313 static void
314 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
315 {
316 /* assert(endpoint->toward_ipa); */
317
318 (void)ipa_endpoint_init_ctrl(endpoint, enable);
319 }
320
321 /* Returns previous suspend state (true means it was enabled) */
322 static bool
323 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
324 {
325 /* assert(!endpoint->toward_ipa); */
326
327 return ipa_endpoint_init_ctrl(endpoint, enable);
328 }
329
330 /* Enable or disable delay or suspend mode on all modem endpoints */
331 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
332 {
333 bool support_suspend;
334 u32 endpoint_id;
335
336 /* DELAY mode doesn't work correctly on IPA v4.2 */
337 if (ipa->version == IPA_VERSION_4_2)
338 return;
339
340 /* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
341 support_suspend = ipa->version == IPA_VERSION_3_5_1;
342
343 for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
344 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
345
346 if (endpoint->ee_id != GSI_EE_MODEM)
347 continue;
348
349 /* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
350 if (endpoint->toward_ipa)
351 ipa_endpoint_program_delay(endpoint, enable);
352 else if (support_suspend)
353 (void)ipa_endpoint_program_suspend(endpoint, enable);
354 }
355 }
356
357 /* Reset all modem endpoints to use the default exception endpoint */
358 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
359 {
360 u32 initialized = ipa->initialized;
361 struct gsi_trans *trans;
362 u32 count;
363
364 /* We need one command per modem TX endpoint. We can get an upper
365 * bound on that by assuming all initialized endpoints are modem->IPA.
366 * That won't happen, and we could be more precise, but this is fine
367 * for now. We need to end the transaction with a "tag process."
368 */
369 count = hweight32(initialized) + ipa_cmd_tag_process_count();
370 trans = ipa_cmd_trans_alloc(ipa, count);
371 if (!trans) {
372 dev_err(&ipa->pdev->dev,
373 "no transaction to reset modem exception endpoints\n");
374 return -EBUSY;
375 }
376
377 while (initialized) {
378 u32 endpoint_id = __ffs(initialized);
379 struct ipa_endpoint *endpoint;
380 u32 offset;
381
382 initialized ^= BIT(endpoint_id);
383
384 /* We only reset modem TX endpoints */
385 endpoint = &ipa->endpoint[endpoint_id];
386 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
387 continue;
388
389 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
390
391 /* Value written is 0, and all bits are updated. That
392 * means status is disabled on the endpoint, and as a
393 * result all other fields in the register are ignored.
394 */
395 ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
396 }
397
398 ipa_cmd_tag_process_add(trans);
399
400 /* XXX This should have a 1 second timeout */
401 gsi_trans_commit_wait(trans);
402
403 return 0;
404 }
405
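/* Program checksum offload for an endpoint.  A TX endpoint using
 * checksum offload gets uplink (UL) offload, along with the offset
 * of its checksum header; an RX endpoint gets downlink (DL) offload;
 * otherwise checksum offload is disabled.
 */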
406 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
407 {
408 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
409 u32 val = 0;
410
411 /* FRAG_OFFLOAD_EN is 0 */
412 if (endpoint->data->checksum) {
413 if (endpoint->toward_ipa) {
414 u32 checksum_offset;
415
416 val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
417 CS_OFFLOAD_EN_FMASK);
418 /* Checksum header offset is in 4-byte units */
419 checksum_offset = sizeof(struct rmnet_map_header);
420 checksum_offset /= sizeof(u32);
421 val |= u32_encode_bits(checksum_offset,
422 CS_METADATA_HDR_OFFSET_FMASK);
423 } else {
424 val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
425 CS_OFFLOAD_EN_FMASK);
426 }
427 } else {
428 val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
429 CS_OFFLOAD_EN_FMASK);
430 }
431 /* CS_GEN_QMB_MASTER_SEL is 0 */
432
433 iowrite32(val, endpoint->ipa->reg_virt + offset);
434 }
435
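/* Program header handling for an endpoint.  For QMAP endpoints the
 * header is the rmnet_map header (plus an uplink checksum header for
 * a TX endpoint using checksum offload).  The QMAP header itself
 * supplies the metadata, and for an RX endpoint the offset of its
 * packet length field is also recorded.
 */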
436 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
437 {
438 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
439 u32 val = 0;
440
441 if (endpoint->data->qmap) {
442 size_t header_size = sizeof(struct rmnet_map_header);
443
444 if (endpoint->toward_ipa && endpoint->data->checksum)
445 header_size += sizeof(struct rmnet_map_ul_csum_header);
446
447 val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
448 /* metadata is the 4 byte rmnet_map header itself */
449 val |= HDR_OFST_METADATA_VALID_FMASK;
450 val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
451 /* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
452 if (!endpoint->toward_ipa) {
453 u32 size_offset = offsetof(struct rmnet_map_header,
454 pkt_len);
455
456 val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
457 val |= u32_encode_bits(size_offset,
458 HDR_OFST_PKT_SIZE_FMASK);
459 }
460 /* HDR_A5_MUX is 0 */
461 /* HDR_LEN_INC_DEAGG_HDR is 0 */
462 /* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
463 }
464
465 iowrite32(val, endpoint->ipa->reg_virt + offset);
466 }
467
468 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
469 {
470 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
471 u32 pad_align = endpoint->data->rx.pad_align;
472 u32 val = 0;
473
474 val |= HDR_ENDIANNESS_FMASK; /* big endian */
475 val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
476 /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
477 /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
478 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
479 if (!endpoint->toward_ipa)
480 val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
481
482 iowrite32(val, endpoint->ipa->reg_virt + offset);
483 }
484
485 /**
486 * Generate a metadata mask value that will select only the mux_id
487 * field in an rmnet_map header structure. The mux_id is at offset
488 * 1 byte from the beginning of the structure, but the metadata
489 * value is treated as a 4-byte unit. So this mask must be computed
490 * with endianness in mind. Note that ipa_endpoint_init_hdr_metadata_mask()
491 * will convert this value to the proper byte order.
492 *
493 * Marked __always_inline because this is really computing a
494 * constant value.
495 */
496 static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
497 {
498 size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
499 u32 mux_id_mask = 0;
500 u8 *bytes;
501
502 bytes = (u8 *)&mux_id_mask;
503 bytes[mux_id_offset] = 0xff; /* mux_id is 1 byte */
504
505 return cpu_to_be32(mux_id_mask);
506 }
507
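/* Program the metadata mask for an endpoint.  Only a QMAP RX
 * endpoint uses a non-zero mask, which selects just the mux_id byte
 * of the rmnet_map header.
 */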
508 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
509 {
510 u32 endpoint_id = endpoint->endpoint_id;
511 u32 val = 0;
512 u32 offset;
513
514 offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
515
516 if (!endpoint->toward_ipa && endpoint->data->qmap)
517 val = ipa_rmnet_mux_id_metadata_mask();
518
519 iowrite32(val, endpoint->ipa->reg_virt + offset);
520 }
521
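/* Program the mode for an endpoint.  A TX endpoint configured for
 * DMA mode also has its DMA destination endpoint programmed; all
 * others use basic mode.
 */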
522 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
523 {
524 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
525 u32 val;
526
527 if (endpoint->toward_ipa && endpoint->data->dma_mode) {
528 enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
529 u32 dma_endpoint_id;
530
531 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
532
533 val = u32_encode_bits(IPA_DMA, MODE_FMASK);
534 val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
535 } else {
536 val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
537 }
538 /* Other bitfields unspecified (and 0) */
539
540 iowrite32(val, endpoint->ipa->reg_virt + offset);
541 }
542
543 /* Compute the aggregation size value to use for a given buffer size */
544 static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
545 {
546 /* We don't use "hard byte limit" aggregation, so we define the
547 * aggregation limit such that our buffer has enough space *after*
548 * that limit to receive a full MTU of data, plus overhead.
549 */
550 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
551
552 return rx_buffer_size / SZ_1K;
553 }
554
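/* Program aggregation for an endpoint.  An RX endpoint that uses
 * aggregation gets generic aggregation with byte and time limits
 * (optionally closing aggregation at end-of-frame); a TX endpoint
 * gets QCMAP deaggregation; otherwise aggregation is bypassed.
 */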
555 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
556 {
557 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
558 u32 val = 0;
559
560 if (endpoint->data->aggregation) {
561 if (!endpoint->toward_ipa) {
562 u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
563 u32 limit;
564
565 val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
566 val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
567 val |= u32_encode_bits(aggr_size,
568 AGGR_BYTE_LIMIT_FMASK);
569 limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
570 val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
571 AGGR_TIME_LIMIT_FMASK);
572 val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
573 if (endpoint->data->rx.aggr_close_eof)
574 val |= AGGR_SW_EOF_ACTIVE_FMASK;
575 /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
576 } else {
577 val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
578 AGGR_EN_FMASK);
579 val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
580 /* other fields ignored */
581 }
582 /* AGGR_FORCE_CLOSE is 0 */
583 } else {
584 val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
585 /* other fields ignored */
586 }
587
588 iowrite32(val, endpoint->ipa->reg_virt + offset);
589 }
590
591 /* A return value of 0 indicates an error */
592 static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
593 {
594 u32 scale;
595 u32 base;
596 u32 val;
597
598 if (!microseconds)
599 return 0; /* invalid delay */
600
601 /* Timer is represented in units of clock ticks. */
602 if (ipa->version < IPA_VERSION_4_2)
603 return microseconds; /* XXX Needs to be computed */
604
605 /* IPA v4.2 represents the tick count as base * scale */
606 scale = 1; /* XXX Needs to be computed */
607 if (scale > field_max(SCALE_FMASK))
608 return 0; /* scale too big */
609
610 base = DIV_ROUND_CLOSEST(microseconds, scale);
611 if (base > field_max(BASE_VALUE_FMASK))
612 return 0; /* microseconds too big */
613
614 val = u32_encode_bits(scale, SCALE_FMASK);
615 val |= u32_encode_bits(base, BASE_VALUE_FMASK);
616
617 return val;
618 }
619
620 static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
621 u32 microseconds)
622 {
623 u32 endpoint_id = endpoint->endpoint_id;
624 struct ipa *ipa = endpoint->ipa;
625 u32 offset;
626 u32 val;
627
628 /* XXX We'll fix this when the register definition is clear */
629 if (microseconds) {
630 struct device *dev = &ipa->pdev->dev;
631
632 dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
633 endpoint_id);
634 microseconds = 0;
635 }
636
637 if (microseconds) {
638 val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
639 if (!val)
640 return -EINVAL;
641 } else {
642 val = 0; /* timeout is immediate */
643 }
644 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
645 iowrite32(val, ipa->reg_virt + offset);
646
647 return 0;
648 }
649
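/* Enable or disable head-of-line block handling for an endpoint */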
650 static void
651 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
652 {
653 u32 endpoint_id = endpoint->endpoint_id;
654 u32 offset;
655 u32 val;
656
657 val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
658 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
659 iowrite32(val, endpoint->ipa->reg_virt + offset);
660 }
661
662 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
663 {
664 u32 i;
665
666 for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
667 struct ipa_endpoint *endpoint = &ipa->endpoint[i];
668
669 if (endpoint->ee_id != GSI_EE_MODEM)
670 continue;
671
672 (void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
673 ipa_endpoint_init_hol_block_enable(endpoint, true);
674 }
675 }
676
677 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
678 {
679 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
680 u32 val = 0;
681
682 /* DEAGGR_HDR_LEN is 0 */
683 /* PACKET_OFFSET_VALID is 0 */
684 /* PACKET_OFFSET_LOCATION is ignored (not valid) */
685 /* MAX_PACKET_LEN is 0 (not enforced) */
686
687 iowrite32(val, endpoint->ipa->reg_virt + offset);
688 }
689
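/* Program the sequencer type for a TX endpoint.  The low nibble of
 * the seq_type value selects the HPS sequencer type, and the next
 * nibble selects the DPS sequencer type.
 */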
690 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
691 {
692 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
693 u32 seq_type = endpoint->seq_type;
694 u32 val = 0;
695
696 val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
697 val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
698 /* HPS_REP_SEQ_TYPE is 0 */
699 /* DPS_REP_SEQ_TYPE is 0 */
700
701 iowrite32(val, endpoint->ipa->reg_virt + offset);
702 }
703
704 /**
705 * ipa_endpoint_skb_tx() - Transmit a socket buffer
706 * @endpoint: Endpoint pointer
707 * @skb: Socket buffer to send
708 *
709 * Return: 0 if successful, or a negative error code
710 */
711 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
712 {
713 struct gsi_trans *trans;
714 u32 nr_frags;
715 int ret;
716
717 /* Make sure source endpoint's TLV FIFO has enough entries to
718 * hold the linear portion of the skb and all its fragments.
719 * If not, see if we can linearize it before giving up.
720 */
721 nr_frags = skb_shinfo(skb)->nr_frags;
722 if (1 + nr_frags > endpoint->trans_tre_max) {
723 if (skb_linearize(skb))
724 return -E2BIG;
725 nr_frags = 0;
726 }
727
728 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
729 if (!trans)
730 return -EBUSY;
731
732 ret = gsi_trans_skb_add(trans, skb);
733 if (ret)
734 goto err_trans_free;
735 trans->data = skb; /* transaction owns skb now */
736
737 gsi_trans_commit(trans, !netdev_xmit_more());
738
739 return 0;
740
741 err_trans_free:
742 gsi_trans_free(trans);
743
744 return -ENOMEM;
745 }
746
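/* Enable or disable status reporting for an endpoint.  Status for a
 * TX endpoint is delivered to the RX endpoint named in its
 * configuration data.
 */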
747 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
748 {
749 u32 endpoint_id = endpoint->endpoint_id;
750 struct ipa *ipa = endpoint->ipa;
751 u32 val = 0;
752 u32 offset;
753
754 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
755
756 if (endpoint->data->status_enable) {
757 val |= STATUS_EN_FMASK;
758 if (endpoint->toward_ipa) {
759 enum ipa_endpoint_name name;
760 u32 status_endpoint_id;
761
762 name = endpoint->data->tx.status_endpoint;
763 status_endpoint_id = ipa->name_map[name]->endpoint_id;
764
765 val |= u32_encode_bits(status_endpoint_id,
766 STATUS_ENDP_FMASK);
767 }
768 /* STATUS_LOCATION is 0 (status element precedes packet) */
769 /* The next field is present for IPA v4.0 and above */
770 /* STATUS_PKT_SUPPRESS_FMASK is 0 */
771 }
772
773 iowrite32(val, ipa->reg_virt + offset);
774 }
775
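/* Allocate a receive buffer (one or more contiguous pages) and
 * supply it to the hardware in a single-TRE transaction.  The
 * doorbell is rung once per IPA_REPLENISH_BATCH buffers queued.
 */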
776 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
777 {
778 struct gsi_trans *trans;
779 bool doorbell = false;
780 struct page *page;
781 u32 offset;
782 u32 len;
783 int ret;
784
785 page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
786 if (!page)
787 return -ENOMEM;
788
789 trans = ipa_endpoint_trans_alloc(endpoint, 1);
790 if (!trans)
791 goto err_free_pages;
792
793 /* Offset the buffer to make space for skb headroom */
794 offset = NET_SKB_PAD;
795 len = IPA_RX_BUFFER_SIZE - offset;
796
797 ret = gsi_trans_page_add(trans, page, len, offset);
798 if (ret)
799 goto err_trans_free;
800 trans->data = page; /* transaction owns page now */
801
802 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
803 doorbell = true;
804 endpoint->replenish_ready = 0;
805 }
806
807 gsi_trans_commit(trans, doorbell);
808
809 return 0;
810
811 err_trans_free:
812 gsi_trans_free(trans);
813 err_free_pages:
814 __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
815
816 return -ENOMEM;
817 }
818
819 /**
820 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
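 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to add to the replenish backlog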
821 *
822 * Allocate RX packet wrapper structures with maximal socket buffers
823 * for an endpoint. These are supplied to the hardware, which fills
824 * them with incoming data.
825 */
826 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
827 {
828 struct gsi *gsi;
829 u32 backlog;
830
831 if (!endpoint->replenish_enabled) {
832 if (count)
833 atomic_add(count, &endpoint->replenish_saved);
834 return;
835 }
836
838 while (atomic_dec_not_zero(&endpoint->replenish_backlog))
839 if (ipa_endpoint_replenish_one(endpoint))
840 goto try_again_later;
841 if (count)
842 atomic_add(count, &endpoint->replenish_backlog);
843
844 return;
845
846 try_again_later:
847 /* The last one didn't succeed, so fix the backlog */
848 backlog = atomic_inc_return(&endpoint->replenish_backlog);
849
850 if (count)
851 atomic_add(count, &endpoint->replenish_backlog);
852
853 /* Whenever a receive buffer transaction completes we'll try to
854 * replenish again. It's unlikely, but if we fail to supply even
855 * one buffer, nothing will trigger another replenish attempt.
856 * Receive buffer transactions use one TRE, so schedule work to
857 * try replenishing again if our backlog is *all* available TREs.
858 */
859 gsi = &endpoint->ipa->gsi;
860 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
861 schedule_delayed_work(&endpoint->replenish_work,
862 msecs_to_jiffies(1));
863 }
864
865 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
866 {
867 struct gsi *gsi = &endpoint->ipa->gsi;
868 u32 max_backlog;
869 u32 saved;
870
871 endpoint->replenish_enabled = true;
872 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
873 atomic_add(saved, &endpoint->replenish_backlog);
874
875 /* Start replenishing if hardware currently has no buffers */
876 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
877 if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
878 ipa_endpoint_replenish(endpoint, 0);
879 }
880
881 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
882 {
883 u32 backlog;
884
885 endpoint->replenish_enabled = false;
886 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
887 atomic_add(backlog, &endpoint->replenish_saved);
888 }
889
890 static void ipa_endpoint_replenish_work(struct work_struct *work)
891 {
892 struct delayed_work *dwork = to_delayed_work(work);
893 struct ipa_endpoint *endpoint;
894
895 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
896
897 ipa_endpoint_replenish(endpoint, 0);
898 }
899
900 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
901 void *data, u32 len, u32 extra)
902 {
903 struct sk_buff *skb;
904
905 skb = __dev_alloc_skb(len, GFP_ATOMIC);
906 if (skb) {
907 skb_put(skb, len);
908 memcpy(skb->data, data, len);
909 skb->truesize += extra;
910 }
911
912 /* Now receive it, or drop it if there's no netdev */
913 if (endpoint->netdev)
914 ipa_modem_skb_rx(endpoint->netdev, skb);
915 else if (skb)
916 dev_kfree_skb_any(skb);
917 }
918
919 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
920 struct page *page, u32 len)
921 {
922 struct sk_buff *skb;
923
924 /* Nothing to do if there's no netdev */
925 if (!endpoint->netdev)
926 return false;
927
928 /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
929 skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
930 if (skb) {
931 /* Reserve the headroom and account for the data */
932 skb_reserve(skb, NET_SKB_PAD);
933 skb_put(skb, len);
934 }
935
936 /* Receive the buffer (or record drop if unable to build it) */
937 ipa_modem_skb_rx(endpoint->netdev, skb);
938
939 return skb != NULL;
940 }
941
942 /* The format of a packet status element is the same for several status
943 * types (opcodes). The NEW_FRAG_RULE, LOG, DCMP (decompression) types
944 * aren't currently supported
945 */
946 static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
947 {
948 switch (opcode) {
949 case IPA_STATUS_OPCODE_PACKET:
950 case IPA_STATUS_OPCODE_DROPPED_PACKET:
951 case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
952 case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
953 return true;
954 default:
955 return false;
956 }
957 }
958
959 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
960 const struct ipa_status *status)
961 {
962 u32 endpoint_id;
963
964 if (!ipa_status_format_packet(status->opcode))
965 return true;
966 if (!status->pkt_len)
967 return true;
968 endpoint_id = u32_get_bits(status->endp_dst_idx,
969 IPA_STATUS_DST_IDX_FMASK);
970 if (endpoint_id != endpoint->endpoint_id)
971 return true;
972
973 return false; /* Don't skip this packet, process it */
974 }
975
976 /* Return whether the status indicates the packet should be dropped */
977 static bool ipa_status_drop_packet(const struct ipa_status *status)
978 {
979 u32 val;
980
981 /* Deaggregation exceptions we drop; others we consume */
982 if (status->exception)
983 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
984
985 /* Drop the packet if it fails to match a routing rule; otherwise no */
986 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
987
988 return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
989 }
990
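/* Process the status elements in an RX buffer.  Each element
 * describes one received packet; packets that aren't skipped or
 * dropped are copied (without their status element) into new socket
 * buffers for receipt.
 */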
991 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
992 struct page *page, u32 total_len)
993 {
994 void *data = page_address(page) + NET_SKB_PAD;
995 u32 unused = IPA_RX_BUFFER_SIZE - total_len;
996 u32 resid = total_len;
997
998 while (resid) {
999 const struct ipa_status *status = data;
1000 u32 align;
1001 u32 len;
1002
1003 if (resid < sizeof(*status)) {
1004 dev_err(&endpoint->ipa->pdev->dev,
1005 "short message (%u bytes < %zu byte status)\n",
1006 resid, sizeof(*status));
1007 break;
1008 }
1009
1010 /* Skip over status packets that lack packet data */
1011 if (ipa_endpoint_status_skip(endpoint, status)) {
1012 data += sizeof(*status);
1013 resid -= sizeof(*status);
1014 continue;
1015 }
1016
1017 /* Compute the amount of buffer space consumed by the
1018 * packet, including the status element. If the hardware
1019 * is configured to pad packet data to an aligned boundary,
1020 * account for that. And if checksum offload is enabled
1021 * a trailer containing computed checksum information will
1022 * be appended.
1023 */
1024 align = endpoint->data->rx.pad_align ? : 1;
1025 len = le16_to_cpu(status->pkt_len);
1026 len = sizeof(*status) + ALIGN(len, align);
1027 if (endpoint->data->checksum)
1028 len += sizeof(struct rmnet_map_dl_csum_trailer);
1029
1030 /* Charge the new packet with a proportional fraction of
1031 * the unused space in the original receive buffer.
1032 * XXX Charge a proportion of the *whole* receive buffer?
1033 */
1034 if (!ipa_status_drop_packet(status)) {
1035 u32 extra = unused * len / total_len;
1036 void *data2 = data + sizeof(*status);
1037 u32 len2 = le16_to_cpu(status->pkt_len);
1038
1039 /* Client receives only packet data (no status) */
1040 ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1041 }
1042
1043 /* Consume status and the full packet it describes */
1044 data += len;
1045 resid -= len;
1046 }
1047 }
1048
1049 /* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
1050 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1051 struct gsi_trans *trans)
1052 {
1053 }
1054
1055 /* Complete transaction initiated in ipa_endpoint_replenish_one() */
1056 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1057 struct gsi_trans *trans)
1058 {
1059 struct page *page;
1060
1061 ipa_endpoint_replenish(endpoint, 1);
1062
1063 if (trans->cancelled)
1064 return;
1065
1066 /* Parse or build a socket buffer using the actual received length */
1067 page = trans->data;
1068 if (endpoint->data->status_enable)
1069 ipa_endpoint_status_parse(endpoint, page, trans->len);
1070 else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1071 trans->data = NULL; /* Pages have been consumed */
1072 }
1073
1074 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1075 struct gsi_trans *trans)
1076 {
1077 if (endpoint->toward_ipa)
1078 ipa_endpoint_tx_complete(endpoint, trans);
1079 else
1080 ipa_endpoint_rx_complete(endpoint, trans);
1081 }
1082
1083 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1084 struct gsi_trans *trans)
1085 {
1086 if (endpoint->toward_ipa) {
1087 struct ipa *ipa = endpoint->ipa;
1088
1089 /* Nothing to do for command transactions */
1090 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1091 struct sk_buff *skb = trans->data;
1092
1093 if (skb)
1094 dev_kfree_skb_any(skb);
1095 }
1096 } else {
1097 struct page *page = trans->data;
1098
1099 if (page)
1100 __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1101 }
1102 }
1103
1104 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1105 {
1106 u32 val;
1107
1108 /* ROUTE_DIS is 0 */
1109 val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1110 val |= ROUTE_DEF_HDR_TABLE_FMASK;
1111 val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1112 val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1113 val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1114
1115 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1116 }
1117
1118 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1119 {
1120 ipa_endpoint_default_route_set(ipa, 0);
1121 }
1122
1123 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
1124 {
1125 u32 mask = BIT(endpoint->endpoint_id);
1126 struct ipa *ipa = endpoint->ipa;
1127 u32 offset;
1128 u32 val;
1129
1130 /* assert(mask & ipa->available); */
1131 offset = ipa_reg_state_aggr_active_offset(ipa->version);
1132 val = ioread32(ipa->reg_virt + offset);
1133
1134 return !!(val & mask);
1135 }
1136
1137 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
1138 {
1139 u32 mask = BIT(endpoint->endpoint_id);
1140 struct ipa *ipa = endpoint->ipa;
1141
1142 /* assert(mask & ipa->available); */
1143 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
1144 }
1145
1146 /**
1147 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1148 * @endpoint: Endpoint to be reset
1149 *
1150 * If aggregation is active on an RX endpoint when a reset is performed
1151 * on its underlying GSI channel, a special sequence of actions must be
1152 * taken to ensure the IPA pipeline is properly cleared.
1153 *
1154 * Return: 0 if successful, or a negative error code
1155 */
1156 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1157 {
1158 struct device *dev = &endpoint->ipa->pdev->dev;
1159 struct ipa *ipa = endpoint->ipa;
1160 struct gsi *gsi = &ipa->gsi;
1161 bool suspended = false;
1162 dma_addr_t addr;
1163 bool legacy;
1164 u32 retries;
1165 u32 len = 1;
1166 void *virt;
1167 int ret;
1168
1169 virt = kzalloc(len, GFP_KERNEL);
1170 if (!virt)
1171 return -ENOMEM;
1172
1173 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1174 if (dma_mapping_error(dev, addr)) {
1175 ret = -ENOMEM;
1176 goto out_kfree;
1177 }
1178
1179 /* Force close aggregation before issuing the reset */
1180 ipa_endpoint_force_close(endpoint);
1181
1182 /* Reset and reconfigure the channel with the doorbell engine
1183 * disabled. Then poll until we know aggregation is no longer
1184 * active. We'll re-enable the doorbell (if appropriate) when
1185 * we reset again below.
1186 */
1187 gsi_channel_reset(gsi, endpoint->channel_id, false);
1188
1189 /* Make sure the channel isn't suspended */
1190 if (endpoint->ipa->version == IPA_VERSION_3_5_1)
1191 suspended = ipa_endpoint_program_suspend(endpoint, false);
1192
1193 /* Start channel and do a 1 byte read */
1194 ret = gsi_channel_start(gsi, endpoint->channel_id);
1195 if (ret)
1196 goto out_suspend_again;
1197
1198 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1199 if (ret)
1200 goto err_endpoint_stop;
1201
1202 /* Wait for aggregation to be closed on the channel */
1203 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1204 do {
1205 if (!ipa_endpoint_aggr_active(endpoint))
1206 break;
1207 msleep(1);
1208 } while (retries--);
1209
1210 /* Check one last time */
1211 if (ipa_endpoint_aggr_active(endpoint))
1212 dev_err(dev, "endpoint %u still active during reset\n",
1213 endpoint->endpoint_id);
1214
1215 gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1216
1217 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1218 if (ret)
1219 goto out_suspend_again;
1220
1221 /* Finally, reset and reconfigure the channel again (re-enabling the
1222 * doorbell engine if appropriate). Sleep for 1 millisecond to
1223 * complete the channel reset sequence. Finish by suspending the
1224 * channel again (if necessary).
1225 */
1226 legacy = ipa->version == IPA_VERSION_3_5_1;
1227 gsi_channel_reset(gsi, endpoint->channel_id, legacy);
1228
1229 msleep(1);
1230
1231 goto out_suspend_again;
1232
1233 err_endpoint_stop:
1234 (void)gsi_channel_stop(gsi, endpoint->channel_id);
1235 out_suspend_again:
1236 if (suspended)
1237 (void)ipa_endpoint_program_suspend(endpoint, true);
1238 dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1239 out_kfree:
1240 kfree(virt);
1241
1242 return ret;
1243 }
1244
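/* Reset the GSI channel underlying an endpoint, using the special
 * RX aggregation recovery sequence when needed.
 */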
1245 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1246 {
1247 u32 channel_id = endpoint->channel_id;
1248 struct ipa *ipa = endpoint->ipa;
1249 bool special;
1250 bool legacy;
1251 int ret = 0;
1252
1253 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1254 * is active, we need to handle things specially to recover.
1255 * All other cases just need to reset the underlying GSI channel.
1256 *
1257 * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
1258 */
1259 legacy = ipa->version == IPA_VERSION_3_5_1;
1260 special = !endpoint->toward_ipa && endpoint->data->aggregation;
1261 if (special && ipa_endpoint_aggr_active(endpoint))
1262 ret = ipa_endpoint_reset_rx_aggr(endpoint);
1263 else
1264 gsi_channel_reset(&ipa->gsi, channel_id, legacy);
1265
1266 if (ret)
1267 dev_err(&ipa->pdev->dev,
1268 "error %d resetting channel %u for endpoint %u\n",
1269 ret, endpoint->channel_id, endpoint->endpoint_id);
1270 }
1271
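/* Program all configuration registers for an endpoint */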
1272 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1273 {
1274 if (endpoint->toward_ipa) {
1275 if (endpoint->ipa->version != IPA_VERSION_4_2)
1276 ipa_endpoint_program_delay(endpoint, false);
1277 ipa_endpoint_init_hdr_ext(endpoint);
1278 ipa_endpoint_init_aggr(endpoint);
1279 ipa_endpoint_init_deaggr(endpoint);
1280 ipa_endpoint_init_seq(endpoint);
1281 } else {
1282 if (endpoint->ipa->version == IPA_VERSION_3_5_1)
1283 (void)ipa_endpoint_program_suspend(endpoint, false);
1284 ipa_endpoint_init_hdr_ext(endpoint);
1285 ipa_endpoint_init_aggr(endpoint);
1286 }
1287 ipa_endpoint_init_cfg(endpoint);
1288 ipa_endpoint_init_hdr(endpoint);
1289 ipa_endpoint_init_hdr_metadata_mask(endpoint);
1290 ipa_endpoint_init_mode(endpoint);
1291 ipa_endpoint_status(endpoint);
1292 }
1293
1294 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1295 {
1296 struct ipa *ipa = endpoint->ipa;
1297 struct gsi *gsi = &ipa->gsi;
1298 int ret;
1299
1300 ret = gsi_channel_start(gsi, endpoint->channel_id);
1301 if (ret) {
1302 dev_err(&ipa->pdev->dev,
1303 "error %d starting %cX channel %u for endpoint %u\n",
1304 ret, endpoint->toward_ipa ? 'T' : 'R',
1305 endpoint->channel_id, endpoint->endpoint_id);
1306 return ret;
1307 }
1308
1309 if (!endpoint->toward_ipa) {
1310 ipa_interrupt_suspend_enable(ipa->interrupt,
1311 endpoint->endpoint_id);
1312 ipa_endpoint_replenish_enable(endpoint);
1313 }
1314
1315 ipa->enabled |= BIT(endpoint->endpoint_id);
1316
1317 return 0;
1318 }
1319
1320 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1321 {
1322 u32 mask = BIT(endpoint->endpoint_id);
1323 struct ipa *ipa = endpoint->ipa;
1324 struct gsi *gsi = &ipa->gsi;
1325 int ret;
1326
1327 if (!(ipa->enabled & mask))
1328 return;
1329
1330 ipa->enabled ^= mask;
1331
1332 if (!endpoint->toward_ipa) {
1333 ipa_endpoint_replenish_disable(endpoint);
1334 ipa_interrupt_suspend_disable(ipa->interrupt,
1335 endpoint->endpoint_id);
1336 }
1337
1338 /* Note that if stop fails, the channel's state is not well-defined */
1339 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1340 if (ret)
1341 dev_err(&ipa->pdev->dev,
1342 "error %d attempting to stop endpoint %u\n", ret,
1343 endpoint->endpoint_id);
1344 }
1345
1346 /**
1347 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
1348 * @endpoint: Endpoint on which to emulate a suspend
1349 *
1350 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
1351 * with an open aggregation frame. This is to work around a hardware
1352 * issue in IPA version 3.5.1 where the suspend interrupt will not be
1353 * generated when it should be.
1354 */
1355 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
1356 {
1357 struct ipa *ipa = endpoint->ipa;
1358
1359 /* assert(ipa->version == IPA_VERSION_3_5_1); */
1360
1361 if (!endpoint->data->aggregation)
1362 return;
1363
1364 /* Nothing to do if the endpoint doesn't have aggregation open */
1365 if (!ipa_endpoint_aggr_active(endpoint))
1366 return;
1367
1368 /* Force close aggregation */
1369 ipa_endpoint_force_close(endpoint);
1370
1371 ipa_interrupt_simulate_suspend(ipa->interrupt);
1372 }
1373
1374 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1375 {
1376 struct device *dev = &endpoint->ipa->pdev->dev;
1377 struct gsi *gsi = &endpoint->ipa->gsi;
1378 bool stop_channel;
1379 int ret;
1380
1381 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1382 return;
1383
1384 if (!endpoint->toward_ipa)
1385 ipa_endpoint_replenish_disable(endpoint);
1386
1387 /* IPA v3.5.1 doesn't use channel stop for suspend */
1388 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
1389 if (!endpoint->toward_ipa && !stop_channel) {
1390 /* Due to a hardware bug, a client suspended with an open
1391 * aggregation frame will not generate a SUSPEND IPA
1392 * interrupt. We work around this by force-closing the
1393 * aggregation frame, then simulating the arrival of such
1394 * an interrupt.
1395 */
1396 (void)ipa_endpoint_program_suspend(endpoint, true);
1397 ipa_endpoint_suspend_aggr(endpoint);
1398 }
1399
1400 ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
1401 if (ret)
1402 dev_err(dev, "error %d suspending channel %u\n", ret,
1403 endpoint->channel_id);
1404 }
1405
1406 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1407 {
1408 struct device *dev = &endpoint->ipa->pdev->dev;
1409 struct gsi *gsi = &endpoint->ipa->gsi;
1410 bool start_channel;
1411 int ret;
1412
1413 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1414 return;
1415
1416 /* IPA v3.5.1 doesn't use channel start for resume */
1417 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
1418 if (!endpoint->toward_ipa && !start_channel)
1419 (void)ipa_endpoint_program_suspend(endpoint, false);
1420
1421 ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
1422 if (ret)
1423 dev_err(dev, "error %d resuming channel %u\n", ret,
1424 endpoint->channel_id);
1425 else if (!endpoint->toward_ipa)
1426 ipa_endpoint_replenish_enable(endpoint);
1427 }
1428
1429 void ipa_endpoint_suspend(struct ipa *ipa)
1430 {
1431 if (ipa->modem_netdev)
1432 ipa_modem_suspend(ipa->modem_netdev);
1433
1434 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1435 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1436 }
1437
1438 void ipa_endpoint_resume(struct ipa *ipa)
1439 {
1440 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1441 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1442
1443 if (ipa->modem_netdev)
1444 ipa_modem_resume(ipa->modem_netdev);
1445 }
1446
1447 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1448 {
1449 struct gsi *gsi = &endpoint->ipa->gsi;
1450 u32 channel_id = endpoint->channel_id;
1451
1452 /* Only AP endpoints get set up */
1453 if (endpoint->ee_id != GSI_EE_AP)
1454 return;
1455
1456 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
1457 if (!endpoint->toward_ipa) {
1458 /* RX transactions require a single TRE, so the maximum
1459 * backlog is the same as the maximum outstanding TREs.
1460 */
1461 endpoint->replenish_enabled = false;
1462 atomic_set(&endpoint->replenish_saved,
1463 gsi_channel_tre_max(gsi, endpoint->channel_id));
1464 atomic_set(&endpoint->replenish_backlog, 0);
1465 INIT_DELAYED_WORK(&endpoint->replenish_work,
1466 ipa_endpoint_replenish_work);
1467 }
1468
1469 ipa_endpoint_program(endpoint);
1470
1471 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1472 }
1473
1474 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1475 {
1476 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1477
1478 if (!endpoint->toward_ipa)
1479 cancel_delayed_work_sync(&endpoint->replenish_work);
1480
1481 ipa_endpoint_reset(endpoint);
1482 }
1483
1484 void ipa_endpoint_setup(struct ipa *ipa)
1485 {
1486 u32 initialized = ipa->initialized;
1487
1488 ipa->set_up = 0;
1489 while (initialized) {
1490 u32 endpoint_id = __ffs(initialized);
1491
1492 initialized ^= BIT(endpoint_id);
1493
1494 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1495 }
1496 }
1497
1498 void ipa_endpoint_teardown(struct ipa *ipa)
1499 {
1500 u32 set_up = ipa->set_up;
1501
1502 while (set_up) {
1503 u32 endpoint_id = __fls(set_up);
1504
1505 set_up ^= BIT(endpoint_id);
1506
1507 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1508 }
1509 ipa->set_up = 0;
1510 }
1511
1512 int ipa_endpoint_config(struct ipa *ipa)
1513 {
1514 struct device *dev = &ipa->pdev->dev;
1515 u32 initialized;
1516 u32 rx_base;
1517 u32 rx_mask;
1518 u32 tx_mask;
1519 int ret = 0;
1520 u32 max;
1521 u32 val;
1522
1523 /* Find out about the endpoints supplied by the hardware, and ensure
1524 * the highest one doesn't exceed the number we support.
1525 */
1526 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1527
1528 /* Our RX is an IPA producer */
1529 rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
1530 max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
1531 if (max > IPA_ENDPOINT_MAX) {
1532 dev_err(dev, "too many endpoints (%u > %u)\n",
1533 max, IPA_ENDPOINT_MAX);
1534 return -EINVAL;
1535 }
1536 rx_mask = GENMASK(max - 1, rx_base);
1537
1538 /* Our TX is an IPA consumer */
1539 max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
1540 tx_mask = GENMASK(max - 1, 0);
1541
1542 ipa->available = rx_mask | tx_mask;
1543
1544 /* Check for initialized endpoints not supported by the hardware */
1545 if (ipa->initialized & ~ipa->available) {
1546 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1547 ipa->initialized & ~ipa->available);
1548 ret = -EINVAL; /* Report other errors too */
1549 }
1550
1551 initialized = ipa->initialized;
1552 while (initialized) {
1553 u32 endpoint_id = __ffs(initialized);
1554 struct ipa_endpoint *endpoint;
1555
1556 initialized ^= BIT(endpoint_id);
1557
1558 /* Make sure it's pointing in the right direction */
1559 endpoint = &ipa->endpoint[endpoint_id];
1560 if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
1561 dev_err(dev, "endpoint id %u wrong direction\n",
1562 endpoint_id);
1563 ret = -EINVAL;
1564 }
1565 }
1566
1567 return ret;
1568 }
1569
1570 void ipa_endpoint_deconfig(struct ipa *ipa)
1571 {
1572 ipa->available = 0; /* Nothing more to do */
1573 }
1574
1575 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1576 const struct ipa_gsi_endpoint_data *data)
1577 {
1578 struct ipa_endpoint *endpoint;
1579
1580 endpoint = &ipa->endpoint[data->endpoint_id];
1581
1582 if (data->ee_id == GSI_EE_AP)
1583 ipa->channel_map[data->channel_id] = endpoint;
1584 ipa->name_map[name] = endpoint;
1585
1586 endpoint->ipa = ipa;
1587 endpoint->ee_id = data->ee_id;
1588 endpoint->seq_type = data->endpoint.seq_type;
1589 endpoint->channel_id = data->channel_id;
1590 endpoint->endpoint_id = data->endpoint_id;
1591 endpoint->toward_ipa = data->toward_ipa;
1592 endpoint->data = &data->endpoint.config;
1593
1594 ipa->initialized |= BIT(endpoint->endpoint_id);
1595 }
1596
1597 void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1598 {
1599 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1600
1601 memset(endpoint, 0, sizeof(*endpoint));
1602 }
1603
1604 void ipa_endpoint_exit(struct ipa *ipa)
1605 {
1606 u32 initialized = ipa->initialized;
1607
1608 while (initialized) {
1609 u32 endpoint_id = __fls(initialized);
1610
1611 initialized ^= BIT(endpoint_id);
1612
1613 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1614 }
1615 memset(ipa->name_map, 0, sizeof(ipa->name_map));
1616 memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1617 }
1618
1619 /* Returns a bitmask of endpoints that support filtering, or 0 on error */
1620 u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1621 const struct ipa_gsi_endpoint_data *data)
1622 {
1623 enum ipa_endpoint_name name;
1624 u32 filter_map;
1625
1626 if (!ipa_endpoint_data_valid(ipa, count, data))
1627 return 0; /* Error */
1628
1629 ipa->initialized = 0;
1630
1631 filter_map = 0;
1632 for (name = 0; name < count; name++, data++) {
1633 if (ipa_gsi_endpoint_data_empty(data))
1634 continue; /* Skip over empty slots */
1635
1636 ipa_endpoint_init_one(ipa, name, data);
1637
1638 if (data->endpoint.filter_support)
1639 filter_map |= BIT(data->endpoint_id);
1640 }
1641
1642 if (!ipa_filter_map_valid(ipa, filter_map))
1643 goto err_endpoint_exit;
1644
1645 return filter_map; /* Non-zero bitmask */
1646
1647 err_endpoint_exit:
1648 ipa_endpoint_exit(ipa);
1649
1650 return 0; /* Error */
1651 }