// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
/**
 * DOC: IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and are carried by a single GSI TRE.  Each immediate
 * command has a well-defined format, with a payload of a known length.
 * This allows the transfer element's length field to be used to hold an
 * immediate command's opcode.  The payload for a command resides in DRAM
 * and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback; to commit
 * an immediate command transaction, use either gsi_trans_commit_wait()
 * or gsi_trans_commit_wait_timeout().
 */
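
/* Illustrative sketch only (not code called in this file): a caller
 * typically allocates a command transaction, adds one or more immediate
 * commands to it, then commits it and waits for completion:
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (trans) {
 *		ipa_cmd_register_write_add(trans, offset, value, mask, false);
 *		gsi_trans_commit_wait(trans);
 *	}
 */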
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps	= 0,
	pipeline_clear_src_grp	= 1,
	pipeline_clear_full	= 2,
};
/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};
/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK			GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK			GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK			GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK			GENMASK_ULL(55, 40)
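
/* Worked example (illustrative): the 64-bit flags word packs both table
 * descriptors.  Encoding a non-hashed table size of 0x100 with
 * u64_encode_bits(0x100, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK) shifts the
 * value into bits 39-28, yielding 0x0000001000000000.
 */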
/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK		GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK		GENMASK(27, 12)
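
/* Note (illustrative): field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK)
 * is 0xfff and field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK) is 0xffff;
 * these bound the header region size and offset checked in
 * ipa_cmd_header_valid() below.
 */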
/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)
struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved for IPA v3.5.1 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0 and above */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is present for IPA v3.5.1 only */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are present for IPA v3.5.1 only */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)
/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;
	u8 reserved[7];
};

/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)
/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)
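
/* Example (illustrative): a v4.0+ DMA command requesting a full pipeline
 * clear would OR u16_encode_bits(pipeline_clear_full,
 * DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK) into the opcode, setting bits
 * 10-9 to 2.  Current code leaves both fields zero (see the comments in
 * ipa_cmd_dma_shared_mem_add()).
 */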
struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are present for IPA v3.5.1 only. */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)
/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)
/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};
static void ipa_cmd_validate_build(void)
{
	/* The sizes of filter and route tables need to fit into fields
	 * in the ipa_cmd_hw_ip_fltrt_init structure.  Although hashed tables
	 * might not be used, non-hashed and hashed tables have the same
	 * maximum size.  IPv4 and IPv6 filter tables have the same number
	 * of entries, and IPv4 and IPv6 route tables have the same number
	 * of entries.
	 */
#define TABLE_SIZE	(TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
#define TABLE_COUNT_MAX	max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE
}

#ifdef IPA_VALIDATE
/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
			 bool route, bool ipv6, bool hashed)
{
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;

	offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
			    : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region offset too large "
			"(0x%04x + 0x%04x > 0x%04x)\n",
			ipv6 ? '6' : '4', hashed ? "hashed " : "",
			route ? "route" : "filter",
			ipa->mem_offset, mem->offset, offset_max);
		return false;
	}

	if (mem->offset > ipa->mem_size ||
	    mem->size > ipa->mem_size - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region out of range "
			"(0x%04x + 0x%04x > 0x%04x)\n",
			ipv6 ? '6' : '4', hashed ? "hashed " : "",
			route ? "route" : "filter",
			mem->offset, mem->size, ipa->mem_size);
		return false;
	}

	return true;
}
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
	const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 size_max;
	u32 size;

	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "header table region offset too large "
			"(0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);
		return false;
	}

	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
	size += ipa->mem[IPA_MEM_AP_HEADER].size;
	if (size > size_max) {
		dev_err(dev, "header table region size too large "
			"(0x%04x > 0x%04x)\n", size, size_max);
		return false;
	}
	if (mem->offset > ipa->mem_size ||
	    size > ipa->mem_size - mem->offset) {
		dev_err(dev, "header table region out of range "
			"(0x%04x + 0x%04x > 0x%04x)\n",
			mem->offset, size, ipa->mem_size);
		return false;
	}

	return true;
}
/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 bit_count;

	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA.  IPA v3.5.1 supports a 16 bit offset, but
	 * newer versions allow some additional high-order bits.
	 */
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version != IPA_VERSION_3_5_1)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);
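
	/* Worked example (illustrative): payload->offset is a __le16, so
	 * bit_count starts at 16 and offset_max is 0xffff for IPA v3.5.1.
	 * For newer versions hweight32(GENMASK(14, 11)) adds 4 more bits,
	 * giving bit_count = 20 and offset_max = 0xfffff.
	 */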
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const char *name;
	u32 offset;

	offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
	name = "filter/route hash flush";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}
bool ipa_cmd_data_valid(struct ipa *ipa)
{
	if (!ipa_cmd_header_valid(ipa))
		return false;

	if (!ipa_cmd_register_write_valid(ipa))
		return false;

	return true;
}

#endif /* IPA_VALIDATE */
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;
	int ret;

	/* This is as good a place as any to validate build constants */
	ipa_cmd_validate_build();

	/* Even though command payloads are allocated one at a time,
	 * a single transaction can require up to tlv_count of them,
	 * so we treat them as if that many can be allocated at once.
	 */
	ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				      sizeof(union ipa_cmd_payload),
				      tre_max, channel->tlv_count);
	if (ret)
		return ret;

	/* Each TRE needs a command info structure */
	ret = gsi_trans_pool_init(&trans_info->info_pool,
				  sizeof(struct ipa_cmd_info),
				  tre_max, channel->tlv_count);
	if (ret)
		gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);

	return ret;
}
void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit(&trans_info->info_pool);
	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}
static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}
/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are zero if its size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside.  The content of the buffer provided is
	 * also written via DMA into that space.  The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	if (ipa->version != IPA_VERSION_3_5_1) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options */
		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;
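
		/* Worked example (illustrative): for offset 0x4abcd,
		 * u32_get_bits(offset, GENMASK(19, 16)) yields offset_high
		 * 0x4, and masking with (1 << 16) - 1 leaves offset 0xabcd.
		 */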
		/* Encode the high 4 bits into the flags field */
		flags = u16_encode_bits(offset_high,
					REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */
	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  DMA_NONE, opcode);
}
/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	/* assert(endpoint_id <
	 *	  field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK));
	 */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	payload->dest_endpoint =
		u8_encode_bits(endpoint_id,
			       IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	enum dma_data_direction direction;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	/* assert(size > 0 && size <= U16_MAX); */
	/* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0.  It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction:		0 = write to IPA, 1 = read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear:	0 = wait for pipeline clear (don't skip)
	 *   clear_options:	0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode.  But again
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	/* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	payload->tag = le64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum dma_data_direction direction = DMA_TO_DEVICE;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* assert(size <= sizeof(*payload)); */

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
void ipa_cmd_tag_process_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];

	ipa_cmd_register_write_add(trans, 0, 0, 0, true);
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
	ipa_cmd_transfer_add(trans, 4);
}
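
/* The sequence in ipa_cmd_tag_process_add() issues exactly four immediate
 * commands, which is the count ipa_cmd_tag_process_count() must return.
 */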
/* Returns the number of commands required for the tag process */
u32 ipa_cmd_tag_process_count(void)
{
	return 4;
}
void ipa_cmd_tag_process(struct ipa *ipa)
{
	u32 count = ipa_cmd_tag_process_count();
	struct gsi_trans *trans;

	trans = ipa_cmd_trans_alloc(ipa, count);
	if (trans) {
		ipa_cmd_tag_process_add(trans);
		gsi_trans_commit_wait(trans);
	} else {
		dev_err(&ipa->pdev->dev,
			"error allocating %u entry tag transaction\n", count);
	}
}
static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
	struct gsi_channel *channel;

	channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];

	return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
}
/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;
	struct gsi_trans *trans;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
					tre_count, DMA_NONE);
	if (trans)
		trans->info = ipa_cmd_info_alloc(endpoint, tre_count);

	return trans;
}