// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"

/**
 * DOC: IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and each is represented by a single GSI TRE.  Every
 * immediate command has a well-defined format, with a payload of a known
 * length.  This allows the transfer element's length field to be used to
 * hold an immediate command's opcode.  The payload for a command resides
 * in DRAM and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback.  To commit
 * an immediate command transaction, either gsi_trans_commit_wait() or
 * gsi_trans_commit_wait_timeout() is used.
 */

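/* An illustrative sequence (offset, value, and mask are placeholders, and
 * this is not taken from a specific call site): allocate a command
 * transaction, add one or more immediate commands, then commit the
 * transaction and wait for it to complete.
 *
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (trans) {
 *		ipa_cmd_register_write_add(trans, offset, value, mask, false);
 *		gsi_trans_commit_wait(trans);
 *	}
 */
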
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps	= 0,
	pipeline_clear_src_grp	= 1,
	pipeline_clear_full	= 2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK			GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK			GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK			GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK			GENMASK_ULL(55, 40)

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK		GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK		GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved for IPA v3.5.1 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0 and above */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is present for IPA v3.5.1 only */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are present for IPA v3.5.1 only */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;
	u8 reserved[7];
};

/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)
/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read;	/* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are present for IPA v3.5.1 only. */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};

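/* Check at build time that command structure fields are big enough to hold
 * the largest filter/route table size.
 */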
static void ipa_cmd_validate_build(void)
{
	/* The sizes of filter and route tables need to fit into fields
	 * in the ipa_cmd_hw_ip_fltrt_init structure.  Although hashed tables
	 * might not be used, non-hashed and hashed tables have the same
	 * maximum size.  IPv4 and IPv6 filter tables have the same number
	 * of entries, as do IPv4 and IPv6 route tables.
	 */
#define TABLE_SIZE	(TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
#define TABLE_COUNT_MAX	max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE
}

#ifdef IPA_VALIDATE

/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
			 bool route, bool ipv6, bool hashed)
{
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;

	offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
			    : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region offset too large "
			"(0x%04x + 0x%04x > 0x%04x)\n",
			ipv6 ? '6' : '4', hashed ? "hashed " : "",
			route ? "route" : "filter",
			ipa->mem_offset, mem->offset, offset_max);
		return false;
	}

	if (mem->offset > ipa->mem_size ||
	    mem->size > ipa->mem_size - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region out of range "
			"(0x%04x + 0x%04x > 0x%04x)\n",
			ipv6 ? '6' : '4', hashed ? "hashed " : "",
			route ? "route" : "filter",
			mem->offset, mem->size, ipa->mem_size);
		return false;
	}

	return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
	const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 size_max;
	u32 size;

	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "header table region offset too large "
			"(0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);
		return false;
	}

	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
	size += ipa->mem[IPA_MEM_AP_HEADER].size;

	/* The total header size must fit in the HDR_INIT_LOCAL size field */
	if (size > size_max) {
		dev_err(dev, "header table region size too large "
			"(0x%04x > 0x%04x)\n", size, size_max);
		return false;
	}

	if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) {
		dev_err(dev, "header table region out of range "
			"(0x%04x + 0x%04x > 0x%04x)\n",
			mem->offset, size, ipa->mem_size);
		return false;
	}

	return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 bit_count;

	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA.  IPA v3.5.1 supports a 16 bit offset, but
	 * newer versions allow some additional high-order bits.
	 */
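	/* For example (illustrative arithmetic, not part of the original
	 * comment): the OFFSET_HIGH field adds 4 bits on newer versions,
	 * so bit_count is 16 + 4 = 20 and offset_max is 0xfffff.
	 */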
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version != IPA_VERSION_3_5_1)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);

	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}

/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const char *name;
	u32 offset;

	offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
	name = "filter/route hash flush";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}

bool ipa_cmd_data_valid(struct ipa *ipa)
{
	if (!ipa_cmd_header_valid(ipa))
		return false;

	if (!ipa_cmd_register_write_valid(ipa))
		return false;

	return true;
}

#endif /* IPA_VALIDATE */

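/* Set up the command payload and command info pools for a command channel */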
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;
	int ret;

	/* This is as good a place as any to validate build constants */
	ipa_cmd_validate_build();

	/* Even though command payloads are allocated one at a time,
	 * a single transaction can require up to tlv_count of them,
	 * so we treat them as if that many can be allocated at once.
	 */
	ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				      sizeof(union ipa_cmd_payload),
				      tre_max, channel->tlv_count);
	if (ret)
		return ret;

	/* Each TRE needs a command info structure */
	ret = gsi_trans_pool_init(&trans_info->info_pool,
				  sizeof(struct ipa_cmd_info),
				  tre_max, channel->tlv_count);
	if (ret)
		gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);

	return ret;
}

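/* Inverse of ipa_cmd_pool_init(); free the command info and payload pools */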
void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit(&trans_info->info_pool);
	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

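/* Allocate a command payload from the command TX channel's DMA pool.  The
 * payload's DMA address is returned via the addr pointer.
 */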
static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are zero if its size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside.  The content of the buffer provided is
	 * also written via DMA into that space.  The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

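/* Add a register write immediate command to a transaction.  The value is
 * written under the given mask to the register at the given offset.  If
 * clear_full is set, a full pipeline clear is requested rather than an
 * HPS clear.
 */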
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	if (ipa->version != IPA_VERSION_3_5_1) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options */
		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;

		/* Encode the extracted high-order bits in the flags field */
		flags = u16_encode_bits(offset_high,
					REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */

	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  DMA_NONE, opcode);
}

/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	/* assert(endpoint_id <
		  field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	payload->dest_endpoint = u8_encode_bits(endpoint_id,
						IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	enum dma_data_direction direction;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	/* assert(size > 0 && size <= U16_MAX); */
	/* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0.  It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction:		0 = write to IPA, 1 = read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear:	0 = wait for pipeline clear (don't skip)
	 *   clear_options:	0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode.  But again
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

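/* Illustrative use (hypothetical, not a specific call site): to read a block
 * of IPA-local memory into a DMA-able buffer at addr, a caller could do:
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (trans) {
 *		ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, false);
 *		gsi_trans_commit_wait(trans);
 *	}
 */

/* Add an IP packet tag status immediate command carrying the given tag */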
static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	/* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	payload->tag = le64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum dma_data_direction direction = DMA_TO_DEVICE;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* assert(size <= sizeof(*payload)); */

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

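/* The tag process is a fixed sequence of four commands: a register write
 * requesting a full pipeline clear, a packet init directed at the AP LAN RX
 * endpoint, a tag status command, and a small dummy transfer.  This sequence
 * must match the count returned by ipa_cmd_tag_process_count().
 */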
void ipa_cmd_tag_process_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];

	ipa_cmd_register_write_add(trans, 0, 0, 0, true);
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
	ipa_cmd_transfer_add(trans, 4);
}

/* Returns the number of commands required for the tag process */
u32 ipa_cmd_tag_process_count(void)
{
	return 4;
}

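/* Allocate a transaction, add the tag process commands to it, then commit
 * the transaction and wait for it to complete.
 */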
void ipa_cmd_tag_process(struct ipa *ipa)
{
	u32 count = ipa_cmd_tag_process_count();
	struct gsi_trans *trans;

	trans = ipa_cmd_trans_alloc(ipa, count);
	if (trans) {
		ipa_cmd_tag_process_add(trans);
		gsi_trans_commit_wait(trans);
	} else {
		dev_err(&ipa->pdev->dev,
			"error allocating %u entry tag transaction\n", count);
	}
}

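/* Allocate command info structures (one per TRE) from the info pool of the
 * channel used by the given endpoint.
 */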
static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
	struct gsi_channel *channel;

	channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];

	return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
}

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;
	struct gsi_trans *trans;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
					tre_count, DMA_NONE);
	if (trans)
		trans->info = ipa_cmd_info_alloc(endpoint, tre_count);

	return trans;
}