/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	0xa0

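/*
 * For illustration: with lpi_id_bits = 16 (the ITS_MAX_LPI_NRBITS cap
 * applied in its_alloc_lpi_tables()), LPI_PROPBASE_SZ works out to
 * ALIGN(64K, 64K) = 64K (one configuration byte per LPI) and
 * LPI_PENDBASE_SZ to ALIGN(64K / 8, 64K) = 64K (one pending bit per
 * LPI, rounded up to the required 64K alignment).
 */
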
/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
        u64                     target_address;
        u16                     col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
        void                    *base;
        u64                     val;
        u32                     order;
        u32                     psz;
};

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
        raw_spinlock_t          lock;
        struct list_head        entry;
        void __iomem            *base;
        phys_addr_t             phys_base;
        struct its_cmd_block    *cmd_base;
        struct its_cmd_block    *cmd_write;
        struct its_baser        tables[GITS_BASER_NR_REGS];
        struct its_collection   *collections;
        struct list_head        its_device_list;
        u64                     flags;
        u32                     ite_size;
        u32                     device_ids;
        int                     numa_node;
        bool                    is_v4;
};

#define ITS_ITT_ALIGN           SZ_256

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)   (PAGE_SIZE << (o))

struct event_lpi_map {
        unsigned long           *lpi_map;
        u16                     *col_map;
        irq_hw_number_t         lpi_base;
        int                     nr_lpis;
        struct mutex            vlpi_lock;
        struct its_vm           *vm;
        struct its_vlpi_map     *vlpi_maps;
        int                     nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
        struct list_head        entry;
        struct its_node         *its;
        struct event_lpi_map    event_map;
        void                    *itt;
        u32                     nr_ites;
        u32                     device_id;
};

static struct {
        raw_spinlock_t          lock;
        struct its_device       *dev;
        struct its_vpe          **vpes;
        int                     next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

/*
 * We have a maximum number of 16 ITSs in the whole system if we're
 * using the ITSList mechanism
 */
#define ITS_LIST_MAX            16

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()                (raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()      (gic_data_rdist_rd_base() + SZ_128K)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
                                               u32 event)
{
        struct its_node *its = its_dev->its;

        return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
        union {
                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_inv_cmd;

                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_clear_cmd;

                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_int_cmd;

                struct {
                        struct its_device *dev;
                        int valid;
                } its_mapd_cmd;

                struct {
                        struct its_collection *col;
                        int valid;
                } its_mapc_cmd;

                struct {
                        struct its_device *dev;
                        u32 phys_id;
                        u32 event_id;
                } its_mapti_cmd;

                struct {
                        struct its_device *dev;
                        struct its_collection *col;
                        u32 event_id;
                } its_movi_cmd;

                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_discard_cmd;

                struct {
                        struct its_collection *col;
                } its_invall_cmd;

                struct {
                        struct its_vpe *vpe;
                } its_vinvall_cmd;

                struct {
                        struct its_vpe *vpe;
                        struct its_collection *col;
                        bool valid;
                } its_vmapp_cmd;

                struct {
                        struct its_vpe *vpe;
                        struct its_device *dev;
                        u32 virt_id;
                        u32 event_id;
                        bool db_enabled;
                } its_vmapti_cmd;

                struct {
                        struct its_vpe *vpe;
                        struct its_device *dev;
                        u32 event_id;
                        bool db_enabled;
                } its_vmovi_cmd;

                struct {
                        struct its_vpe *vpe;
                        struct its_collection *col;
                        u16 seq_num;
                        u16 its_list;
                } its_vmovp_cmd;
        };
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
        u64     raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ                SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES        (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
                                                    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
                                              struct its_cmd_desc *);

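/*
 * Note on sizing (for illustration): each its_cmd_block is four u64s,
 * i.e. 32 bytes, so the 64K command queue above holds
 * ITS_CMD_QUEUE_NR_ENTRIES = 64K / 32 = 2048 commands.
 */
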
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
        u64 mask = GENMASK_ULL(h, l);
        *raw_cmd &= ~mask;
        *raw_cmd |= (val << l) & mask;
}

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
        its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
        its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
        its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
        its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
        its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
        its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
        its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
        its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
        its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
        its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
        its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
        its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
        its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
        its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
        its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
        its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
        its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
        /* Let's fixup BE commands */
        cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
        cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
        cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
        cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

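/*
 * Example of the field encoding above: its_encode_devid(cmd, devid)
 * calls its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32), which builds
 * GENMASK_ULL(63, 32), clears those bits in raw_cmd[0] and ORs in
 * (devid << 32) - i.e. the DeviceID lands in bits [63:32] of the first
 * command doubleword.
 */
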
static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        unsigned long itt_addr;
        u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

        itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
        itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

        its_encode_cmd(cmd, GITS_CMD_MAPD);
        its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
        its_encode_size(cmd, size - 1);
        its_encode_itt(cmd, itt_addr);
        its_encode_valid(cmd, desc->its_mapd_cmd.valid);

        its_fixup_cmd(cmd);

        return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_MAPC);
        its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
        its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
        its_encode_valid(cmd, desc->its_mapc_cmd.valid);

        its_fixup_cmd(cmd);

        return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
                                                  struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_mapti_cmd.dev,
                               desc->its_mapti_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_MAPTI);
        its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
        its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
        its_encode_collection(cmd, col->col_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_movi_cmd.dev,
                               desc->its_movi_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_MOVI);
        its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
        its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
                                                    struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_discard_cmd.dev,
                               desc->its_discard_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_DISCARD);
        its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
                                                struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_inv_cmd.dev,
                               desc->its_inv_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_INV);
        its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
                                                struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_int_cmd.dev,
                               desc->its_int_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_INT);
        its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_int_cmd.event_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
                                                  struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_clear_cmd.dev,
                               desc->its_clear_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_CLEAR);
        its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
                                                   struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_INVALL);
        its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

        its_fixup_cmd(cmd);

        return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
                                             struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_VINVALL);
        its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

        its_fixup_cmd(cmd);

        return desc->its_vinvall_cmd.vpe;
}

static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
{
        unsigned long vpt_addr;

        vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));

        its_encode_cmd(cmd, GITS_CMD_VMAPP);
        its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
        its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
        its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
        its_encode_vpt_addr(cmd, vpt_addr);
        its_encode_vpt_size(cmd, LPI_NRBITS - 1);

        its_fixup_cmd(cmd);

        return desc->its_vmapp_cmd.vpe;
}

static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
                                            struct its_cmd_desc *desc)
{
        u32 db;

        if (desc->its_vmapti_cmd.db_enabled)
                db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
        else
                db = 1023;

        its_encode_cmd(cmd, GITS_CMD_VMAPTI);
        its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
        its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
        its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
        its_encode_db_phys_id(cmd, db);
        its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

        its_fixup_cmd(cmd);

        return desc->its_vmapti_cmd.vpe;
}

static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
{
        u32 db;

        if (desc->its_vmovi_cmd.db_enabled)
                db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
        else
                db = 1023;

        its_encode_cmd(cmd, GITS_CMD_VMOVI);
        its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
        its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
        its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
        its_encode_db_phys_id(cmd, db);
        its_encode_db_valid(cmd, true);

        its_fixup_cmd(cmd);

        return desc->its_vmovi_cmd.vpe;
}

static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_VMOVP);
        its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
        its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
        its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
        its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);

        its_fixup_cmd(cmd);

        return desc->its_vmovp_cmd.vpe;
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
                                 struct its_cmd_block *ptr)
{
        return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
        int widx;
        int ridx;

        widx = its->cmd_write - its->cmd_base;
        ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

        /* This is incredibly unlikely to happen, unless the ITS locks up. */
        if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
                return 1;

        return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
        struct its_cmd_block *cmd;
        u32 count = 1000000;    /* 1s! */

        while (its_queue_full(its)) {
                count--;
                if (!count) {
                        pr_err_ratelimited("ITS queue not draining\n");
                        return NULL;
                }
                cpu_relax();
                udelay(1);
        }

        cmd = its->cmd_write++;

        /* Handle queue wrapping */
        if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
                its->cmd_write = its->cmd_base;

        /* Clear command */
        cmd->raw_cmd[0] = 0;
        cmd->raw_cmd[1] = 0;
        cmd->raw_cmd[2] = 0;
        cmd->raw_cmd[3] = 0;

        return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
        u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

        writel_relaxed(wr, its->base + GITS_CWRITER);

        return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
        /*
         * Make sure the commands written to memory are observable by
         * the ITS.
         */
        if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
                gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
        else
                dsb(ishst);
}

static void its_wait_for_range_completion(struct its_node *its,
                                          struct its_cmd_block *from,
                                          struct its_cmd_block *to)
{
        u64 rd_idx, from_idx, to_idx;
        u32 count = 1000000;    /* 1s! */

        from_idx = its_cmd_ptr_to_offset(its, from);
        to_idx = its_cmd_ptr_to_offset(its, to);

        while (1) {
                rd_idx = readl_relaxed(its->base + GITS_CREADR);

                /* Direct case */
                if (from_idx < to_idx && rd_idx >= to_idx)
                        break;

                /* Wrapped case */
                if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
                        break;

                count--;
                if (!count) {
                        pr_err_ratelimited("ITS queue timeout\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

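/*
 * Command flow in a nutshell: its_allocate_entry() claims (and zeroes)
 * the slot at cmd_write, spinning while the ring is full; the builder
 * fills it in; its_flush_cmd() makes it visible to the ITS (cache
 * clean or dsb, depending on ITS_FLAGS_CMDQ_NEEDS_FLUSHING);
 * its_post_commands() publishes the new write offset via GITS_CWRITER;
 * and its_wait_for_range_completion() polls GITS_CREADR until the
 * hardware has consumed the posted range.
 */
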
/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)      \
void name(struct its_node *its,                                         \
          buildtype builder,                                            \
          struct its_cmd_desc *desc)                                    \
{                                                                       \
        struct its_cmd_block *cmd, *sync_cmd, *next_cmd;                \
        synctype *sync_obj;                                             \
        unsigned long flags;                                            \
                                                                        \
        raw_spin_lock_irqsave(&its->lock, flags);                       \
                                                                        \
        cmd = its_allocate_entry(its);                                  \
        if (!cmd) {             /* We're soooooo screwed... */          \
                raw_spin_unlock_irqrestore(&its->lock, flags);          \
                return;                                                 \
        }                                                               \
        sync_obj = builder(cmd, desc);                                  \
        its_flush_cmd(its, cmd);                                        \
                                                                        \
        if (sync_obj) {                                                 \
                sync_cmd = its_allocate_entry(its);                     \
                if (!sync_cmd)                                          \
                        goto post;                                      \
                                                                        \
                buildfn(sync_cmd, sync_obj);                            \
                its_flush_cmd(its, sync_cmd);                           \
        }                                                               \
                                                                        \
post:                                                                   \
        next_cmd = its_post_commands(its);                              \
        raw_spin_unlock_irqrestore(&its->lock, flags);                  \
                                                                        \
        its_wait_for_range_completion(its, cmd, next_cmd);              \
}

static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
                               struct its_collection *sync_col)
{
        its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
        its_encode_target(sync_cmd, sync_col->target_address);

        its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
                             struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
                                struct its_vpe *sync_vpe)
{
        its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
        its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

        its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
                             struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
        struct its_cmd_desc desc;

        desc.its_int_cmd.dev = dev;
        desc.its_int_cmd.event_id = event_id;

        its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
        struct its_cmd_desc desc;

        desc.its_clear_cmd.dev = dev;
        desc.its_clear_cmd.event_id = event_id;

        its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
        struct its_cmd_desc desc;

        desc.its_inv_cmd.dev = dev;
        desc.its_inv_cmd.event_id = event_id;

        its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
        struct its_cmd_desc desc;

        desc.its_mapd_cmd.dev = dev;
        desc.its_mapd_cmd.valid = !!valid;

        its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
                          int valid)
{
        struct its_cmd_desc desc;

        desc.its_mapc_cmd.col = col;
        desc.its_mapc_cmd.valid = !!valid;

        its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_mapti_cmd.dev = dev;
        desc.its_mapti_cmd.phys_id = irq_id;
        desc.its_mapti_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
                          struct its_collection *col, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_movi_cmd.dev = dev;
        desc.its_movi_cmd.col = col;
        desc.its_movi_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_discard_cmd.dev = dev;
        desc.its_discard_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
        struct its_cmd_desc desc;

        desc.its_invall_cmd.col = col;

        its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
        struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
        struct its_cmd_desc desc;

        desc.its_vmapti_cmd.vpe = map->vpe;
        desc.its_vmapti_cmd.dev = dev;
        desc.its_vmapti_cmd.virt_id = map->vintid;
        desc.its_vmapti_cmd.event_id = id;
        desc.its_vmapti_cmd.db_enabled = map->db_enabled;

        its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
        struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
        struct its_cmd_desc desc;

        desc.its_vmovi_cmd.vpe = map->vpe;
        desc.its_vmovi_cmd.dev = dev;
        desc.its_vmovi_cmd.event_id = id;
        desc.its_vmovi_cmd.db_enabled = map->db_enabled;

        its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_vpe *vpe, bool valid)
{
        struct its_cmd_desc desc;
        struct its_node *its;

        desc.its_vmapp_cmd.vpe = vpe;
        desc.its_vmapp_cmd.valid = valid;

        list_for_each_entry(its, &its_nodes, entry) {
                if (!its->is_v4)
                        continue;

                desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
                its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
        }
}

static void its_send_vmovp(struct its_vpe *vpe)
{
        struct its_cmd_desc desc;
        struct its_node *its;
        unsigned long flags;
        int col_id = vpe->col_idx;

        desc.its_vmovp_cmd.vpe = vpe;
        desc.its_vmovp_cmd.its_list = (u16)its_list_map;

        if (!its_list_map) {
                its = list_first_entry(&its_nodes, struct its_node, entry);
                desc.its_vmovp_cmd.seq_num = 0;
                desc.its_vmovp_cmd.col = &its->collections[col_id];
                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
                return;
        }

        /*
         * Yet another marvel of the architecture. If using the
         * its_list "feature", we need to make sure that all ITSs
         * receive all VMOVP commands in the same order. The only way
         * to guarantee this is to make vmovp a serialization point.
         *
         * Wall <-- Head.
         */
        raw_spin_lock_irqsave(&vmovp_lock, flags);

        desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

        /* Emit VMOVPs */
        list_for_each_entry(its, &its_nodes, entry) {
                if (!its->is_v4)
                        continue;

                desc.its_vmovp_cmd.col = &its->collections[col_id];
                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
        }

        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_vpe *vpe)
{
        struct its_cmd_desc desc;
        struct its_node *its;

        desc.its_vinvall_cmd.vpe = vpe;

        list_for_each_entry(its, &its_nodes, entry) {
                if (!its->is_v4)
                        continue;
                its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
        }
}

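/*
 * Unlike their physical counterparts, VMAPP, VMOVP and VINVALL are
 * broadcast to every v4-capable ITS in the system: a vPE may receive
 * VLPIs through any of them, so they must all share the same view of
 * the vPE. VMOVP additionally holds vmovp_lock so that all ITSs
 * observe the same sequence number ordering.
 */
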
/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
        irq_hw_number_t hwirq;
        struct page *prop_page;
        u8 *cfg;

        if (irqd_is_forwarded_to_vcpu(d)) {
                struct its_device *its_dev = irq_data_get_irq_chip_data(d);
                u32 event = its_get_event_id(d);

                prop_page = its_dev->event_map.vm->vprop_page;
                hwirq = its_dev->event_map.vlpi_maps[event].vintid;
        } else {
                prop_page = gic_rdists->prop_page;
                hwirq = d->hwirq;
        }

        cfg = page_address(prop_page) + hwirq - 8192;
        *cfg &= ~clr;
        *cfg |= set | LPI_PROP_GROUP1;

        /*
         * Make the above write visible to the redistributors.
         * And yes, we're flushing exactly: One. Single. Byte.
         * Humpf...
         */
        if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
                gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
        else
                dsb(ishst);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);

        lpi_write_config(d, clr, set);
        its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);

        if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
                return;

        its_dev->event_map.vlpi_maps[event].db_enabled = enable;

        /*
         * More fun with the architecture:
         *
         * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
         * value or to 1023, depending on the enable bit. But that
         * would be issuing a mapping for an /existing/ DevID+EventID
         * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
         * to the /same/ vPE, using this opportunity to adjust the
         * doorbell. Mouahahahaha. We loves it, Precious.
         */
        its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
        if (irqd_is_forwarded_to_vcpu(d))
                its_vlpi_set_doorbell(d, false);

        lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
        if (irqd_is_forwarded_to_vcpu(d))
                its_vlpi_set_doorbell(d, true);

        lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

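/*
 * Masking an LPI therefore boils down to clearing (or setting)
 * LPI_PROP_ENABLED in the property table and sending an INV so the
 * redistributor reloads the configuration. For interrupts forwarded
 * to a vCPU, the doorbell is switched off/on first via the VMOVI
 * trick described in its_vlpi_set_doorbell().
 */
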
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu;
        const struct cpumask *cpu_mask = cpu_online_mask;
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_collection *target_col;
        u32 id = its_get_event_id(d);

        /* A forwarded interrupt should use irq_set_vcpu_affinity */
        if (irqd_is_forwarded_to_vcpu(d))
                return -EINVAL;

        /* lpi cannot be routed to a redistributor that is on a foreign node */
        if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
                if (its_dev->its->numa_node >= 0) {
                        cpu_mask = cpumask_of_node(its_dev->its->numa_node);
                        if (!cpumask_intersects(mask_val, cpu_mask))
                                return -EINVAL;
                }
        }

        cpu = cpumask_any_and(mask_val, cpu_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* don't set the affinity when the target cpu is same as current one */
        if (cpu != its_dev->event_map.col_map[id]) {
                target_col = &its_dev->its->collections[cpu];
                its_send_movi(its_dev, target_col, id);
                its_dev->event_map.col_map[id] = cpu;
        }

        return IRQ_SET_MASK_OK_DONE;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_node *its;
        u64 addr;

        its = its_dev->its;
        addr = its->phys_base + GITS_TRANSLATER;

        msg->address_lo         = lower_32_bits(addr);
        msg->address_hi         = upper_32_bits(addr);
        msg->data               = its_get_event_id(d);

        iommu_dma_map_msi_msg(d->irq, msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which,
                                     bool state)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);

        if (which != IRQCHIP_STATE_PENDING)
                return -EINVAL;

        if (state)
                its_send_int(its_dev, event);
        else
                its_send_clear(its_dev, event);

        return 0;
}

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        int ret = 0;

        if (!info->map)
                return -EINVAL;

        mutex_lock(&its_dev->event_map.vlpi_lock);

        if (!its_dev->event_map.vm) {
                struct its_vlpi_map *maps;

                maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
                               GFP_KERNEL);
                if (!maps) {
                        ret = -ENOMEM;
                        goto out;
                }

                its_dev->event_map.vm = info->map->vm;
                its_dev->event_map.vlpi_maps = maps;
        } else if (its_dev->event_map.vm != info->map->vm) {
                ret = -EINVAL;
                goto out;
        }

        /* Get our private copy of the mapping information */
        its_dev->event_map.vlpi_maps[event] = *info->map;

        if (irqd_is_forwarded_to_vcpu(d)) {
                /* Already mapped, move it around */
                its_send_vmovi(its_dev, event);
        } else {
                /* Drop the physical mapping */
                its_send_discard(its_dev, event);

                /* and install the virtual one */
                its_send_vmapti(its_dev, event);
                irqd_set_forwarded_to_vcpu(d);

                /* Increment the number of VLPIs */
                its_dev->event_map.nr_vlpis++;
        }

out:
        mutex_unlock(&its_dev->event_map.vlpi_lock);
        return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        int ret = 0;

        mutex_lock(&its_dev->event_map.vlpi_lock);

        if (!its_dev->event_map.vm ||
            !its_dev->event_map.vlpi_maps[event].vm) {
                ret = -EINVAL;
                goto out;
        }

        /* Copy our mapping information to the incoming request */
        *info->map = its_dev->event_map.vlpi_maps[event];

out:
        mutex_unlock(&its_dev->event_map.vlpi_lock);
        return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        int ret = 0;

        mutex_lock(&its_dev->event_map.vlpi_lock);

        if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
                ret = -EINVAL;
                goto out;
        }

        /* Drop the virtual mapping */
        its_send_discard(its_dev, event);

        /* and restore the physical one */
        irqd_clr_forwarded_to_vcpu(d);
        its_send_mapti(its_dev, d->hwirq, event);
        lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
                                    LPI_PROP_ENABLED |
                                    LPI_PROP_GROUP1));

        /*
         * Drop the refcount and make the device available again if
         * this was the last VLPI.
         */
        if (!--its_dev->event_map.nr_vlpis) {
                its_dev->event_map.vm = NULL;
                kfree(its_dev->event_map.vlpi_maps);
        }

out:
        mutex_unlock(&its_dev->event_map.vlpi_lock);
        return ret;
}

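/*
 * Lifecycle note: the per-device vlpi_maps array is allocated on the
 * first MAP_VLPI for that device (while event_map.vm is still NULL),
 * each mapped event bumps nr_vlpis, and the array (plus the vm
 * association) is torn down in its_vlpi_unmap() once the last VLPI
 * is unmapped.
 */
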
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);

        if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
                return -EINVAL;

        if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
                lpi_update_config(d, 0xff, info->config);
        else
                lpi_write_config(d, 0xff, info->config);
        its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

        return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_cmd_info *info = vcpu_info;

        /* Need a v4 ITS */
        if (!its_dev->its->is_v4)
                return -EINVAL;

        /* Unmap request? */
        if (!info)
                return its_vlpi_unmap(d);

        switch (info->cmd_type) {
        case MAP_VLPI:
                return its_vlpi_map(d, info);

        case GET_VLPI:
                return its_vlpi_get(d, info);

        case PROP_UPDATE_VLPI:
        case PROP_UPDATE_AND_INV_VLPI:
                return its_vlpi_prop_update(d, info);

        default:
                return -EINVAL;
        }
}

static struct irq_chip its_irq_chip = {
        .name                   = "ITS",
        .irq_mask               = its_mask_irq,
        .irq_unmask             = its_unmask_irq,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = its_set_affinity,
        .irq_compose_msi_msg    = its_irq_compose_msi_msg,
        .irq_set_irqchip_state  = its_irq_set_irqchip_state,
        .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits to the right.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT    5
#define IRQS_PER_CHUNK          (1 << IRQS_PER_CHUNK_SHIFT)
#define ITS_MAX_LPI_NRBITS      16 /* 64K LPIs */

static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

static int its_lpi_to_chunk(int lpi)
{
        return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int its_chunk_to_lpi(int chunk)
{
        return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

static int __init its_lpi_init(u32 id_bits)
{
        lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

        lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
                             GFP_KERNEL);
        if (!lpi_bitmap) {
                lpi_chunks = 0;
                return -ENOMEM;
        }

        pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
        return 0;
}

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
        unsigned long *bitmap = NULL;
        int chunk_id;
        int nr_chunks;
        int i;

        nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

        spin_lock(&lpi_lock);

        do {
                chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
                                                      0, nr_chunks, 0);
                if (chunk_id < lpi_chunks)
                        break;

                nr_chunks--;
        } while (nr_chunks > 0);

        if (!nr_chunks)
                goto out;

        bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
                         GFP_ATOMIC);
        if (!bitmap)
                goto out;

        for (i = 0; i < nr_chunks; i++)
                set_bit(chunk_id + i, lpi_bitmap);

        *base = its_chunk_to_lpi(chunk_id);
        *nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
        spin_unlock(&lpi_lock);

        if (!bitmap)
                *base = *nr_ids = 0;

        return bitmap;
}

static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
{
        int lpi;

        spin_lock(&lpi_lock);

        for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
                int chunk = its_lpi_to_chunk(lpi);

                BUG_ON(chunk > lpi_chunks);
                if (test_bit(chunk, lpi_bitmap)) {
                        clear_bit(chunk, lpi_bitmap);
                } else {
                        pr_err("Bad LPI chunk %d\n", chunk);
                }
        }

        spin_unlock(&lpi_lock);

        kfree(bitmap);
}

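/*
 * Worked example of the chunk arithmetic: with id_bits = 16 there are
 * (65536 - 8192) >> 5 = 1792 chunks of 32 LPIs. An allocation request
 * is rounded up to DIV_ROUND_UP(nr_irqs, 32) chunks; if no contiguous
 * run of that size is free, the request is progressively shrunk one
 * chunk at a time before giving up.
 */
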
static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
        struct page *prop_page;

        prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
        if (!prop_page)
                return NULL;

        /* Priority 0xa0, Group-1, disabled */
        memset(page_address(prop_page),
               LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
               LPI_PROPBASE_SZ);

        /* Make sure the GIC will observe the written configuration */
        gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);

        return prop_page;
}

static void its_free_prop_table(struct page *prop_page)
{
        free_pages((unsigned long)page_address(prop_page),
                   get_order(LPI_PROPBASE_SZ));
}

static int __init its_alloc_lpi_tables(void)
{
        phys_addr_t paddr;

        lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
        gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
                return -ENOMEM;
        }

        paddr = page_to_phys(gic_rdists->prop_page);
        pr_info("GIC: using LPI property table @%pa\n", &paddr);

        return its_lpi_init(lpi_id_bits);
}

static const char *its_base_type_string[] = {
        [GITS_BASER_TYPE_DEVICE]        = "Devices",
        [GITS_BASER_TYPE_VCPU]          = "Virtual CPUs",
        [GITS_BASER_TYPE_RESERVED3]     = "Reserved (3)",
        [GITS_BASER_TYPE_COLLECTION]    = "Interrupt Collections",
        [GITS_BASER_TYPE_RESERVED5]     = "Reserved (5)",
        [GITS_BASER_TYPE_RESERVED6]     = "Reserved (6)",
        [GITS_BASER_TYPE_RESERVED7]     = "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
        u32 idx = baser - its->tables;

        return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
                            u64 val)
{
        u32 idx = baser - its->tables;

        gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
        baser->val = its_read_baser(its, baser);
}

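/*
 * The GITS_BASERn registers are consecutive 64-bit registers, hence
 * the (idx << 3) byte offset from GITS_BASER in the two accessors
 * above.
 */
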
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
                           u64 cache, u64 shr, u32 psz, u32 order,
                           bool indirect)
{
        u64 val = its_read_baser(its, baser);
        u64 esz = GITS_BASER_ENTRY_SIZE(val);
        u64 type = GITS_BASER_TYPE(val);
        u32 alloc_pages;
        void *base;
        u64 tmp;

retry_alloc_baser:
        alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
        if (alloc_pages > GITS_BASER_PAGES_MAX) {
                pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
                        &its->phys_base, its_base_type_string[type],
                        alloc_pages, GITS_BASER_PAGES_MAX);
                alloc_pages = GITS_BASER_PAGES_MAX;
                order = get_order(GITS_BASER_PAGES_MAX * psz);
        }

        base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!base)
                return -ENOMEM;

retry_baser:
        val = (virt_to_phys(base)                                |
               (type << GITS_BASER_TYPE_SHIFT)                   |
               ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)        |
               ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)     |
               cache                                             |
               shr                                               |
               GITS_BASER_VALID);

        val |= indirect ? GITS_BASER_INDIRECT : 0x0;

        switch (psz) {
        case SZ_4K:
                val |= GITS_BASER_PAGE_SIZE_4K;
                break;
        case SZ_16K:
                val |= GITS_BASER_PAGE_SIZE_16K;
                break;
        case SZ_64K:
                val |= GITS_BASER_PAGE_SIZE_64K;
                break;
        }

        its_write_baser(its, baser, val);
        tmp = baser->val;

        if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
                /*
                 * Shareability didn't stick. Just use
                 * whatever the read reported, which is likely
                 * to be the only thing this redistributor
                 * supports. If that's zero, make it
                 * non-cacheable as well.
                 */
                shr = tmp & GITS_BASER_SHAREABILITY_MASK;
                if (!shr) {
                        cache = GITS_BASER_nC;
                        gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
                }
                goto retry_baser;
        }

        if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
                /*
                 * Page size didn't stick. Let's try a smaller
                 * size and retry. If we reach 4K, then
                 * something is horribly wrong...
                 */
                free_pages((unsigned long)base, order);
                baser->base = NULL;

                switch (psz) {
                case SZ_16K:
                        psz = SZ_4K;
                        goto retry_alloc_baser;
                case SZ_64K:
                        psz = SZ_16K;
                        goto retry_alloc_baser;
                }
        }

        if (val != tmp) {
                pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
                       &its->phys_base, its_base_type_string[type],
                       val, tmp);
                free_pages((unsigned long)base, order);
                return -ENXIO;
        }

        baser->order = order;
        baser->base = base;
        baser->psz = psz;
        tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

        pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
                &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
                its_base_type_string[type],
                (unsigned long)virt_to_phys(base),
                indirect ? "indirect" : "flat", (int)esz,
                psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

        return 0;
}

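/*
 * Summary of the retry logic above: if the shareability bits don't
 * stick, fall back to whatever the ITS reports (and to non-cacheable
 * mappings plus explicit cache flushes if that is zero); if the page
 * size doesn't stick, the allocation is redone with the next smaller
 * size (64K -> 16K -> 4K).
 */
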
4cacac57 MZ |
1582 | static bool its_parse_indirect_baser(struct its_node *its, |
1583 | struct its_baser *baser, | |
1584 | u32 psz, u32 *order) | |
4b75c459 | 1585 | { |
4cacac57 MZ |
1586 | u64 tmp = its_read_baser(its, baser); |
1587 | u64 type = GITS_BASER_TYPE(tmp); | |
1588 | u64 esz = GITS_BASER_ENTRY_SIZE(tmp); | |
2fd632a0 | 1589 | u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; |
4b75c459 SD |
1590 | u32 ids = its->device_ids; |
1591 | u32 new_order = *order; | |
3faf24ea SD |
1592 | bool indirect = false; |
1593 | ||
1594 | /* No need to enable Indirection if memory requirement < (psz*2)bytes */ | |
1595 | if ((esz << ids) > (psz * 2)) { | |
1596 | /* | |
1597 | * Find out whether hw supports a single or two-level table by | |
1598 | * table by reading bit at offset '62' after writing '1' to it. | |
1599 | */ | |
1600 | its_write_baser(its, baser, val | GITS_BASER_INDIRECT); | |
1601 | indirect = !!(baser->val & GITS_BASER_INDIRECT); | |
1602 | ||
1603 | if (indirect) { | |
1604 | /* | |
1605 | * The size of a lvl2 table is equal to the ITS page size | |
1606 | * ('psz'). To size the lvl1 table, subtract from 'ids' (the | |
1607 | * DeviceID bits reported by the ITS hardware) the ID bits | |
1608 | * resolved by a single lvl2 table, then allocate one lvl1 | |
1609 | * entry per remaining ID. | |
1610 | */ | |
d524eaa2 | 1611 | ids -= ilog2(psz / (int)esz); |
3faf24ea SD |
1612 | esz = GITS_LVL1_ENTRY_SIZE; |
1613 | } | |
1614 | } | |
4b75c459 SD |
1615 | |
1616 | /* | |
1617 | * Allocate as many entries as required to fit the | |
1618 | * range of device IDs that the ITS can grok... The ID | |
1619 | * space being incredibly sparse, this results in a | |
3faf24ea SD |
1620 | * massive waste of memory if the two-level device table | |
1621 | * feature is not supported by the hardware. | |
4b75c459 SD |
1622 | */ |
1623 | new_order = max_t(u32, get_order(esz << ids), new_order); | |
1624 | if (new_order >= MAX_ORDER) { | |
1625 | new_order = MAX_ORDER - 1; | |
d524eaa2 | 1626 | ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); |
4cacac57 MZ |
1627 | pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", |
1628 | &its->phys_base, its_base_type_string[type], | |
1629 | its->device_ids, ids); | |
4b75c459 SD |
1630 | } |
1631 | ||
1632 | *order = new_order; | |
3faf24ea SD |
1633 | |
1634 | return indirect; | |
4b75c459 SD |
1635 | } |
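/*
 * Worked example for the sizing above (illustrative values, not tied
 * to any particular implementation): with the default psz = SZ_64K,
 * a Device table entry size esz = 8 bytes and ids = 20 DeviceID
 * bits, a flat table would need 8 << 20 = 8MB. With indirection,
 * a single lvl2 page resolves ilog2(65536 / 8) = 13 ID bits, so
 * 'ids' drops to 7 and the lvl1 table only needs 128 entries of
 * GITS_LVL1_ENTRY_SIZE (8 bytes) = 1kB up front; the 64kB lvl2
 * pages are then allocated on demand in its_alloc_table_entry().
 */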
1636 | ||
1ac19ca6 MZ |
1637 | static void its_free_tables(struct its_node *its) |
1638 | { | |
1639 | int i; | |
1640 | ||
1641 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
1a485f4d SD |
1642 | if (its->tables[i].base) { |
1643 | free_pages((unsigned long)its->tables[i].base, | |
1644 | its->tables[i].order); | |
1645 | its->tables[i].base = NULL; | |
1ac19ca6 MZ |
1646 | } |
1647 | } | |
1648 | } | |
1649 | ||
0e0b0f69 | 1650 | static int its_alloc_tables(struct its_node *its) |
1ac19ca6 | 1651 | { |
589ce5f4 | 1652 | u64 typer = gic_read_typer(its->base + GITS_TYPER); |
9347359a | 1653 | u32 ids = GITS_TYPER_DEVBITS(typer); |
1ac19ca6 | 1654 | u64 shr = GITS_BASER_InnerShareable; |
2fd632a0 | 1655 | u64 cache = GITS_BASER_RaWaWb; |
9347359a SD |
1656 | u32 psz = SZ_64K; |
1657 | int err, i; | |
94100970 RR |
1658 | |
1659 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { | |
1660 | /* | |
9347359a SD |
1661 | * erratum 22375: only alloc 8MB table size |
1662 | * erratum 24313: ignore memory access type | |
1663 | */ | |
1664 | cache = GITS_BASER_nCnB; | |
1665 | ids = 0x14; /* 20 bits, 8MB */ | |
94100970 | 1666 | } |
1ac19ca6 | 1667 | |
466b7d16 SD |
1668 | its->device_ids = ids; |
1669 | ||
1ac19ca6 | 1670 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
2d81d425 SD |
1671 | struct its_baser *baser = its->tables + i; |
1672 | u64 val = its_read_baser(its, baser); | |
1ac19ca6 | 1673 | u64 type = GITS_BASER_TYPE(val); |
9347359a | 1674 | u32 order = get_order(psz); |
3faf24ea | 1675 | bool indirect = false; |
1ac19ca6 | 1676 | |
4cacac57 MZ |
1677 | switch (type) { |
1678 | case GITS_BASER_TYPE_NONE: | |
1ac19ca6 MZ |
1679 | continue; |
1680 | ||
4cacac57 MZ |
1681 | case GITS_BASER_TYPE_DEVICE: |
1682 | case GITS_BASER_TYPE_VCPU: | |
1683 | indirect = its_parse_indirect_baser(its, baser, | |
1684 | psz, &order); | |
1685 | break; | |
1686 | } | |
f54b97ed | 1687 | |
3faf24ea | 1688 | err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); |
9347359a SD |
1689 | if (err < 0) { |
1690 | its_free_tables(its); | |
1691 | return err; | |
1ac19ca6 MZ |
1692 | } |
1693 | ||
9347359a SD |
1694 | /* Update settings which will be used for next BASERn */ |
1695 | psz = baser->psz; | |
1696 | cache = baser->val & GITS_BASER_CACHEABILITY_MASK; | |
1697 | shr = baser->val & GITS_BASER_SHAREABILITY_MASK; | |
1ac19ca6 MZ |
1698 | } |
1699 | ||
1700 | return 0; | |
1ac19ca6 MZ |
1701 | } |
1702 | ||
1703 | static int its_alloc_collections(struct its_node *its) | |
1704 | { | |
1705 | its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), | |
1706 | GFP_KERNEL); | |
1707 | if (!its->collections) | |
1708 | return -ENOMEM; | |
1709 | ||
1710 | return 0; | |
1711 | } | |
1712 | ||
7c297a2d MZ |
1713 | static struct page *its_allocate_pending_table(gfp_t gfp_flags) |
1714 | { | |
1715 | struct page *pend_page; | |
1716 | /* | |
1717 | * The pending pages have to be at least 64kB aligned, | |
1718 | * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. | |
1719 | */ | |
1720 | pend_page = alloc_pages(gfp_flags | __GFP_ZERO, | |
1721 | get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); | |
1722 | if (!pend_page) | |
1723 | return NULL; | |
1724 | ||
1725 | /* Make sure the GIC will observe the zero-ed page */ | |
1726 | gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); | |
1727 | ||
1728 | return pend_page; | |
1729 | } | |
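/*
 * Note on the alignment requirement above: alloc_pages() hands back
 * blocks that are naturally aligned to their own size, so asking for
 * at least a 64kB order is what guarantees the 64kB alignment that
 * GICR_PENDBASER expects for the pending table.
 */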
1730 | ||
7d75bbb4 MZ |
1731 | static void its_free_pending_table(struct page *pt) |
1732 | { | |
1733 | free_pages((unsigned long)page_address(pt), | |
1734 | get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); | |
1735 | } | |
1736 | ||
1ac19ca6 MZ |
1737 | static void its_cpu_init_lpis(void) |
1738 | { | |
1739 | void __iomem *rbase = gic_data_rdist_rd_base(); | |
1740 | struct page *pend_page; | |
1741 | u64 val, tmp; | |
1742 | ||
1743 | /* If we didn't allocate the pending table yet, do it now */ | |
1744 | pend_page = gic_data_rdist()->pend_page; | |
1745 | if (!pend_page) { | |
1746 | phys_addr_t paddr; | |
7c297a2d MZ |
1747 | |
1748 | pend_page = its_allocate_pending_table(GFP_NOWAIT); | |
1ac19ca6 MZ |
1749 | if (!pend_page) { |
1750 | pr_err("Failed to allocate PENDBASE for CPU%d\n", | |
1751 | smp_processor_id()); | |
1752 | return; | |
1753 | } | |
1754 | ||
1ac19ca6 MZ |
1755 | paddr = page_to_phys(pend_page); |
1756 | pr_info("CPU%d: using LPI pending table @%pa\n", | |
1757 | smp_processor_id(), &paddr); | |
1758 | gic_data_rdist()->pend_page = pend_page; | |
1759 | } | |
1760 | ||
1761 | /* Disable LPIs */ | |
1762 | val = readl_relaxed(rbase + GICR_CTLR); | |
1763 | val &= ~GICR_CTLR_ENABLE_LPIS; | |
1764 | writel_relaxed(val, rbase + GICR_CTLR); | |
1765 | ||
1766 | /* | |
1767 | * Make sure any change to the table is observable by the GIC. | |
1768 | */ | |
1769 | dsb(sy); | |
1770 | ||
1771 | /* set PROPBASE */ | |
1772 | val = (page_to_phys(gic_rdists->prop_page) | | |
1773 | GICR_PROPBASER_InnerShareable | | |
2fd632a0 | 1774 | GICR_PROPBASER_RaWaWb | |
1ac19ca6 MZ |
1775 | ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); |
1776 | ||
0968a619 VM |
1777 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
1778 | tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); | |
1ac19ca6 MZ |
1779 | |
1780 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { | |
241a386c MZ |
1781 | if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { |
1782 | /* | |
1783 | * The HW reports non-shareable, we must | |
1784 | * remove the cacheability attributes as | |
1785 | * well. | |
1786 | */ | |
1787 | val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | | |
1788 | GICR_PROPBASER_CACHEABILITY_MASK); | |
1789 | val |= GICR_PROPBASER_nC; | |
0968a619 | 1790 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
241a386c | 1791 | } |
1ac19ca6 MZ |
1792 | pr_info_once("GIC: using cache flushing for LPI property table\n"); |
1793 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; | |
1794 | } | |
1795 | ||
1796 | /* set PENDBASE */ | |
1797 | val = (page_to_phys(pend_page) | | |
4ad3e363 | 1798 | GICR_PENDBASER_InnerShareable | |
2fd632a0 | 1799 | GICR_PENDBASER_RaWaWb); |
1ac19ca6 | 1800 | |
0968a619 VM |
1801 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
1802 | tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); | |
241a386c MZ |
1803 | |
1804 | if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { | |
1805 | /* | |
1806 | * The HW reports non-shareable, we must remove the | |
1807 | * cacheability attributes as well. | |
1808 | */ | |
1809 | val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | | |
1810 | GICR_PENDBASER_CACHEABILITY_MASK); | |
1811 | val |= GICR_PENDBASER_nC; | |
0968a619 | 1812 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
241a386c | 1813 | } |
1ac19ca6 MZ |
1814 | |
1815 | /* Enable LPIs */ | |
1816 | val = readl_relaxed(rbase + GICR_CTLR); | |
1817 | val |= GICR_CTLR_ENABLE_LPIS; | |
1818 | writel_relaxed(val, rbase + GICR_CTLR); | |
1819 | ||
1820 | /* Make sure the GIC has seen the above */ | |
1821 | dsb(sy); | |
1822 | } | |
1823 | ||
1824 | static void its_cpu_init_collection(void) | |
1825 | { | |
1826 | struct its_node *its; | |
1827 | int cpu; | |
1828 | ||
1829 | spin_lock(&its_lock); | |
1830 | cpu = smp_processor_id(); | |
1831 | ||
1832 | list_for_each_entry(its, &its_nodes, entry) { | |
1833 | u64 target; | |
1834 | ||
fbf8f40e GK |
1835 | /* avoid cross-node collections and their mapping */ | |
1836 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | |
1837 | struct device_node *cpu_node; | |
1838 | ||
1839 | cpu_node = of_get_cpu_node(cpu, NULL); | |
1840 | if (its->numa_node != NUMA_NO_NODE && | |
1841 | its->numa_node != of_node_to_nid(cpu_node)) | |
1842 | continue; | |
1843 | } | |
1844 | ||
1ac19ca6 MZ |
1845 | /* |
1846 | * We now have to bind each collection to its target | |
1847 | * redistributor. | |
1848 | */ | |
589ce5f4 | 1849 | if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { |
1ac19ca6 MZ |
1850 | /* |
1851 | * This ITS wants the physical address of the | |
1852 | * redistributor. | |
1853 | */ | |
1854 | target = gic_data_rdist()->phys_base; | |
1855 | } else { | |
1856 | /* | |
1857 | * This ITS wants a linear CPU number. | |
1858 | */ | |
589ce5f4 | 1859 | target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); |
263fcd31 | 1860 | target = GICR_TYPER_CPU_NUMBER(target) << 16; |
1ac19ca6 MZ |
1861 | } |
1862 | ||
1863 | /* Perform collection mapping */ | |
1864 | its->collections[cpu].target_address = target; | |
1865 | its->collections[cpu].col_id = cpu; | |
1866 | ||
1867 | its_send_mapc(its, &its->collections[cpu], 1); | |
1868 | its_send_invall(its, &its->collections[cpu]); | |
1869 | } | |
1870 | ||
1871 | spin_unlock(&its_lock); | |
1872 | } | |
84a6a2e7 MZ |
1873 | |
1874 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | |
1875 | { | |
1876 | struct its_device *its_dev = NULL, *tmp; | |
3e39e8f5 | 1877 | unsigned long flags; |
84a6a2e7 | 1878 | |
3e39e8f5 | 1879 | raw_spin_lock_irqsave(&its->lock, flags); |
84a6a2e7 MZ |
1880 | |
1881 | list_for_each_entry(tmp, &its->its_device_list, entry) { | |
1882 | if (tmp->device_id == dev_id) { | |
1883 | its_dev = tmp; | |
1884 | break; | |
1885 | } | |
1886 | } | |
1887 | ||
3e39e8f5 | 1888 | raw_spin_unlock_irqrestore(&its->lock, flags); |
84a6a2e7 MZ |
1889 | |
1890 | return its_dev; | |
1891 | } | |
1892 | ||
466b7d16 SD |
1893 | static struct its_baser *its_get_baser(struct its_node *its, u32 type) |
1894 | { | |
1895 | int i; | |
1896 | ||
1897 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
1898 | if (GITS_BASER_TYPE(its->tables[i].val) == type) | |
1899 | return &its->tables[i]; | |
1900 | } | |
1901 | ||
1902 | return NULL; | |
1903 | } | |
1904 | ||
70cc81ed | 1905 | static bool its_alloc_table_entry(struct its_baser *baser, u32 id) |
3faf24ea | 1906 | { |
3faf24ea SD |
1907 | struct page *page; |
1908 | u32 esz, idx; | |
1909 | __le64 *table; | |
1910 | ||
3faf24ea SD |
1911 | /* Don't allow device id that exceeds single, flat table limit */ |
1912 | esz = GITS_BASER_ENTRY_SIZE(baser->val); | |
1913 | if (!(baser->val & GITS_BASER_INDIRECT)) | |
70cc81ed | 1914 | return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); |
3faf24ea SD |
1915 | |
1916 | /* Compute 1st level table index & check if that exceeds table limit */ | |
70cc81ed | 1917 | idx = id >> ilog2(baser->psz / esz); |
3faf24ea SD |
1918 | if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) |
1919 | return false; | |
1920 | ||
1921 | table = baser->base; | |
1922 | ||
1923 | /* Allocate memory for 2nd level table */ | |
1924 | if (!table[idx]) { | |
1925 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); | |
1926 | if (!page) | |
1927 | return false; | |
1928 | ||
1929 | /* Flush Lvl2 table to PoC if hw doesn't support coherency */ | |
1930 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) | |
328191c0 | 1931 | gic_flush_dcache_to_poc(page_address(page), baser->psz); |
3faf24ea SD |
1932 | |
1933 | table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); | |
1934 | ||
1935 | /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ | |
1936 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) | |
328191c0 | 1937 | gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); |
3faf24ea SD |
1938 | |
1939 | /* Ensure updated table contents are visible to ITS hardware */ | |
1940 | dsb(sy); | |
1941 | } | |
1942 | ||
1943 | return true; | |
1944 | } | |
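/*
 * Worked example for the lvl1 indexing above (illustrative numbers):
 * with baser->psz = SZ_64K and esz = 8 bytes, one lvl2 page holds
 * 65536 / 8 = 8192 entries, so idx = id >> 13. DeviceID 0x8001 thus
 * lands in lvl1 slot 4, and a zeroed 64kB lvl2 page is allocated the
 * first time any ID in the 0x8000-0x9fff range shows up.
 */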
1945 | ||
70cc81ed MZ |
1946 | static bool its_alloc_device_table(struct its_node *its, u32 dev_id) |
1947 | { | |
1948 | struct its_baser *baser; | |
1949 | ||
1950 | baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); | |
1951 | ||
1952 | /* Don't allow device id that exceeds ITS hardware limit */ | |
1953 | if (!baser) | |
1954 | return (ilog2(dev_id) < its->device_ids); | |
1955 | ||
1956 | return its_alloc_table_entry(baser, dev_id); | |
1957 | } | |
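/*
 * The no-BASER fallback above works because, for a non-zero dev_id,
 * ilog2(dev_id) < its->device_ids is equivalent to
 * dev_id < (1UL << its->device_ids), i.e. the ID fits within the
 * DeviceID bits the ITS advertises.
 */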
1958 | ||
7d75bbb4 MZ |
1959 | static bool its_alloc_vpe_table(u32 vpe_id) |
1960 | { | |
1961 | struct its_node *its; | |
1962 | ||
1963 | /* | |
1964 | * Make sure the L2 tables are allocated on *all* v4 ITSs. We | |
1965 | * could try and only do it on ITSs corresponding to devices | |
1966 | * that have interrupts targeted at this VPE, but the | |
1967 | * complexity becomes crazy (and you have tons of memory | |
1968 | * anyway, right?). | |
1969 | */ | |
1970 | list_for_each_entry(its, &its_nodes, entry) { | |
1971 | struct its_baser *baser; | |
1972 | ||
1973 | if (!its->is_v4) | |
1974 | continue; | |
1975 | ||
1976 | baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); | |
1977 | if (!baser) | |
1978 | return false; | |
1979 | ||
1980 | if (!its_alloc_table_entry(baser, vpe_id)) | |
1981 | return false; | |
1982 | } | |
1983 | ||
1984 | return true; | |
1985 | } | |
1986 | ||
84a6a2e7 | 1987 | static struct its_device *its_create_device(struct its_node *its, u32 dev_id, |
93f94ea0 | 1988 | int nvecs, bool alloc_lpis) |
84a6a2e7 MZ |
1989 | { |
1990 | struct its_device *dev; | |
93f94ea0 | 1991 | unsigned long *lpi_map = NULL; |
3e39e8f5 | 1992 | unsigned long flags; |
591e5bec | 1993 | u16 *col_map = NULL; |
84a6a2e7 MZ |
1994 | void *itt; |
1995 | int lpi_base; | |
1996 | int nr_lpis; | |
c8481267 | 1997 | int nr_ites; |
84a6a2e7 MZ |
1998 | int sz; |
1999 | ||
3faf24ea | 2000 | if (!its_alloc_device_table(its, dev_id)) |
466b7d16 SD |
2001 | return NULL; |
2002 | ||
84a6a2e7 | 2003 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
c8481267 MZ |
2004 | /* |
2005 | * At least one bit of EventID is being used, hence a minimum | |
2006 | * of two entries. No, the architecture doesn't let you | |
2007 | * express an ITT with a single entry. | |
2008 | */ | |
96555c47 | 2009 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); |
c8481267 | 2010 | sz = nr_ites * its->ite_size; |
84a6a2e7 | 2011 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
6c834125 | 2012 | itt = kzalloc(sz, GFP_KERNEL); |
93f94ea0 MZ |
2013 | if (alloc_lpis) { |
2014 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); | |
2015 | if (lpi_map) | |
2016 | col_map = kzalloc(sizeof(*col_map) * nr_lpis, | |
2017 | GFP_KERNEL); | |
2018 | } else { | |
2019 | col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL); | |
2020 | nr_lpis = 0; | |
2021 | lpi_base = 0; | |
2022 | } | |
84a6a2e7 | 2023 | |
93f94ea0 | 2024 | if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { |
84a6a2e7 MZ |
2025 | kfree(dev); |
2026 | kfree(itt); | |
2027 | kfree(lpi_map); | |
591e5bec | 2028 | kfree(col_map); |
84a6a2e7 MZ |
2029 | return NULL; |
2030 | } | |
2031 | ||
328191c0 | 2032 | gic_flush_dcache_to_poc(itt, sz); |
5a9a8915 | 2033 | |
84a6a2e7 MZ |
2034 | dev->its = its; |
2035 | dev->itt = itt; | |
c8481267 | 2036 | dev->nr_ites = nr_ites; |
591e5bec MZ |
2037 | dev->event_map.lpi_map = lpi_map; |
2038 | dev->event_map.col_map = col_map; | |
2039 | dev->event_map.lpi_base = lpi_base; | |
2040 | dev->event_map.nr_lpis = nr_lpis; | |
d011e4e6 | 2041 | mutex_init(&dev->event_map.vlpi_lock); |
84a6a2e7 MZ |
2042 | dev->device_id = dev_id; |
2043 | INIT_LIST_HEAD(&dev->entry); | |
2044 | ||
3e39e8f5 | 2045 | raw_spin_lock_irqsave(&its->lock, flags); |
84a6a2e7 | 2046 | list_add(&dev->entry, &its->its_device_list); |
3e39e8f5 | 2047 | raw_spin_unlock_irqrestore(&its->lock, flags); |
84a6a2e7 | 2048 | |
84a6a2e7 MZ |
2049 | /* Map device to its ITT */ |
2050 | its_send_mapd(dev, 1); | |
2051 | ||
2052 | return dev; | |
2053 | } | |
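/*
 * ITT sizing sketch (assuming an 8-byte ITE and the driver's
 * ITS_ITT_ALIGN of 256 bytes): a request for nvecs = 3 MSIs is
 * rounded up to nr_ites = 4, giving sz = 32 bytes, which the
 * max()/ALIGN slack then turns into a 511-byte kzalloc() so that
 * the ITT can always be placed on a 256-byte boundary when it is
 * handed to the MAPD command.
 */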
2054 | ||
2055 | static void its_free_device(struct its_device *its_dev) | |
2056 | { | |
3e39e8f5 MZ |
2057 | unsigned long flags; |
2058 | ||
2059 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); | |
84a6a2e7 | 2060 | list_del(&its_dev->entry); |
3e39e8f5 | 2061 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
84a6a2e7 MZ |
2062 | kfree(its_dev->itt); |
2063 | kfree(its_dev); | |
2064 | } | |
b48ac83d MZ |
2065 | |
2066 | static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | |
2067 | { | |
2068 | int idx; | |
2069 | ||
591e5bec MZ |
2070 | idx = find_first_zero_bit(dev->event_map.lpi_map, |
2071 | dev->event_map.nr_lpis); | |
2072 | if (idx == dev->event_map.nr_lpis) | |
b48ac83d MZ |
2073 | return -ENOSPC; |
2074 | ||
591e5bec MZ |
2075 | *hwirq = dev->event_map.lpi_base + idx; |
2076 | set_bit(idx, dev->event_map.lpi_map); | |
b48ac83d | 2077 | |
b48ac83d MZ |
2078 | return 0; |
2079 | } | |
2080 | ||
54456db9 MZ |
2081 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
2082 | int nvec, msi_alloc_info_t *info) | |
e8137f4f | 2083 | { |
b48ac83d | 2084 | struct its_node *its; |
b48ac83d | 2085 | struct its_device *its_dev; |
54456db9 MZ |
2086 | struct msi_domain_info *msi_info; |
2087 | u32 dev_id; | |
2088 | ||
2089 | /* | |
2090 | * We ignore "dev" entierely, and rely on the dev_id that has | |
2091 | * been passed via the scratchpad. This limits this domain's | |
2092 | * usefulness to upper layers that definitely know that they | |
2093 | * are built on top of the ITS. | |
2094 | */ | |
2095 | dev_id = info->scratchpad[0].ul; | |
2096 | ||
2097 | msi_info = msi_get_domain_info(domain); | |
2098 | its = msi_info->data; | |
e8137f4f | 2099 | |
20b3d54e MZ |
2100 | if (!gic_rdists->has_direct_lpi && |
2101 | vpe_proxy.dev && | |
2102 | vpe_proxy.dev->its == its && | |
2103 | dev_id == vpe_proxy.dev->device_id) { | |
2104 | /* Bad luck. Get yourself a better implementation */ | |
2105 | WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", | |
2106 | dev_id); | |
2107 | return -EINVAL; | |
2108 | } | |
2109 | ||
f130420e | 2110 | its_dev = its_find_device(its, dev_id); |
e8137f4f MZ |
2111 | if (its_dev) { |
2112 | /* | |
2113 | * We already have seen this ID, probably through | |
2114 | * another alias (PCI bridge of some sort). No need to | |
2115 | * create the device. | |
2116 | */ | |
f130420e | 2117 | pr_debug("Reusing ITT for devID %x\n", dev_id); |
e8137f4f MZ |
2118 | goto out; |
2119 | } | |
b48ac83d | 2120 | |
93f94ea0 | 2121 | its_dev = its_create_device(its, dev_id, nvec, true); |
b48ac83d MZ |
2122 | if (!its_dev) |
2123 | return -ENOMEM; | |
2124 | ||
f130420e | 2125 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); |
e8137f4f | 2126 | out: |
b48ac83d | 2127 | info->scratchpad[0].ptr = its_dev; |
b48ac83d MZ |
2128 | return 0; |
2129 | } | |
2130 | ||
54456db9 MZ |
2131 | static struct msi_domain_ops its_msi_domain_ops = { |
2132 | .msi_prepare = its_msi_prepare, | |
2133 | }; | |
2134 | ||
b48ac83d MZ |
2135 | static int its_irq_gic_domain_alloc(struct irq_domain *domain, |
2136 | unsigned int virq, | |
2137 | irq_hw_number_t hwirq) | |
2138 | { | |
f833f57f MZ |
2139 | struct irq_fwspec fwspec; |
2140 | ||
2141 | if (irq_domain_get_of_node(domain->parent)) { | |
2142 | fwspec.fwnode = domain->parent->fwnode; | |
2143 | fwspec.param_count = 3; | |
2144 | fwspec.param[0] = GIC_IRQ_TYPE_LPI; | |
2145 | fwspec.param[1] = hwirq; | |
2146 | fwspec.param[2] = IRQ_TYPE_EDGE_RISING; | |
3f010cf1 TN |
2147 | } else if (is_fwnode_irqchip(domain->parent->fwnode)) { |
2148 | fwspec.fwnode = domain->parent->fwnode; | |
2149 | fwspec.param_count = 2; | |
2150 | fwspec.param[0] = hwirq; | |
2151 | fwspec.param[1] = IRQ_TYPE_EDGE_RISING; | |
f833f57f MZ |
2152 | } else { |
2153 | return -EINVAL; | |
2154 | } | |
b48ac83d | 2155 | |
f833f57f | 2156 | return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
b48ac83d MZ |
2157 | } |
2158 | ||
2159 | static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
2160 | unsigned int nr_irqs, void *args) | |
2161 | { | |
2162 | msi_alloc_info_t *info = args; | |
2163 | struct its_device *its_dev = info->scratchpad[0].ptr; | |
2164 | irq_hw_number_t hwirq; | |
2165 | int err; | |
2166 | int i; | |
2167 | ||
2168 | for (i = 0; i < nr_irqs; i++) { | |
2169 | err = its_alloc_device_irq(its_dev, &hwirq); | |
2170 | if (err) | |
2171 | return err; | |
2172 | ||
2173 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); | |
2174 | if (err) | |
2175 | return err; | |
2176 | ||
2177 | irq_domain_set_hwirq_and_chip(domain, virq + i, | |
2178 | hwirq, &its_irq_chip, its_dev); | |
f130420e MZ |
2179 | pr_debug("ID:%d pID:%d vID:%d\n", |
2180 | (int)(hwirq - its_dev->event_map.lpi_base), | |
2181 | (int) hwirq, virq + i); | |
b48ac83d MZ |
2182 | } |
2183 | ||
2184 | return 0; | |
2185 | } | |
2186 | ||
aca268df MZ |
2187 | static void its_irq_domain_activate(struct irq_domain *domain, |
2188 | struct irq_data *d) | |
2189 | { | |
2190 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2191 | u32 event = its_get_event_id(d); | |
fbf8f40e GK |
2192 | const struct cpumask *cpu_mask = cpu_online_mask; |
2193 | ||
2194 | /* get the cpu_mask of local node */ | |
2195 | if (its_dev->its->numa_node >= 0) | |
2196 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | |
aca268df | 2197 | |
591e5bec | 2198 | /* Bind the LPI to the first possible CPU */ |
fbf8f40e | 2199 | its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); |
591e5bec | 2200 | |
aca268df | 2201 | /* Map the GIC IRQ and event to the device */ |
6a25ad3a | 2202 | its_send_mapti(its_dev, d->hwirq, event); |
aca268df MZ |
2203 | } |
2204 | ||
2205 | static void its_irq_domain_deactivate(struct irq_domain *domain, | |
2206 | struct irq_data *d) | |
2207 | { | |
2208 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2209 | u32 event = its_get_event_id(d); | |
2210 | ||
2211 | /* Stop the delivery of interrupts */ | |
2212 | its_send_discard(its_dev, event); | |
2213 | } | |
2214 | ||
b48ac83d MZ |
2215 | static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, |
2216 | unsigned int nr_irqs) | |
2217 | { | |
2218 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | |
2219 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2220 | int i; | |
2221 | ||
2222 | for (i = 0; i < nr_irqs; i++) { | |
2223 | struct irq_data *data = irq_domain_get_irq_data(domain, | |
2224 | virq + i); | |
aca268df | 2225 | u32 event = its_get_event_id(data); |
b48ac83d MZ |
2226 | |
2227 | /* Mark interrupt index as unused */ | |
591e5bec | 2228 | clear_bit(event, its_dev->event_map.lpi_map); |
b48ac83d MZ |
2229 | |
2230 | /* Nuke the entry in the domain */ | |
2da39949 | 2231 | irq_domain_reset_irq_data(data); |
b48ac83d MZ |
2232 | } |
2233 | ||
2234 | /* If all interrupts have been freed, start mopping the floor */ | |
591e5bec MZ |
2235 | if (bitmap_empty(its_dev->event_map.lpi_map, |
2236 | its_dev->event_map.nr_lpis)) { | |
cf2be8ba MZ |
2237 | its_lpi_free_chunks(its_dev->event_map.lpi_map, |
2238 | its_dev->event_map.lpi_base, | |
2239 | its_dev->event_map.nr_lpis); | |
2240 | kfree(its_dev->event_map.col_map); | |
b48ac83d MZ |
2241 | |
2242 | /* Unmap device/itt */ | |
2243 | its_send_mapd(its_dev, 0); | |
2244 | its_free_device(its_dev); | |
2245 | } | |
2246 | ||
2247 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | |
2248 | } | |
2249 | ||
2250 | static const struct irq_domain_ops its_domain_ops = { | |
2251 | .alloc = its_irq_domain_alloc, | |
2252 | .free = its_irq_domain_free, | |
aca268df MZ |
2253 | .activate = its_irq_domain_activate, |
2254 | .deactivate = its_irq_domain_deactivate, | |
b48ac83d | 2255 | }; |
4c21f3c2 | 2256 | |
20b3d54e MZ |
2257 | /* |
2258 | * This is insane. | |
2259 | * | |
2260 | * If a GICv4 doesn't implement Direct LPIs (which is extremely | |
2261 | * likely), the only way to perform an invalidate is to use a fake | |
2262 | * device to issue an INV command, implying that the LPI has first | |
2263 | * been mapped to some event on that device. Since this is not exactly | |
2264 | * cheap, we try to keep that mapping around as long as possible, and | |
2265 | * only issue an UNMAP if we're short on available slots. | |
2266 | * | |
2267 | * Broken by design(tm). | |
2268 | */ | |
2269 | static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) | |
2270 | { | |
2271 | /* Already unmapped? */ | |
2272 | if (vpe->vpe_proxy_event == -1) | |
2273 | return; | |
2274 | ||
2275 | its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); | |
2276 | vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; | |
2277 | ||
2278 | /* | |
2279 | * We don't track empty slots at all, so let's move the | |
2280 | * next_victim pointer if we can quickly reuse that slot | |
2281 | * instead of nuking an existing entry. Not clear that this is | |
2282 | * always a win though, and this might just generate a ripple | |
2283 | * effect... Let's just hope VPEs don't migrate too often. | |
2284 | */ | |
2285 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) | |
2286 | vpe_proxy.next_victim = vpe->vpe_proxy_event; | |
2287 | ||
2288 | vpe->vpe_proxy_event = -1; | |
2289 | } | |
2290 | ||
2291 | static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) | |
2292 | { | |
2293 | if (!gic_rdists->has_direct_lpi) { | |
2294 | unsigned long flags; | |
2295 | ||
2296 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2297 | its_vpe_db_proxy_unmap_locked(vpe); | |
2298 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2299 | } | |
2300 | } | |
2301 | ||
2302 | static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) | |
2303 | { | |
2304 | /* Already mapped? */ | |
2305 | if (vpe->vpe_proxy_event != -1) | |
2306 | return; | |
2307 | ||
2308 | /* This slot was already allocated. Kick the other VPE out. */ | |
2309 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) | |
2310 | its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); | |
2311 | ||
2312 | /* Map the new VPE instead */ | |
2313 | vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; | |
2314 | vpe->vpe_proxy_event = vpe_proxy.next_victim; | |
2315 | vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; | |
2316 | ||
2317 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; | |
2318 | its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); | |
2319 | } | |
2320 | ||
958b90d1 MZ |
2321 | static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) |
2322 | { | |
2323 | unsigned long flags; | |
2324 | struct its_collection *target_col; | |
2325 | ||
2326 | if (gic_rdists->has_direct_lpi) { | |
2327 | void __iomem *rdbase; | |
2328 | ||
2329 | rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; | |
2330 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); | |
2331 | while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) | |
2332 | cpu_relax(); | |
2333 | ||
2334 | return; | |
2335 | } | |
2336 | ||
2337 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2338 | ||
2339 | its_vpe_db_proxy_map_locked(vpe); | |
2340 | ||
2341 | target_col = &vpe_proxy.dev->its->collections[to]; | |
2342 | its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); | |
2343 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; | |
2344 | ||
2345 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2346 | } | |
2347 | ||
3171a47a MZ |
2348 | static int its_vpe_set_affinity(struct irq_data *d, |
2349 | const struct cpumask *mask_val, | |
2350 | bool force) | |
2351 | { | |
2352 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2353 | int cpu = cpumask_first(mask_val); | |
2354 | ||
2355 | /* | |
2356 | * Changing affinity is mega expensive, so let's be as lazy as | |
20b3d54e | 2357 | * we can and only do it if we really have to. Also, if mapped |
958b90d1 MZ |
2358 | * into the proxy device, we need to move the doorbell |
2359 | * interrupt to its new location. | |
3171a47a MZ |
2360 | */ |
2361 | if (vpe->col_idx != cpu) { | |
958b90d1 MZ |
2362 | int from = vpe->col_idx; |
2363 | ||
3171a47a MZ |
2364 | vpe->col_idx = cpu; |
2365 | its_send_vmovp(vpe); | |
958b90d1 | 2366 | its_vpe_db_proxy_move(vpe, from, cpu); |
3171a47a MZ |
2367 | } |
2368 | ||
2369 | return IRQ_SET_MASK_OK_DONE; | |
2370 | } | |
2371 | ||
e643d803 MZ |
2372 | static void its_vpe_schedule(struct its_vpe *vpe) |
2373 | { | |
2374 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | |
2375 | u64 val; | |
2376 | ||
2377 | /* Schedule the VPE */ | |
2378 | val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & | |
2379 | GENMASK_ULL(51, 12); | |
2380 | val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; | |
2381 | val |= GICR_VPROPBASER_RaWb; | |
2382 | val |= GICR_VPROPBASER_InnerShareable; | |
2383 | gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); | |
2384 | ||
2385 | val = virt_to_phys(page_address(vpe->vpt_page)) & | |
2386 | GENMASK_ULL(51, 16); | |
2387 | val |= GICR_VPENDBASER_RaWaWb; | |
2388 | val |= GICR_VPENDBASER_NonShareable; | |
2389 | /* | |
2390 | * There is no good way of finding out if the pending table is | |
2391 | * empty as we can race against the doorbell interrupt very | |
2392 | * easily. So in the end, vpe->pending_last is only an | |
2393 | * indication that the vcpu has something pending, not one | |
2394 | * that the pending table is empty. A good implementation | |
2395 | * would be able to read its coarse map pretty quickly anyway, | |
2396 | * making this a tolerable issue. | |
2397 | */ | |
2398 | val |= GICR_VPENDBASER_PendingLast; | |
2399 | val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; | |
2400 | val |= GICR_VPENDBASER_Valid; | |
2401 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | |
2402 | } | |
2403 | ||
2404 | static void its_vpe_deschedule(struct its_vpe *vpe) | |
2405 | { | |
2406 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | |
2407 | u32 count = 1000000; /* 1s! */ | |
2408 | bool clean; | |
2409 | u64 val; | |
2410 | ||
2411 | /* We're being scheduled out */ | |
2412 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | |
2413 | val &= ~GICR_VPENDBASER_Valid; | |
2414 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | |
2415 | ||
2416 | do { | |
2417 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | |
2418 | clean = !(val & GICR_VPENDBASER_Dirty); | |
2419 | if (!clean) { | |
2420 | count--; | |
2421 | cpu_relax(); | |
2422 | udelay(1); | |
2423 | } | |
2424 | } while (!clean && count); | |
2425 | ||
2426 | if (unlikely(!clean && !count)) { | |
2427 | pr_err_ratelimited("ITS virtual pending table not cleaning\n"); | |
2428 | vpe->idai = false; | |
2429 | vpe->pending_last = true; | |
2430 | } else { | |
2431 | vpe->idai = !!(val & GICR_VPENDBASER_IDAI); | |
2432 | vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); | |
2433 | } | |
2434 | } | |
2435 | ||
2436 | static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) | |
2437 | { | |
2438 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2439 | struct its_cmd_info *info = vcpu_info; | |
2440 | ||
2441 | switch (info->cmd_type) { | |
2442 | case SCHEDULE_VPE: | |
2443 | its_vpe_schedule(vpe); | |
2444 | return 0; | |
2445 | ||
2446 | case DESCHEDULE_VPE: | |
2447 | its_vpe_deschedule(vpe); | |
2448 | return 0; | |
2449 | ||
5e2f7642 MZ |
2450 | case INVALL_VPE: |
2451 | its_send_vinvall(vpe); | |
2452 | return 0; | |
2453 | ||
e643d803 MZ |
2454 | default: |
2455 | return -EINVAL; | |
2456 | } | |
2457 | } | |
2458 | ||
20b3d54e MZ |
2459 | static void its_vpe_send_cmd(struct its_vpe *vpe, |
2460 | void (*cmd)(struct its_device *, u32)) | |
2461 | { | |
2462 | unsigned long flags; | |
2463 | ||
2464 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2465 | ||
2466 | its_vpe_db_proxy_map_locked(vpe); | |
2467 | cmd(vpe_proxy.dev, vpe->vpe_proxy_event); | |
2468 | ||
2469 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2470 | } | |
2471 | ||
f6a91da7 MZ |
2472 | static void its_vpe_send_inv(struct irq_data *d) |
2473 | { | |
2474 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
f6a91da7 | 2475 | |
20b3d54e MZ |
2476 | if (gic_rdists->has_direct_lpi) { |
2477 | void __iomem *rdbase; | |
2478 | ||
2479 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; | |
2480 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); | |
2481 | while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) | |
2482 | cpu_relax(); | |
2483 | } else { | |
2484 | its_vpe_send_cmd(vpe, its_send_inv); | |
2485 | } | |
f6a91da7 MZ |
2486 | } |
2487 | ||
2488 | static void its_vpe_mask_irq(struct irq_data *d) | |
2489 | { | |
2490 | /* | |
2491 | * We need to mask the LPI, which is described by the parent | |
2492 | * irq_data. Instead of calling into the parent (which won't | |
2493 | * exactly do the right thing), let's simply use the | |
2494 | * parent_data pointer. Yes, I'm naughty. | |
2495 | */ | |
2496 | lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); | |
2497 | its_vpe_send_inv(d); | |
2498 | } | |
2499 | ||
2500 | static void its_vpe_unmask_irq(struct irq_data *d) | |
2501 | { | |
2502 | /* Same hack as above... */ | |
2503 | lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); | |
2504 | its_vpe_send_inv(d); | |
2505 | } | |
2506 | ||
8fff27ae MZ |
2507 | static struct irq_chip its_vpe_irq_chip = { |
2508 | .name = "GICv4-vpe", | |
f6a91da7 MZ |
2509 | .irq_mask = its_vpe_mask_irq, |
2510 | .irq_unmask = its_vpe_unmask_irq, | |
2511 | .irq_eoi = irq_chip_eoi_parent, | |
3171a47a | 2512 | .irq_set_affinity = its_vpe_set_affinity, |
e643d803 | 2513 | .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, |
8fff27ae MZ |
2514 | }; |
2515 | ||
7d75bbb4 MZ |
2516 | static int its_vpe_id_alloc(void) |
2517 | { | |
2518 | return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL); | |
2519 | } | |
2520 | ||
2521 | static void its_vpe_id_free(u16 id) | |
2522 | { | |
2523 | ida_simple_remove(&its_vpeid_ida, id); | |
2524 | } | |
2525 | ||
2526 | static int its_vpe_init(struct its_vpe *vpe) | |
2527 | { | |
2528 | struct page *vpt_page; | |
2529 | int vpe_id; | |
2530 | ||
2531 | /* Allocate vpe_id */ | |
2532 | vpe_id = its_vpe_id_alloc(); | |
2533 | if (vpe_id < 0) | |
2534 | return vpe_id; | |
2535 | ||
2536 | /* Allocate VPT */ | |
2537 | vpt_page = its_allocate_pending_table(GFP_KERNEL); | |
2538 | if (!vpt_page) { | |
2539 | its_vpe_id_free(vpe_id); | |
2540 | return -ENOMEM; | |
2541 | } | |
2542 | ||
2543 | if (!its_alloc_vpe_table(vpe_id)) { | |
2544 | its_vpe_id_free(vpe_id); | |
2545 | its_free_pending_table(vpe->vpt_page); | |
2546 | return -ENOMEM; | |
2547 | } | |
2548 | ||
2549 | vpe->vpe_id = vpe_id; | |
2550 | vpe->vpt_page = vpt_page; | |
20b3d54e | 2551 | vpe->vpe_proxy_event = -1; |
7d75bbb4 MZ |
2552 | |
2553 | return 0; | |
2554 | } | |
2555 | ||
2556 | static void its_vpe_teardown(struct its_vpe *vpe) | |
2557 | { | |
20b3d54e | 2558 | its_vpe_db_proxy_unmap(vpe); |
7d75bbb4 MZ |
2559 | its_vpe_id_free(vpe->vpe_id); |
2560 | its_free_pending_table(vpe->vpt_page); | |
2561 | } | |
2562 | ||
2563 | static void its_vpe_irq_domain_free(struct irq_domain *domain, | |
2564 | unsigned int virq, | |
2565 | unsigned int nr_irqs) | |
2566 | { | |
2567 | struct its_vm *vm = domain->host_data; | |
2568 | int i; | |
2569 | ||
2570 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | |
2571 | ||
2572 | for (i = 0; i < nr_irqs; i++) { | |
2573 | struct irq_data *data = irq_domain_get_irq_data(domain, | |
2574 | virq + i); | |
2575 | struct its_vpe *vpe = irq_data_get_irq_chip_data(data); | |
2576 | ||
2577 | BUG_ON(vm != vpe->its_vm); | |
2578 | ||
2579 | clear_bit(data->hwirq, vm->db_bitmap); | |
2580 | its_vpe_teardown(vpe); | |
2581 | irq_domain_reset_irq_data(data); | |
2582 | } | |
2583 | ||
2584 | if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { | |
2585 | its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); | |
2586 | its_free_prop_table(vm->vprop_page); | |
2587 | } | |
2588 | } | |
2589 | ||
2590 | static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
2591 | unsigned int nr_irqs, void *args) | |
2592 | { | |
2593 | struct its_vm *vm = args; | |
2594 | unsigned long *bitmap; | |
2595 | struct page *vprop_page; | |
2596 | int base, nr_ids, i, err = 0; | |
2597 | ||
2598 | BUG_ON(!vm); | |
2599 | ||
2600 | bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); | |
2601 | if (!bitmap) | |
2602 | return -ENOMEM; | |
2603 | ||
2604 | if (nr_ids < nr_irqs) { | |
2605 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2606 | return -ENOMEM; | |
2607 | } | |
2608 | ||
2609 | vprop_page = its_allocate_prop_table(GFP_KERNEL); | |
2610 | if (!vprop_page) { | |
2611 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2612 | return -ENOMEM; | |
2613 | } | |
2614 | ||
2615 | vm->db_bitmap = bitmap; | |
2616 | vm->db_lpi_base = base; | |
2617 | vm->nr_db_lpis = nr_ids; | |
2618 | vm->vprop_page = vprop_page; | |
2619 | ||
2620 | for (i = 0; i < nr_irqs; i++) { | |
2621 | vm->vpes[i]->vpe_db_lpi = base + i; | |
2622 | err = its_vpe_init(vm->vpes[i]); | |
2623 | if (err) | |
2624 | break; | |
2625 | err = its_irq_gic_domain_alloc(domain, virq + i, | |
2626 | vm->vpes[i]->vpe_db_lpi); | |
2627 | if (err) | |
2628 | break; | |
2629 | irq_domain_set_hwirq_and_chip(domain, virq + i, i, | |
2630 | &its_vpe_irq_chip, vm->vpes[i]); | |
2631 | set_bit(i, bitmap); | |
2632 | } | |
2633 | ||
2634 | if (err) { | |
2635 | if (i > 0) | |
2636 | its_vpe_irq_domain_free(domain, virq, i); | |
2637 | ||
2638 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2639 | its_free_prop_table(vprop_page); | |
2640 | } | |
2641 | ||
2642 | return err; | |
2643 | } | |
2644 | ||
eb78192b MZ |
2645 | static void its_vpe_irq_domain_activate(struct irq_domain *domain, |
2646 | struct irq_data *d) | |
2647 | { | |
2648 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2649 | ||
2650 | /* Map the VPE to the first possible CPU */ | |
2651 | vpe->col_idx = cpumask_first(cpu_online_mask); | |
2652 | its_send_vmapp(vpe, true); | |
2653 | its_send_vinvall(vpe); | |
2654 | } | |
2655 | ||
2656 | static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, | |
2657 | struct irq_data *d) | |
2658 | { | |
2659 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2660 | ||
2661 | its_send_vmapp(vpe, false); | |
2662 | } | |
2663 | ||
8fff27ae | 2664 | static const struct irq_domain_ops its_vpe_domain_ops = { |
7d75bbb4 MZ |
2665 | .alloc = its_vpe_irq_domain_alloc, |
2666 | .free = its_vpe_irq_domain_free, | |
eb78192b MZ |
2667 | .activate = its_vpe_irq_domain_activate, |
2668 | .deactivate = its_vpe_irq_domain_deactivate, | |
8fff27ae MZ |
2669 | }; |
2670 | ||
4559fbb3 YW |
2671 | static int its_force_quiescent(void __iomem *base) |
2672 | { | |
2673 | u32 count = 1000000; /* 1s */ | |
2674 | u32 val; | |
2675 | ||
2676 | val = readl_relaxed(base + GITS_CTLR); | |
7611da86 DD |
2677 | /* |
2678 | * GIC architecture specification requires the ITS to be both | |
2679 | * disabled and quiescent for writes to GITS_BASER<n> or | |
2680 | * GITS_CBASER to not have UNPREDICTABLE results. | |
2681 | */ | |
2682 | if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) | |
4559fbb3 YW |
2683 | return 0; |
2684 | ||
2685 | /* Disable the generation of all interrupts to this ITS */ | |
2686 | val &= ~GITS_CTLR_ENABLE; | |
2687 | writel_relaxed(val, base + GITS_CTLR); | |
2688 | ||
2689 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ | |
2690 | while (1) { | |
2691 | val = readl_relaxed(base + GITS_CTLR); | |
2692 | if (val & GITS_CTLR_QUIESCENT) | |
2693 | return 0; | |
2694 | ||
2695 | count--; | |
2696 | if (!count) | |
2697 | return -EBUSY; | |
2698 | ||
2699 | cpu_relax(); | |
2700 | udelay(1); | |
2701 | } | |
2702 | } | |
2703 | ||
94100970 RR |
2704 | static void __maybe_unused its_enable_quirk_cavium_22375(void *data) |
2705 | { | |
2706 | struct its_node *its = data; | |
2707 | ||
2708 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; | |
2709 | } | |
2710 | ||
fbf8f40e GK |
2711 | static void __maybe_unused its_enable_quirk_cavium_23144(void *data) |
2712 | { | |
2713 | struct its_node *its = data; | |
2714 | ||
2715 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | |
2716 | } | |
2717 | ||
90922a2d SD |
2718 | static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) |
2719 | { | |
2720 | struct its_node *its = data; | |
2721 | ||
2722 | /* On QDF2400, the size of the ITE is 16 bytes */ | |
2723 | its->ite_size = 16; | |
2724 | } | |
2725 | ||
67510cca | 2726 | static const struct gic_quirk its_quirks[] = { |
94100970 RR |
2727 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
2728 | { | |
2729 | .desc = "ITS: Cavium errata 22375, 24313", | |
2730 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | |
2731 | .mask = 0xffff0fff, | |
2732 | .init = its_enable_quirk_cavium_22375, | |
2733 | }, | |
fbf8f40e GK |
2734 | #endif |
2735 | #ifdef CONFIG_CAVIUM_ERRATUM_23144 | |
2736 | { | |
2737 | .desc = "ITS: Cavium erratum 23144", | |
2738 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | |
2739 | .mask = 0xffff0fff, | |
2740 | .init = its_enable_quirk_cavium_23144, | |
2741 | }, | |
90922a2d SD |
2742 | #endif |
2743 | #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 | |
2744 | { | |
2745 | .desc = "ITS: QDF2400 erratum 0065", | |
2746 | .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ | |
2747 | .mask = 0xffffffff, | |
2748 | .init = its_enable_quirk_qdf2400_e0065, | |
2749 | }, | |
94100970 | 2750 | #endif |
67510cca RR |
2751 | { |
2752 | } | |
2753 | }; | |
2754 | ||
2755 | static void its_enable_quirks(struct its_node *its) | |
2756 | { | |
2757 | u32 iidr = readl_relaxed(its->base + GITS_IIDR); | |
2758 | ||
2759 | gic_enable_quirks(iidr, its_quirks, its); | |
2760 | } | |
2761 | ||
db40f0a7 | 2762 | static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) |
d14ae5e6 TN |
2763 | { |
2764 | struct irq_domain *inner_domain; | |
2765 | struct msi_domain_info *info; | |
2766 | ||
2767 | info = kzalloc(sizeof(*info), GFP_KERNEL); | |
2768 | if (!info) | |
2769 | return -ENOMEM; | |
2770 | ||
db40f0a7 | 2771 | inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); |
d14ae5e6 TN |
2772 | if (!inner_domain) { |
2773 | kfree(info); | |
2774 | return -ENOMEM; | |
2775 | } | |
2776 | ||
db40f0a7 | 2777 | inner_domain->parent = its_parent; |
96f0d93a | 2778 | irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); |
59768527 | 2779 | inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP; |
d14ae5e6 TN |
2780 | info->ops = &its_msi_domain_ops; |
2781 | info->data = its; | |
2782 | inner_domain->host_data = info; | |
2783 | ||
2784 | return 0; | |
2785 | } | |
2786 | ||
8fff27ae MZ |
2787 | static int its_init_vpe_domain(void) |
2788 | { | |
20b3d54e MZ |
2789 | struct its_node *its; |
2790 | u32 devid; | |
2791 | int entries; | |
2792 | ||
2793 | if (gic_rdists->has_direct_lpi) { | |
2794 | pr_info("ITS: Using DirectLPI for VPE invalidation\n"); | |
2795 | return 0; | |
2796 | } | |
2797 | ||
2798 | /* Any ITS will do, even if not v4 */ | |
2799 | its = list_first_entry(&its_nodes, struct its_node, entry); | |
2800 | ||
2801 | entries = roundup_pow_of_two(nr_cpu_ids); | |
2802 | vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries, | |
2803 | GFP_KERNEL); | |
2804 | if (!vpe_proxy.vpes) { | |
2805 | pr_err("ITS: Can't allocate GICv4 proxy device array\n"); | |
2806 | return -ENOMEM; | |
2807 | } | |
2808 | ||
2809 | /* Use the last possible DevID */ | |
2810 | devid = GENMASK(its->device_ids - 1, 0); | |
2811 | vpe_proxy.dev = its_create_device(its, devid, entries, false); | |
2812 | if (!vpe_proxy.dev) { | |
2813 | kfree(vpe_proxy.vpes); | |
2814 | pr_err("ITS: Can't allocate GICv4 proxy device\n"); | |
2815 | return -ENOMEM; | |
2816 | } | |
2817 | ||
2818 | BUG_ON(entries != vpe_proxy.dev->nr_ites); | |
2819 | ||
2820 | raw_spin_lock_init(&vpe_proxy.lock); | |
2821 | vpe_proxy.next_victim = 0; | |
2822 | pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", | |
2823 | devid, vpe_proxy.dev->nr_ites); | |
2824 | ||
8fff27ae MZ |
2825 | return 0; |
2826 | } | |
2827 | ||
3dfa576b MZ |
2828 | static int __init its_compute_its_list_map(struct resource *res, |
2829 | void __iomem *its_base) | |
2830 | { | |
2831 | int its_number; | |
2832 | u32 ctlr; | |
2833 | ||
2834 | /* | |
2835 | * This is assumed to be done early enough that we're | |
2836 | * guaranteed to be single-threaded, hence no | |
2837 | * locking. Should this change, we should address | |
2838 | * this. | |
2839 | */ | |
2840 | its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX); | |
2841 | if (its_number >= ITS_LIST_MAX) { | |
2842 | pr_err("ITS@%pa: No ITSList entry available!\n", | |
2843 | &res->start); | |
2844 | return -EINVAL; | |
2845 | } | |
2846 | ||
2847 | ctlr = readl_relaxed(its_base + GITS_CTLR); | |
2848 | ctlr &= ~GITS_CTLR_ITS_NUMBER; | |
2849 | ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; | |
2850 | writel_relaxed(ctlr, its_base + GITS_CTLR); | |
2851 | ctlr = readl_relaxed(its_base + GITS_CTLR); | |
2852 | if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { | |
2853 | its_number = ctlr & GITS_CTLR_ITS_NUMBER; | |
2854 | its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; | |
2855 | } | |
2856 | ||
2857 | if (test_and_set_bit(its_number, &its_list_map)) { | |
2858 | pr_err("ITS@%pa: Duplicate ITSList entry %d\n", | |
2859 | &res->start, its_number); | |
2860 | return -EINVAL; | |
2861 | } | |
2862 | ||
2863 | return its_number; | |
2864 | } | |
2865 | ||
db40f0a7 TN |
2866 | static int __init its_probe_one(struct resource *res, |
2867 | struct fwnode_handle *handle, int numa_node) | |
4c21f3c2 | 2868 | { |
4c21f3c2 MZ |
2869 | struct its_node *its; |
2870 | void __iomem *its_base; | |
3dfa576b MZ |
2871 | u32 val, ctlr; |
2872 | u64 baser, tmp, typer; | |
4c21f3c2 MZ |
2873 | int err; |
2874 | ||
db40f0a7 | 2875 | its_base = ioremap(res->start, resource_size(res)); |
4c21f3c2 | 2876 | if (!its_base) { |
db40f0a7 | 2877 | pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); |
4c21f3c2 MZ |
2878 | return -ENOMEM; |
2879 | } | |
2880 | ||
2881 | val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; | |
2882 | if (val != 0x30 && val != 0x40) { | |
db40f0a7 | 2883 | pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); |
4c21f3c2 MZ |
2884 | err = -ENODEV; |
2885 | goto out_unmap; | |
2886 | } | |
2887 | ||
4559fbb3 YW |
2888 | err = its_force_quiescent(its_base); |
2889 | if (err) { | |
db40f0a7 | 2890 | pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); |
4559fbb3 YW |
2891 | goto out_unmap; |
2892 | } | |
2893 | ||
db40f0a7 | 2894 | pr_info("ITS %pR\n", res); |
4c21f3c2 MZ |
2895 | |
2896 | its = kzalloc(sizeof(*its), GFP_KERNEL); | |
2897 | if (!its) { | |
2898 | err = -ENOMEM; | |
2899 | goto out_unmap; | |
2900 | } | |
2901 | ||
2902 | raw_spin_lock_init(&its->lock); | |
2903 | INIT_LIST_HEAD(&its->entry); | |
2904 | INIT_LIST_HEAD(&its->its_device_list); | |
3dfa576b | 2905 | typer = gic_read_typer(its_base + GITS_TYPER); |
4c21f3c2 | 2906 | its->base = its_base; |
db40f0a7 | 2907 | its->phys_base = res->start; |
3dfa576b MZ |
2908 | its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); |
2909 | its->is_v4 = !!(typer & GITS_TYPER_VLPIS); | |
2910 | if (its->is_v4) { | |
2911 | if (!(typer & GITS_TYPER_VMOVP)) { | |
2912 | err = its_compute_its_list_map(res, its_base); | |
2913 | if (err < 0) | |
2914 | goto out_free_its; | |
2915 | ||
2916 | pr_info("ITS@%pa: Using ITS number %d\n", | |
2917 | &res->start, err); | |
2918 | } else { | |
2919 | pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); | |
2920 | } | |
2921 | } | |
2922 | ||
db40f0a7 | 2923 | its->numa_node = numa_node; |
4c21f3c2 | 2924 | |
5bc13c2c RR |
2925 | its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
2926 | get_order(ITS_CMD_QUEUE_SZ)); | |
4c21f3c2 MZ |
2927 | if (!its->cmd_base) { |
2928 | err = -ENOMEM; | |
2929 | goto out_free_its; | |
2930 | } | |
2931 | its->cmd_write = its->cmd_base; | |
2932 | ||
67510cca RR |
2933 | its_enable_quirks(its); |
2934 | ||
0e0b0f69 | 2935 | err = its_alloc_tables(its); |
4c21f3c2 MZ |
2936 | if (err) |
2937 | goto out_free_cmd; | |
2938 | ||
2939 | err = its_alloc_collections(its); | |
2940 | if (err) | |
2941 | goto out_free_tables; | |
2942 | ||
2943 | baser = (virt_to_phys(its->cmd_base) | | |
2fd632a0 | 2944 | GITS_CBASER_RaWaWb | |
4c21f3c2 MZ |
2945 | GITS_CBASER_InnerShareable | |
2946 | (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | | |
2947 | GITS_CBASER_VALID); | |
2948 | ||
0968a619 VM |
2949 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
2950 | tmp = gits_read_cbaser(its->base + GITS_CBASER); | |
4c21f3c2 | 2951 | |
4ad3e363 | 2952 | if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { |
241a386c MZ |
2953 | if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { |
2954 | /* | |
2955 | * The HW reports non-shareable, we must | |
2956 | * remove the cacheability attributes as | |
2957 | * well. | |
2958 | */ | |
2959 | baser &= ~(GITS_CBASER_SHAREABILITY_MASK | | |
2960 | GITS_CBASER_CACHEABILITY_MASK); | |
2961 | baser |= GITS_CBASER_nC; | |
0968a619 | 2962 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
241a386c | 2963 | } |
4c21f3c2 MZ |
2964 | pr_info("ITS: using cache flushing for cmd queue\n"); |
2965 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; | |
2966 | } | |
2967 | ||
0968a619 | 2968 | gits_write_cwriter(0, its->base + GITS_CWRITER); |
3dfa576b MZ |
2969 | ctlr = readl_relaxed(its->base + GITS_CTLR); |
2970 | writel_relaxed(ctlr | GITS_CTLR_ENABLE, its->base + GITS_CTLR); | |
241a386c | 2971 | |
db40f0a7 | 2972 | err = its_init_domain(handle, its); |
d14ae5e6 TN |
2973 | if (err) |
2974 | goto out_free_tables; | |
4c21f3c2 MZ |
2975 | |
2976 | spin_lock(&its_lock); | |
2977 | list_add(&its->entry, &its_nodes); | |
2978 | spin_unlock(&its_lock); | |
2979 | ||
2980 | return 0; | |
2981 | ||
4c21f3c2 MZ |
2982 | out_free_tables: |
2983 | its_free_tables(its); | |
2984 | out_free_cmd: | |
5bc13c2c | 2985 | free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); |
4c21f3c2 MZ |
2986 | out_free_its: |
2987 | kfree(its); | |
2988 | out_unmap: | |
2989 | iounmap(its_base); | |
db40f0a7 | 2990 | pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); |
4c21f3c2 MZ |
2991 | return err; |
2992 | } | |
2993 | ||
2994 | static bool gic_rdists_supports_plpis(void) | |
2995 | { | |
589ce5f4 | 2996 | return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); |
4c21f3c2 MZ |
2997 | } |
2998 | ||
2999 | int its_cpu_init(void) | |
3000 | { | |
4c21f3c2 | 3001 | if (!list_empty(&its_nodes)) { |
16acae72 VM |
3002 | if (!gic_rdists_supports_plpis()) { |
3003 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | |
3004 | return -ENXIO; | |
3005 | } | |
4c21f3c2 MZ |
3006 | its_cpu_init_lpis(); |
3007 | its_cpu_init_collection(); | |
3008 | } | |
3009 | ||
3010 | return 0; | |
3011 | } | |
3012 | ||
935bba7c | 3013 | static const struct of_device_id its_device_id[] = { |
4c21f3c2 MZ |
3014 | { .compatible = "arm,gic-v3-its", }, |
3015 | {}, | |
3016 | }; | |
3017 | ||
db40f0a7 | 3018 | static int __init its_of_probe(struct device_node *node) |
4c21f3c2 MZ |
3019 | { |
3020 | struct device_node *np; | |
db40f0a7 | 3021 | struct resource res; |
4c21f3c2 MZ |
3022 | |
3023 | for (np = of_find_matching_node(node, its_device_id); np; | |
3024 | np = of_find_matching_node(np, its_device_id)) { | |
d14ae5e6 | 3025 | if (!of_property_read_bool(np, "msi-controller")) { |
e81f54c6 RH |
3026 | pr_warn("%pOF: no msi-controller property, ITS ignored\n", |
3027 | np); | |
d14ae5e6 TN |
3028 | continue; |
3029 | } | |
3030 | ||
db40f0a7 | 3031 | if (of_address_to_resource(np, 0, &res)) { |
e81f54c6 | 3032 | pr_warn("%pOF: no regs?\n", np); |
db40f0a7 TN |
3033 | continue; |
3034 | } | |
3035 | ||
3036 | its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); | |
4c21f3c2 | 3037 | } |
db40f0a7 TN |
3038 | return 0; |
3039 | } | |
3040 | ||
3f010cf1 TN |
3041 | #ifdef CONFIG_ACPI |
3042 | ||
3043 | #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) | |
3044 | ||
dbd2b826 GK |
3045 | #if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531) |
3046 | struct its_srat_map { | |
3047 | /* numa node id */ | |
3048 | u32 numa_node; | |
3049 | /* GIC ITS ID */ | |
3050 | u32 its_id; | |
3051 | }; | |
3052 | ||
3053 | static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata; | |
3054 | static int its_in_srat __initdata; | |
3055 | ||
3056 | static int __init acpi_get_its_numa_node(u32 its_id) | |
3057 | { | |
3058 | int i; | |
3059 | ||
3060 | for (i = 0; i < its_in_srat; i++) { | |
3061 | if (its_id == its_srat_maps[i].its_id) | |
3062 | return its_srat_maps[i].numa_node; | |
3063 | } | |
3064 | return NUMA_NO_NODE; | |
3065 | } | |
3066 | ||
3067 | static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, | |
3068 | const unsigned long end) | |
3069 | { | |
3070 | int node; | |
3071 | struct acpi_srat_gic_its_affinity *its_affinity; | |
3072 | ||
3073 | its_affinity = (struct acpi_srat_gic_its_affinity *)header; | |
3074 | if (!its_affinity) | |
3075 | return -EINVAL; | |
3076 | ||
3077 | if (its_affinity->header.length < sizeof(*its_affinity)) { | |
3078 | pr_err("SRAT: Invalid header length %d in ITS affinity\n", | |
3079 | its_affinity->header.length); | |
3080 | return -EINVAL; | |
3081 | } | |
3082 | ||
3083 | if (its_in_srat >= MAX_NUMNODES) { | |
3084 | pr_err("SRAT: ITS affinity exceeding max count[%d]\n", | |
3085 | MAX_NUMNODES); | |
3086 | return -EINVAL; | |
3087 | } | |
3088 | ||
3089 | node = acpi_map_pxm_to_node(its_affinity->proximity_domain); | |
3090 | ||
3091 | if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { | |
3092 | pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); | |
3093 | return 0; | |
3094 | } | |
3095 | ||
3096 | its_srat_maps[its_in_srat].numa_node = node; | |
3097 | its_srat_maps[its_in_srat].its_id = its_affinity->its_id; | |
3098 | its_in_srat++; | |
3099 | pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", | |
3100 | its_affinity->proximity_domain, its_affinity->its_id, node); | |
3101 | ||
3102 | return 0; | |
3103 | } | |
3104 | ||
3105 | static void __init acpi_table_parse_srat_its(void) | |
3106 | { | |
3107 | acpi_table_parse_entries(ACPI_SIG_SRAT, | |
3108 | sizeof(struct acpi_table_srat), | |
3109 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | |
3110 | gic_acpi_parse_srat_its, 0); | |
3111 | } | |
3112 | #else | |
3113 | static void __init acpi_table_parse_srat_its(void) { } | |
3114 | static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } | |
3115 | #endif | |
3116 | ||
3f010cf1 TN |
3117 | static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, |
3118 | const unsigned long end) | |
3119 | { | |
3120 | struct acpi_madt_generic_translator *its_entry; | |
3121 | struct fwnode_handle *dom_handle; | |
3122 | struct resource res; | |
3123 | int err; | |
3124 | ||
3125 | its_entry = (struct acpi_madt_generic_translator *)header; | |
3126 | memset(&res, 0, sizeof(res)); | |
3127 | res.start = its_entry->base_address; | |
3128 | res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; | |
3129 | res.flags = IORESOURCE_MEM; | |
3130 | ||
3131 | dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); | |
3132 | if (!dom_handle) { | |
3133 | pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", | |
3134 | &res.start); | |
3135 | return -ENOMEM; | |
3136 | } | |
3137 | ||
3138 | err = iort_register_domain_token(its_entry->translation_id, dom_handle); | |
3139 | if (err) { | |
3140 | pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", | |
3141 | &res.start, its_entry->translation_id); | |
3142 | goto dom_err; | |
3143 | } | |
3144 | ||
dbd2b826 GK |
3145 | err = its_probe_one(&res, dom_handle, |
3146 | acpi_get_its_numa_node(its_entry->translation_id)); | |
3f010cf1 TN |
3147 | if (!err) |
3148 | return 0; | |
3149 | ||
3150 | iort_deregister_domain_token(its_entry->translation_id); | |
3151 | dom_err: | |
3152 | irq_domain_free_fwnode(dom_handle); | |
3153 | return err; | |
3154 | } | |
3155 | ||
3156 | static void __init its_acpi_probe(void) | |
3157 | { | |
dbd2b826 | 3158 | acpi_table_parse_srat_its(); |
3f010cf1 TN |
3159 | acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, |
3160 | gic_acpi_parse_madt_its, 0); | |
3161 | } | |
3162 | #else | |
3163 | static void __init its_acpi_probe(void) { } | |
3164 | #endif | |
3165 | ||
db40f0a7 TN |
3166 | int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, |
3167 | struct irq_domain *parent_domain) | |
3168 | { | |
3169 | struct device_node *of_node; | |
8fff27ae MZ |
3170 | struct its_node *its; |
3171 | bool has_v4 = false; | |
3172 | int err; | |
db40f0a7 TN |
3173 | |
3174 | its_parent = parent_domain; | |
3175 | of_node = to_of_node(handle); | |
3176 | if (of_node) | |
3177 | its_of_probe(of_node); | |
3178 | else | |
3f010cf1 | 3179 | its_acpi_probe(); |
4c21f3c2 MZ |
3180 | |
3181 | if (list_empty(&its_nodes)) { | |
3182 | pr_warn("ITS: No ITS available, not enabling LPIs\n"); | |
3183 | return -ENXIO; | |
3184 | } | |
3185 | ||
3186 | gic_rdists = rdists; | |
8fff27ae MZ |
3187 | err = its_alloc_lpi_tables(); |
3188 | if (err) | |
3189 | return err; | |
3190 | ||
3191 | list_for_each_entry(its, &its_nodes, entry) | |
3192 | has_v4 |= its->is_v4; | |
3193 | ||
3194 | if (has_v4 & rdists->has_vlpis) { | |
3195 | if (its_init_vpe_domain()) { | |
3196 | rdists->has_vlpis = false; | |
3197 | pr_err("ITS: Disabling GICv4 support\n"); | |
3198 | } | |
3199 | } | |
3200 | ||
3201 | return 0; | |
4c21f3c2 | 3202 | } |