// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"


#define SPA_PASID_BITS		15
#define SPA_PASID_MAX		((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK		SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG	22 /* Each SPA is 4 MB */

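/*
 * The SPA_CFG_* and SPA_XSL_* bits below use the Power ISA MSB-0
 * convention: (1ull << (63 - n)) sets what the hardware documentation
 * calls bit n of the 64-bit register.
 */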
#define SPA_CFG_SF		(1ull << (63-0))
#define SPA_CFG_TA		(1ull << (63-1))
#define SPA_CFG_HV		(1ull << (63-3))
#define SPA_CFG_UV		(1ull << (63-4))
#define SPA_CFG_XLAT_hpt	(0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh	(2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror	(3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR		(1ull << (63-49))
#define SPA_CFG_TC		(1ull << (63-54))
#define SPA_CFG_DR		(1ull << (63-59))

#define SPA_XSL_TF		(1ull << (63-3))  /* Translation fault */
#define SPA_XSL_S		(1ull << (63-38)) /* Store operation */

#define SPA_PE_VALID		0x80000000


struct pe_data {
	struct mm_struct *mm;
	/* callback to trigger when a translation fault occurs */
	void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
	/* opaque pointer to be passed to the above callback */
	void *xsl_err_data;
	struct rcu_head rcu;
};

struct spa {
	struct ocxl_process_element *spa_mem;
	int spa_order;
	struct mutex spa_lock;
	struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
	char *irq_name;
	int virq;
	void __iomem *reg_dsisr;
	void __iomem *reg_dar;
	void __iomem *reg_tfc;
	void __iomem *reg_pe_handle;
	/*
	 * The following fields are used by the memory fault
	 * interrupt handler. We can only have one interrupt at a
	 * time. The NPU won't raise another interrupt until the
	 * previous one has been ack'd by writing to the TFC register.
	 */
	struct xsl_fault {
		struct work_struct fault_work;
		u64 pe;
		u64 dsisr;
		u64 dar;
		struct pe_data pe_data;
	} xsl_fault;
};

/*
 * An opencapi link can be used by several PCI functions. We have
 * one link per device slot.
 *
 * A linked list of opencapi links should suffice, as there's a
 * limited number of opencapi slots on a system and lookup is only
 * done when the device is probed.
 */
struct link {
	struct list_head list;
	struct kref ref;
	int domain;
	int bus;
	int dev;
	atomic_t irq_available;
	struct spa *spa;
	void *platform_data;
};
static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);

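/*
 * Possible responses when acknowledging a translation fault. Only
 * RESTART and ADDRESS_ERROR are ever written to the TFC register, as
 * continue is not supported (see ack_irq() below).
 */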
enum xsl_response {
	CONTINUE,
	ADDRESS_ERROR,
	RESTART,
};


static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
	u64 reg;

	*dsisr = in_be64(spa->reg_dsisr);
	*dar = in_be64(spa->reg_dar);
	reg = in_be64(spa->reg_pe_handle);
	*pe = reg & SPA_PE_MASK;
}

static void ack_irq(struct spa *spa, enum xsl_response r)
{
	u64 reg = 0;

	/* continue is not supported */
	if (r == RESTART)
		reg = PPC_BIT(31);
	else if (r == ADDRESS_ERROR)
		reg = PPC_BIT(30);
	else
		WARN(1, "Invalid irq response %d\n", r);

	if (reg)
		out_be64(spa->reg_tfc, reg);
}

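/*
 * Bottom half of the translation fault handler. It runs from a work
 * queue, in process context, so it may sleep while faulting the page
 * in through copro_handle_mm_fault().
 */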
static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
	unsigned int flt = 0;
	unsigned long access, flags, inv_flags = 0;
	enum xsl_response r;
	struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
					fault_work);
	struct spa *spa = container_of(fault, struct spa, xsl_fault);
	int rc;

	/*
	 * We need to release a reference on the mm whenever exiting this
	 * function (taken in the memory fault interrupt handler)
	 */
	rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
				&flt);
	if (rc) {
		pr_debug("copro_handle_mm_fault failed: %d\n", rc);
		if (fault->pe_data.xsl_err_cb) {
			fault->pe_data.xsl_err_cb(
				fault->pe_data.xsl_err_data,
				fault->dar, fault->dsisr);
		}
		r = ADDRESS_ERROR;
		goto ack;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash
		 * since current->trap is not a 0x400 or 0x300, so
		 * just call hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (fault->dsisr & SPA_XSL_S)
			access |= _PAGE_WRITE;

		if (REGION_ID(fault->dar) != USER_REGION_ID)
			access |= _PAGE_PRIVILEGED;

		local_irq_save(flags);
		hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
			inv_flags);
		local_irq_restore(flags);
	}
	r = RESTART;
ack:
	mmdrop(fault->pe_data.mm);
	ack_irq(spa, r);
}

static irqreturn_t xsl_fault_handler(int irq, void *data)
{
	struct link *link = (struct link *) data;
	struct spa *spa = link->spa;
	u64 dsisr, dar, pe_handle;
	struct pe_data *pe_data;
	struct ocxl_process_element *pe;
	int lpid, pid, tid;

	read_irq(spa, &dsisr, &dar, &pe_handle);

	WARN_ON(pe_handle > SPA_PE_MASK);
	pe = spa->spa_mem + pe_handle;
	lpid = be32_to_cpu(pe->lpid);
	pid = be32_to_cpu(pe->pid);
	tid = be32_to_cpu(pe->tid);
	/* We could be reading all null values here if the PE is being
	 * removed while an interrupt kicks in. It's not supposed to
	 * happen if the driver notified the AFU to terminate the
	 * PASID, and the AFU waited for pending operations before
	 * acknowledging. But even if it happens, we won't find a
	 * memory context below and fail silently, so it should be ok.
	 */
	if (!(dsisr & SPA_XSL_TF)) {
		WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}

	rcu_read_lock();
	pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		/*
		 * Could only happen if the driver didn't notify the
		 * AFU about PASID termination before removing the PE,
		 * or the AFU didn't wait for all memory access to
		 * have completed.
		 *
		 * Either way, we fail early, but we shouldn't log an
		 * error message, as it is a valid (if unexpected)
		 * scenario
		 */
		rcu_read_unlock();
		pr_debug("Unknown mm context for xsl interrupt\n");
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}
	WARN_ON(pe_data->mm->context.id != pid);

	spa->xsl_fault.pe = pe_handle;
	spa->xsl_fault.dar = dar;
	spa->xsl_fault.dsisr = dsisr;
	spa->xsl_fault.pe_data = *pe_data;
	mmgrab(pe_data->mm); /* mm count is released by bottom half */

	rcu_read_unlock();
	schedule_work(&spa->xsl_fault.fault_work);
	return IRQ_HANDLED;
}

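/*
 * Thin wrappers around the platform code mapping/unmapping the XSL
 * fault registers (DSISR, DAR, TFC and PE handle) for the link.
 */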
static void unmap_irq_registers(struct spa *spa)
{
	pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
			spa->reg_pe_handle);
}

static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
	return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
			&spa->reg_tfc, &spa->reg_pe_handle);
}

static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
{
	struct spa *spa = link->spa;
	int rc;
	int hwirq;

	rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
	if (rc)
		return rc;

	rc = map_irq_registers(dev, spa);
	if (rc)
		return rc;

	spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
				link->domain, link->bus, link->dev);
	if (!spa->irq_name) {
		unmap_irq_registers(spa);
		dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
		return -ENOMEM;
	}
	/*
	 * At some point, we'll need to look into allowing a higher
	 * number of interrupts. Could we have an IRQ domain per link?
	 */
	spa->virq = irq_create_mapping(NULL, hwirq);
	if (!spa->virq) {
		kfree(spa->irq_name);
		unmap_irq_registers(spa);
		dev_err(&dev->dev,
			"irq_create_mapping failed for translation interrupt\n");
		return -EINVAL;
	}

	dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

	rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
			link);
	if (rc) {
		irq_dispose_mapping(spa->virq);
		kfree(spa->irq_name);
		unmap_irq_registers(spa);
		dev_err(&dev->dev,
			"request_irq failed for translation interrupt: %d\n",
			rc);
		return -EINVAL;
	}
	return 0;
}

static void release_xsl_irq(struct link *link)
{
	struct spa *spa = link->spa;

	if (spa->virq) {
		free_irq(spa->virq, link);
		irq_dispose_mapping(spa->virq);
	}
	kfree(spa->irq_name);
	unmap_irq_registers(spa);
}

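/*
 * The SPA is a 4 MB table of 128-byte process elements: 2^22 / 2^7 =
 * 32768 entries, matching the 15-bit PASID range (SPA_PASID_MAX + 1).
 */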
static int alloc_spa(struct pci_dev *dev, struct link *link)
{
	struct spa *spa;

	spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
	if (!spa)
		return -ENOMEM;

	mutex_init(&spa->spa_lock);
	INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
	INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

	spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
	spa->spa_mem = (struct ocxl_process_element *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
	if (!spa->spa_mem) {
		dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
		kfree(spa);
		return -ENOMEM;
	}
	pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
		link->dev, spa->spa_mem);

	link->spa = spa;
	return 0;
}

static void free_spa(struct link *link)
{
	struct spa *spa = link->spa;

	pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
		link->dev);

	if (spa && spa->spa_mem) {
		free_pages((unsigned long) spa->spa_mem, spa->spa_order);
		kfree(spa);
		link->spa = NULL;
	}
}

static int alloc_link(struct pci_dev *dev, int PE_mask, struct link **out_link)
{
	struct link *link;
	int rc;

	link = kzalloc(sizeof(struct link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	kref_init(&link->ref);
	link->domain = pci_domain_nr(dev->bus);
	link->bus = dev->bus->number;
	link->dev = PCI_SLOT(dev->devfn);
	atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);

	rc = alloc_spa(dev, link);
	if (rc)
		goto err_free;

	rc = setup_xsl_irq(dev, link);
	if (rc)
		goto err_spa;

	/* platform specific hook */
	rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
				&link->platform_data);
	if (rc)
		goto err_xsl_irq;

	*out_link = link;
	return 0;

err_xsl_irq:
	release_xsl_irq(link);
err_spa:
	free_spa(link);
err_free:
	kfree(link);
	return rc;
}

static void free_link(struct link *link)
{
	release_xsl_irq(link);
	free_spa(link);
	kfree(link);
}

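/*
 * Find the link used by the device's slot, taking a reference on it,
 * or allocate a new one if this is the first function of the device
 * to be probed.
 */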
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
	int rc = 0;
	struct link *link;

	mutex_lock(&links_list_lock);
	list_for_each_entry(link, &links_list, list) {
		/* The functions of a device all share the same link */
		if (link->domain == pci_domain_nr(dev->bus) &&
			link->bus == dev->bus->number &&
			link->dev == PCI_SLOT(dev->devfn)) {
			kref_get(&link->ref);
			*link_handle = link;
			goto unlock;
		}
	}
	rc = alloc_link(dev, PE_mask, &link);
	if (rc)
		goto unlock;

	list_add(&link->list, &links_list);
	*link_handle = link;
unlock:
	mutex_unlock(&links_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);

static void release_xsl(struct kref *ref)
{
	struct link *link = container_of(ref, struct link, ref);

	list_del(&link->list);
	/* call platform code before releasing data */
	pnv_ocxl_spa_release(link->platform_data);
	free_link(link);
}

void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
	struct link *link = (struct link *) link_handle;

	mutex_lock(&links_list_lock);
	kref_put(&link->ref, release_xsl);
	mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);

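/*
 * Compute the config_state word of a process element. A kernel
 * context (flagged by pidr == 0 in ocxl_link_add_pe()) doesn't get
 * the problem state (PR) bit and takes its 64-bit (SF) flag from the
 * MSR rather than from the thread flags.
 */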
static u64 calculate_cfg_state(bool kernel)
{
	u64 state;

	state = SPA_CFG_DR;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		state |= SPA_CFG_TC;
	if (radix_enabled())
		state |= SPA_CFG_XLAT_ror;
	else
		state |= SPA_CFG_XLAT_hpt;
	state |= SPA_CFG_HV;
	if (kernel) {
		if (mfmsr() & MSR_SF)
			state |= SPA_CFG_SF;
	} else {
		state |= SPA_CFG_PR;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			state |= SPA_CFG_SF;
	}
	return state;
}

int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
		u64 amr, struct mm_struct *mm,
		void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
		void *xsl_err_data)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc = 0;
	struct pe_data *pe_data;

	BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	mutex_lock(&spa->spa_lock);
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	if (pe->software_state) {
		rc = -EBUSY;
		goto unlock;
	}

	pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
	if (!pe_data) {
		rc = -ENOMEM;
		goto unlock;
	}

	pe_data->mm = mm;
	pe_data->xsl_err_cb = xsl_err_cb;
	pe_data->xsl_err_data = xsl_err_data;

	memset(pe, 0, sizeof(struct ocxl_process_element));
	pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
	pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	pe->pid = cpu_to_be32(pidr);
	pe->tid = cpu_to_be32(tidr);
	pe->amr = cpu_to_be64(amr);
	pe->software_state = cpu_to_be32(SPA_PE_VALID);

	mm_context_add_copro(mm);
	/*
	 * Barrier is to make sure PE is visible in the SPA before it
	 * is used by the device. It also helps with the global TLBI
	 * invalidation
	 */
	mb();
	radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);

	/*
	 * The mm must stay valid for as long as the device uses it. We
	 * lower the count when the context is removed from the SPA.
	 *
	 * We grab mm_count (and not mm_users), as we don't want to
	 * end up in a circular dependency if a process mmaps its
	 * mmio, therefore incrementing the file ref count when
	 * calling mmap(), and forgets to unmap before exiting. In
	 * that scenario, when the kernel handles the death of the
	 * process, the file is not cleaned because unmap was not
	 * called, and the mm wouldn't be freed because we would still
	 * have a reference on mm_users. Incrementing mm_count solves
	 * the problem.
	 */
	mmgrab(mm);
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);

int ocxl_link_remove_pe(void *link_handle, int pasid)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	struct pe_data *pe_data;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	/*
	 * About synchronization with our memory fault handler:
	 *
	 * Before removing the PE, the driver is supposed to have
	 * notified the AFU, which should have cleaned up and made
	 * sure the PASID is no longer in use, including pending
	 * interrupts. However, there's no way to be sure...
	 *
	 * We clear the PE and remove the context from our radix
	 * tree. From that point on, any new interrupt for that
	 * context will fail silently, which is ok. As mentioned
	 * above, that's not expected, but it could happen if the
	 * driver or AFU didn't do the right thing.
	 *
	 * There could still be a bottom half running, but we don't
	 * need to wait/flush, as it is managing a reference count on
	 * the mm it reads from the radix tree.
	 */
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
		rc = -EINVAL;
		goto unlock;
	}

	memset(pe, 0, sizeof(struct ocxl_process_element));
	/*
	 * The barrier makes sure the PE is removed from the SPA
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe(link->platform_data, pe_handle);
	WARN_ON(rc);

	pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		WARN(1, "Couldn't find pe data when removing PE\n");
	} else {
		mm_context_remove_copro(pe_data->mm);
		mmdrop(pe_data->mm);
		kfree_rcu(pe_data, rcu);
	}
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);

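/*
 * AFU interrupts are accounted per link: irq_available starts at
 * MAX_IRQ_PER_LINK and atomic_dec_if_positive() enforces the limit
 * without needing a lock.
 */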
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, u64 *trigger_addr)
{
	struct link *link = (struct link *) link_handle;
	int rc, irq;
	u64 addr;

	if (atomic_dec_if_positive(&link->irq_available) < 0)
		return -ENOSPC;

	rc = pnv_ocxl_alloc_xive_irq(&irq, &addr);
	if (rc) {
		atomic_inc(&link->irq_available);
		return rc;
	}

	*hw_irq = irq;
	*trigger_addr = addr;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);

630 | |
631 | void ocxl_link_free_irq(void *link_handle, int hw_irq) | |
632 | { | |
633 | struct link *link = (struct link *) link_handle; | |
634 | ||
635 | pnv_ocxl_free_xive_irq(hw_irq); | |
636 | atomic_inc(&link->irq_available); | |
637 | } | |
0f2d7994 | 638 | EXPORT_SYMBOL_GPL(ocxl_link_free_irq); |