/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>

#include "cxl.h"

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct address_space *mapping;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return ERR_CAST(afu);

	ctx = cxl_context_alloc();
	if (!ctx) {
		rc = -ENOMEM;
		goto err_dev;
	}

	ctx->kernelapi = true;

	/*
	 * Make our own address space since we won't have one from the
	 * filesystem like the user api has, and even if we do associate a file
	 * with this context we don't want to use the global anonymous inode's
	 * address space as that can invalidate unrelated users:
	 */
	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
	if (!mapping) {
		rc = -ENOMEM;
		goto err_ctx;
	}
	address_space_init_once(mapping);

	/* Make it a slave context.  We can promote it later? */
	rc = cxl_context_init(ctx, afu, false, mapping);
	if (rc)
		goto err_mapping;

	return ctx;

err_mapping:
	kfree(mapping);
err_ctx:
	kfree(ctx);
err_dev:
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);

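/*
 * Illustrative lifecycle sketch, not part of the original file: how an AFU
 * driver bound to a cxl-attached PCI function might obtain and later dispose
 * of a kernel API context. my_afu_probe is a hypothetical driver function.
 *
 *	static int my_afu_probe(struct pci_dev *dev)
 *	{
 *		struct cxl_context *ctx;
 *
 *		ctx = cxl_dev_context_init(dev);
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *
 *		// ... allocate AFU interrupts, start the context, etc. ...
 *
 *		// On teardown, once the context is no longer STARTED:
 *		return cxl_release_context(ctx);
 *	}
 */
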
struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	/* Walk the allocated IRQ ranges to find the hwirq backing AFU irq 'num' */
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range)
			return ctx->irqs.offset[r] + num;
		num -= range;
	}
	return 0;
}

int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
	if (!ctx)
		return -EINVAL;

	ctx->priv = priv;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);

void *cxl_get_priv(struct cxl_context *ctx)
{
	if (!ctx)
		return ERR_PTR(-EINVAL);

	return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	int res;
	irq_hw_number_t hwirq;

	if (num == 0)
		num = ctx->afu->pp_irqs;
	res = afu_allocate_irqs(ctx, num);
	if (res)
		return res;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		/* In a guest, the PSL interrupt is not multiplexed. It was
		 * allocated above, and we need to set its handler
		 */
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq)
			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
	}

	if (ctx->status == STARTED) {
		if (cxl_ops->update_ivtes)
			cxl_ops->update_ivtes(ctx);
		else
			WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
	}

	return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

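/*
 * Illustrative sketch, not part of the original file: allocating this
 * context's AFU interrupts and wiring a handler to AFU interrupt 1.
 * my_afu_irq_handler and my_afu are hypothetical driver names, and the
 * return-value checks assume cxl_map_afu_irq() hands back the mapped
 * Linux IRQ number (0 or negative on failure).
 *
 *	static irqreturn_t my_afu_irq_handler(int irq, void *cookie)
 *	{
 *		// cookie is whatever was passed to cxl_map_afu_irq()
 *		return IRQ_HANDLED;
 *	}
 *
 *	rc = cxl_allocate_afu_irqs(ctx, 0);	// 0 => default (afu->pp_irqs)
 *	if (rc)
 *		goto err;
 *	rc = cxl_map_afu_irq(ctx, 1, my_afu_irq_handler, my_afu, "my_afu");
 *	if (rc <= 0)
 *		goto err_free;
 *
 *	// Teardown, in reverse order:
 *	//	cxl_unmap_afu_irq(ctx, 1, my_afu);
 *	//	cxl_free_afu_irqs(ctx);
 */
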
void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	virq = irq_find_mapping(NULL, hwirq);
	if (!virq)
		return;

	cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);

/*
 * Start a context.
 * Code here similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
		kernel = false;
		ctx->real_mode = false;
	}

	cxl_ctx_get();

	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
		put_pid(ctx->glpid);
		put_pid(ctx->pid);
		ctx->glpid = ctx->pid = NULL;
		cxl_ctx_put();
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);

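/*
 * Illustrative sketch, not part of the original file: starting a kernel
 * context with a work element descriptor and handing the process element
 * back to the hardware. The WED layout is AFU-specific; my_afu_build_wed
 * is a hypothetical helper.
 *
 *	u64 wed = my_afu_build_wed(my_afu);
 *	int pe, rc;
 *
 *	// task == NULL attaches this as a kernel context (no user MM)
 *	rc = cxl_start_context(ctx, wed, NULL);
 *	if (rc)
 *		goto err;
 *
 *	pe = cxl_process_element(ctx);	// handle the AFU uses to target us
 *	// ... program the AFU with 'pe', run work, then detach:
 *	cxl_stop_context(ctx);
 */
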
int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);

int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
{
	if (ctx->status == STARTED) {
		/*
		 * We could potentially update the PE and issue an update LLCMD
		 * to support this, but it doesn't seem to have a good use case
		 * since it's trivial to just create a second kernel context
		 * with different translation modes, so until someone convinces
		 * me otherwise:
		 */
		return -EBUSY;
	}

	ctx->real_mode = real_mode;
	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_translation_mode);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
		    loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/*
	 * Patch the file ops.  Needs to be careful that this is reentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	file = anon_inode_getfile("cxl", fops, ctx, flags);
	if (IS_ERR(file))
		goto err_fd;

	file->f_mapping = ctx->mapping;

	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);

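/*
 * Illustrative sketch, not part of the original file: exporting a kernel API
 * context to userspace. my_afu_fops is a hypothetical file_operations that
 * wraps the cxl_fd_* helpers above; entries it leaves NULL are patched with
 * the default afu_fops.
 *
 *	struct file *file;
 *	int fd;
 *
 *	file = cxl_get_fd(ctx, &my_afu_fops, &fd);
 *	if (IS_ERR_OR_NULL(file))
 *		goto err;
 *	// ... finish driver-specific setup, then publish the fd ...
 *	fd_install(fd, file);
 *	return fd;
 */
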
struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

void cxl_set_driver_ops(struct cxl_context *ctx,
			struct cxl_afu_driver_ops *ops)
{
	WARN_ON(!ops->fetch_event || !ops->event_delivered);
	atomic_set(&ctx->afu_driver_events, 0);
	ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);

void cxl_context_events_pending(struct cxl_context *ctx,
				unsigned int new_events)
{
	atomic_add(new_events, &ctx->afu_driver_events);
	wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);

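/*
 * Illustrative sketch, not part of the original file: an AFU driver queueing
 * its own events for delivery through the context's file descriptor. The
 * callback names are hypothetical and their prototypes are assumed to match
 * struct cxl_afu_driver_ops; both fetch_event and event_delivered must be set.
 *
 *	static struct cxl_event_afu_driver_reserved *
 *	my_fetch_event(struct cxl_context *ctx)
 *	{
 *		// return the next pending driver event for this context
 *	}
 *
 *	static void my_event_delivered(struct cxl_context *ctx,
 *				       struct cxl_event_afu_driver_reserved *event,
 *				       int rc)
 *	{
 *		// free or requeue 'event' depending on rc
 *	}
 *
 *	static struct cxl_afu_driver_ops my_driver_ops = {
 *		.fetch_event	 = my_fetch_event,
 *		.event_delivered = my_event_delivered,
 *	};
 *
 *	cxl_set_driver_ops(ctx, &my_driver_ops);
 *	// later, when the hardware posts n new events:
 *	cxl_context_events_pending(ctx, n);
 */
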
int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* code taken from afu_ioctl_start_work */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

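/*
 * Illustrative sketch, not part of the original file: attaching the current
 * task through the start_work path instead of calling cxl_allocate_afu_irqs()
 * and cxl_start_context() separately. Leaving CXL_START_WORK_NUM_IRQS out of
 * flags requests the AFU's default number of interrupts.
 *
 *	struct cxl_ioctl_start_work work = {
 *		.work_element_descriptor = wed,	// AFU-specific WED
 *		.flags = 0,
 *	};
 *
 *	rc = cxl_start_work(ctx, &work);
 */
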
void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	if (ctx->status != STARTED)
		return NULL;

	pr_devel("%s: psn_phys%llx size:%llx\n",
		 __func__, ctx->psn_phys, ctx->psn_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

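/*
 * Illustrative sketch, not part of the original file: mapping the per-context
 * problem state area once the context has been started, and unmapping it on
 * teardown. MY_AFU_REG is a hypothetical AFU-specific register offset.
 *
 *	void __iomem *psa;
 *
 *	psa = cxl_psa_map(ctx);		// returns NULL unless ctx is STARTED
 *	if (!psa)
 *		goto err;
 *	// ... access AFU registers, e.g. out_be64(psa + MY_AFU_REG, val); ...
 *	cxl_psa_unmap(psa);
 */
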
void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);

int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_ops->afu_reset(afu);
	if (rc)
		return rc;

	return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);

	if (IS_ERR(afu))
		return -ENODEV;

	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);