/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>
#include <asm/pnv-pci.h>
#include <linux/msi.h>

#include "cxl.h"

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
        struct address_space *mapping;
        struct cxl_afu *afu;
        struct cxl_context *ctx;
        int rc;

        afu = cxl_pci_to_afu(dev);
        if (IS_ERR(afu))
                return ERR_CAST(afu);

        ctx = cxl_context_alloc();
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->kernelapi = true;

        /*
         * Make our own address space since we won't have one from the
         * filesystem like the user api has, and even if we do associate a file
         * with this context we don't want to use the global anonymous inode's
         * address space as that can invalidate unrelated users:
         */
        mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
        if (!mapping) {
                rc = -ENOMEM;
                goto err_ctx;
        }
        address_space_init_once(mapping);

        /* Make it a slave context. We can promote it later? */
        rc = cxl_context_init(ctx, afu, false, mapping);
        if (rc)
                goto err_mapping;

        return ctx;

err_mapping:
        kfree(mapping);
err_ctx:
        kfree(ctx);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);

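/*
 * Illustrative sketch (not part of this driver): an AFU driver built on this
 * kernel API would typically pair cxl_dev_context_init() with
 * cxl_release_context() in its probe/teardown path. "my_afu_probe" is a
 * hypothetical caller, shown only to make the intended usage concrete:
 *
 *	static int my_afu_probe(struct pci_dev *dev)
 *	{
 *		struct cxl_context *ctx = cxl_dev_context_init(dev);
 *
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *		...
 *		return cxl_release_context(ctx);
 *	}
 */
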
struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
        return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
        if (ctx->status >= STARTED)
                return -EBUSY;

        cxl_context_free(ctx);
        return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
        __u16 range;
        int r;

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                range = ctx->irqs.range[r];
                if (num < range)
                        return ctx->irqs.offset[r] + num;
                num -= range;
        }
        return 0;
}

int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
{
        if (*ctx == NULL || *afu_irq == 0) {
                *afu_irq = 1;
                *ctx = cxl_get_context(pdev);
        } else {
                (*afu_irq)++;
                if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
                        *ctx = list_next_entry(*ctx, extra_irq_contexts);
                        *afu_irq = 1;
                }
        }
        return cxl_find_afu_irq(*ctx, *afu_irq);
}
/* Exported via cxl_base */

int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
        if (!ctx)
                return -EINVAL;

        ctx->priv = priv;
        return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);

void *cxl_get_priv(struct cxl_context *ctx)
{
        if (!ctx)
                return ERR_PTR(-EINVAL);

        return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
        int res;
        irq_hw_number_t hwirq;

        if (num == 0)
                num = ctx->afu->pp_irqs;
        res = afu_allocate_irqs(ctx, num);
        if (res)
                return res;

        if (!cpu_has_feature(CPU_FTR_HVMODE)) {
                /* In a guest, the PSL interrupt is not multiplexed. It was
                 * allocated above, and we need to set its handler
                 */
                hwirq = cxl_find_afu_irq(ctx, 0);
                if (hwirq)
                        cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
        }

        if (ctx->status == STARTED) {
                if (cxl_ops->update_ivtes)
                        cxl_ops->update_ivtes(ctx);
                else
                        WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
        }

        return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        unsigned int virq;

        if (!cpu_has_feature(CPU_FTR_HVMODE)) {
                hwirq = cxl_find_afu_irq(ctx, 0);
                if (hwirq) {
                        virq = irq_find_mapping(NULL, hwirq);
                        if (virq)
                                cxl_unmap_irq(virq, ctx);
                }
        }
        afu_irq_name_free(ctx);
        cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
                    irq_handler_t handler, void *cookie, char *name)
{
        irq_hw_number_t hwirq;

        /*
         * Find the interrupt we are to register.
         */
        hwirq = cxl_find_afu_irq(ctx, num);
        if (!hwirq)
                return -ENOENT;

        return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

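/*
 * Illustrative sketch (not part of this driver): a caller would normally
 * allocate a block of AFU interrupts on the context and then register a
 * handler for each one it cares about. The handler, cookie and count below
 * are hypothetical:
 *
 *	rc = cxl_allocate_afu_irqs(ctx, 4);
 *	if (!rc)
 *		rc = cxl_map_afu_irq(ctx, 1, my_irq_handler, my_cookie, "my_afu");
 *	...
 *	cxl_unmap_afu_irq(ctx, 1, my_cookie);
 *	cxl_free_afu_irqs(ctx);
 */
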
void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
        irq_hw_number_t hwirq;
        unsigned int virq;

        hwirq = cxl_find_afu_irq(ctx, num);
        if (!hwirq)
                return;

        virq = irq_find_mapping(NULL, hwirq);
        if (!virq)
                return;

        cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);

/*
 * Start a context.
 * Code here similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
                      struct task_struct *task)
{
        int rc = 0;
        bool kernel = true;

        pr_devel("%s: pe: %i\n", __func__, ctx->pe);

        mutex_lock(&ctx->status_mutex);
        if (ctx->status == STARTED)
                goto out; /* already started */

        /*
         * Increment the mapped context count for the adapter. This also checks
         * if adapter_context_lock is taken.
         */
        rc = cxl_adapter_context_get(ctx->afu->adapter);
        if (rc)
                goto out;

        if (task) {
                ctx->pid = get_task_pid(task, PIDTYPE_PID);
                ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
                kernel = false;
                ctx->real_mode = false;
        }

        cxl_ctx_get();

        if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
                put_pid(ctx->glpid);
                put_pid(ctx->pid);
                ctx->glpid = ctx->pid = NULL;
                cxl_adapter_context_put(ctx->afu->adapter);
                cxl_ctx_put();
                goto out;
        }

        ctx->status = STARTED;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);

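/*
 * Illustrative sketch (not part of this driver): a kernel user usually starts
 * the context with a work element descriptor and a NULL task, so the AFU is
 * attached with kernel translation. "wed" is a hypothetical descriptor built
 * by the caller:
 *
 *	rc = cxl_start_context(ctx, wed, NULL);
 *	if (rc)
 *		return rc;
 *	...
 *	cxl_stop_context(ctx);
 */
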
int cxl_process_element(struct cxl_context *ctx)
{
        return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context. Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
        return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
        ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);

int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
{
        if (ctx->status == STARTED) {
                /*
                 * We could potentially update the PE and issue an update LLCMD
                 * to support this, but it doesn't seem to have a good use case
                 * since it's trivial to just create a second kernel context
                 * with different translation modes, so until someone convinces
                 * me otherwise:
                 */
                return -EBUSY;
        }

        ctx->real_mode = real_mode;
        return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_translation_mode);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
        return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
        return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
        return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
        return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
                    loff_t *off)
{
        return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
                        int *fd)
{
        struct file *file;
        int rc, flags, fdtmp;

        flags = O_RDWR | O_CLOEXEC;

        /* This code is similar to anon_inode_getfd() */
        rc = get_unused_fd_flags(flags);
        if (rc < 0)
                return ERR_PTR(rc);
        fdtmp = rc;

        /*
         * Patch the file ops. Needs to be careful that this is reentrant safe.
         */
        if (fops) {
                PATCH_FOPS(open);
                PATCH_FOPS(poll);
                PATCH_FOPS(read);
                PATCH_FOPS(release);
                PATCH_FOPS(unlocked_ioctl);
                PATCH_FOPS(compat_ioctl);
                PATCH_FOPS(mmap);
        } else /* use default ops */
                fops = (struct file_operations *)&afu_fops;

        file = anon_inode_getfile("cxl", fops, ctx, flags);
        if (IS_ERR(file))
                goto err_fd;

        file->f_mapping = ctx->mapping;

        *fd = fdtmp;
        return file;

err_fd:
        put_unused_fd(fdtmp);
        return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);

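/*
 * Illustrative sketch (not part of this driver): a driver exposing a context
 * to userspace would get the file and fd here, finish any setup that can
 * still fail, and only then publish the fd with fd_install(). "my_fops" is a
 * hypothetical file_operations owned by that driver:
 *
 *	file = cxl_get_fd(ctx, &my_fops, &fd);
 *	if (IS_ERR_OR_NULL(file))
 *		return file ? PTR_ERR(file) : -ENOMEM;
 *	fd_install(fd, file);
 */
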
struct cxl_context *cxl_fops_get_context(struct file *file)
{
        return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

void cxl_set_driver_ops(struct cxl_context *ctx,
                        struct cxl_afu_driver_ops *ops)
{
        WARN_ON(!ops->fetch_event || !ops->event_delivered);
        atomic_set(&ctx->afu_driver_events, 0);
        ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);

void cxl_context_events_pending(struct cxl_context *ctx,
                                unsigned int new_events)
{
        atomic_add(new_events, &ctx->afu_driver_events);
        wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);

int cxl_start_work(struct cxl_context *ctx,
                   struct cxl_ioctl_start_work *work)
{
        int rc;

        /* code taken from afu_ioctl_start_work */
        if (!(work->flags & CXL_START_WORK_NUM_IRQS))
                work->num_interrupts = ctx->afu->pp_irqs;
        else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
                 (work->num_interrupts > ctx->afu->irqs_max)) {
                return -EINVAL;
        }

        rc = afu_register_irqs(ctx, work->num_interrupts);
        if (rc)
                return rc;

        rc = cxl_start_context(ctx, work->work_element_descriptor, current);
        if (rc < 0) {
                afu_release_irqs(ctx, ctx);
                return rc;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
        if (ctx->status != STARTED)
                return NULL;

        pr_devel("%s: psn_phys%llx size:%llx\n",
                 __func__, ctx->psn_phys, ctx->psn_size);
        return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

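/*
 * Illustrative sketch (not part of this driver): once a context is STARTED, a
 * caller may map the problem state area for MMIO access to the AFU and must
 * unmap it again before stopping the context:
 *
 *	void __iomem *psa = cxl_psa_map(ctx);
 *	if (!psa)
 *		return -EIO;
 *	...
 *	cxl_psa_unmap(psa);
 */
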
void cxl_psa_unmap(void __iomem *addr)
{
        iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);

int cxl_afu_reset(struct cxl_context *ctx)
{
        struct cxl_afu *afu = ctx->afu;
        int rc;

        rc = cxl_ops->afu_reset(afu);
        if (rc)
                return rc;

        return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
                                  bool perst_reloads_same_image)
{
        afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
        struct cxl_afu *afu = cxl_pci_to_afu(dev);

        if (IS_ERR(afu))
                return -ENODEV;

        return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);

int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
{
        struct cxl_afu *afu = cxl_pci_to_afu(dev);

        if (IS_ERR(afu))
                return -ENODEV;

        if (irqs > afu->adapter->user_irqs)
                return -EINVAL;

        /* Limit user_irqs to prevent the user increasing this via sysfs */
        afu->adapter->user_irqs = irqs;
        afu->irqs_max = irqs;

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);

int cxl_get_max_irqs_per_process(struct pci_dev *dev)
{
        struct cxl_afu *afu = cxl_pci_to_afu(dev);

        if (IS_ERR(afu))
                return -ENODEV;

        return afu->irqs_max;
}
EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);

/*
 * This is a special interrupt allocation routine called from the PHB's MSI
 * setup function. When capi interrupts are allocated in this manner they must
 * still be associated with a running context, but since the MSI APIs have no
 * way to specify this we use the default context associated with the device.
 *
 * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
 * interrupt number, so in order to overcome this their driver informs us of
 * the restriction by setting the maximum interrupts per context, and we
 * allocate additional contexts as necessary so that we can keep the AFU
 * interrupt number within the supported range.
 */
int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        struct cxl_context *ctx, *new_ctx, *default_ctx;
        int remaining;
        int rc;

        ctx = default_ctx = cxl_get_context(pdev);
        if (WARN_ON(!default_ctx))
                return -ENODEV;

        remaining = nvec;
        while (remaining > 0) {
                rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
                if (rc) {
                        pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
                        return rc;
                }
                remaining -= ctx->afu->irqs_max;

                if (ctx != default_ctx && default_ctx->status == STARTED) {
                        WARN_ON(cxl_start_context(ctx,
                                be64_to_cpu(default_ctx->elem->common.wed),
                                NULL));
                }

                if (remaining > 0) {
                        new_ctx = cxl_dev_context_init(pdev);
                        if (IS_ERR(new_ctx)) {
                                pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
                                return -ENOSPC;
                        }
                        list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
                        ctx = new_ctx;
                }
        }

        return 0;
}
/* Exported via cxl_base */

void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
        struct cxl_context *ctx, *pos, *tmp;

        ctx = cxl_get_context(pdev);
        if (WARN_ON(!ctx))
                return;

        cxl_free_afu_irqs(ctx);
        list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
                cxl_stop_context(pos);
                cxl_free_afu_irqs(pos);
                list_del(&pos->extra_irq_contexts);
                cxl_release_context(pos);
        }
}
/* Exported via cxl_base */