/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <rdma/ib.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "eprom.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_file_write(struct file *, const char __user *,
			       size_t, loff_t *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);

static u64 kvirt_to_phys(void *);
static int assign_ctxt(struct file *, struct hfi1_user_info *);
static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
static int user_init(struct file *);
static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *, int);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
			 struct hfi1_user_info *);
static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write = hfi1_file_write,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};
static struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};
/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};
/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
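
/*
 * Illustrative sketch (not part of the original driver): given the masks
 * and shifts above, a token for, say, the events page of context 3,
 * subcontext 1 decomposes as follows:
 *
 *	u64 token = HFI1_MMAP_TOKEN(EVENTS, 3, 1, addr);
 *
 *	bits 63..32	HFI1_MMAP_MAGIC (0xdabbad00)
 *	bits 27..24	memory type (EVENTS, from enum mmap_types)
 *	bits 23..16	context number (3)
 *	bits 15..12	subcontext number (1)
 *	bits 11..0	offset_in_page(addr)
 *
 * User space hands the token back as the mmap() file offset, and
 * hfi1_file_mmap() unpacks it with HFI1_MMAP_TOKEN_GET().
 */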
#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)
static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	/* The real work is performed later in assign_ctxt() */
	fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
	if (fp->private_data) /* no cpu affinity by default */
		((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
	return fp->private_data ? 0 : -ENOMEM;
}
static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
			       size_t count, loff_t *offset)
{
	const struct hfi1_cmd __user *ucmd;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_cmd cmd;
	struct hfi1_user_info uinfo;
	struct hfi1_tid_info tinfo;
	unsigned long addr;
	ssize_t consumed = 0, copy = 0, ret = 0;
	void *dest = NULL;
	__u64 user_val = 0;
	int uctxt_required = 1;
	int must_be_root = 0;

	/* FIXME: This interface cannot continue out of staging */
	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
		return -EACCES;

	if (count < sizeof(cmd)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct hfi1_cmd __user *)data;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd);

	switch (cmd.type) {
	case HFI1_CMD_ASSIGN_CTXT:
		uctxt_required = 0;	/* assigned user context not required */
		copy = sizeof(uinfo);
		dest = &uinfo;
		break;
	case HFI1_CMD_SDMA_STATUS_UPD:
	case HFI1_CMD_CREDIT_UPD:
		copy = 0;
		break;
	case HFI1_CMD_TID_UPDATE:
	case HFI1_CMD_TID_FREE:
	case HFI1_CMD_TID_INVAL_READ:
		copy = sizeof(tinfo);
		dest = &tinfo;
		break;
	case HFI1_CMD_USER_INFO:
	case HFI1_CMD_RECV_CTRL:
	case HFI1_CMD_POLL_TYPE:
	case HFI1_CMD_ACK_EVENT:
	case HFI1_CMD_CTXT_INFO:
	case HFI1_CMD_SET_PKEY:
	case HFI1_CMD_CTXT_RESET:
		copy = 0;
		user_val = cmd.addr;
		break;
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		uctxt_required = 0;	/* assigned user context not required */
		must_be_root = 1;	/* validate user */
		copy = 0;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	/* If the command comes with user data, copy it. */
	if (copy) {
		if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	/*
	 * Make sure there is a uctxt when needed.
	 */
	if (uctxt_required && !uctxt) {
		ret = -EINVAL;
		goto bail;
	}

	/* only root can do these operations */
	if (must_be_root && !capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto bail;
	}

	switch (cmd.type) {
	case HFI1_CMD_ASSIGN_CTXT:
		ret = assign_ctxt(fp, &uinfo);
		if (ret < 0)
			goto bail;
		ret = setup_ctxt(fp);
		if (ret)
			goto bail;
		ret = user_init(fp);
		break;
	case HFI1_CMD_CTXT_INFO:
		ret = get_ctxt_info(fp, (void __user *)(unsigned long)
				    user_val, cmd.len);
		break;
	case HFI1_CMD_USER_INFO:
		ret = get_base_info(fp, (void __user *)(unsigned long)
				    user_val, cmd.len);
		break;
	case HFI1_CMD_SDMA_STATUS_UPD:
		break;
	case HFI1_CMD_CREDIT_UPD:
		if (uctxt && uctxt->sc)
			sc_return_credits(uctxt->sc);
		break;
	case HFI1_CMD_TID_UPDATE:
		ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
		if (!ret) {
			/*
			 * Copy the number of tidlist entries we used
			 * and the length of the buffer we registered.
			 * These fields are adjacent in the structure so
			 * we can copy them at the same time.
			 */
			addr = (unsigned long)cmd.addr +
				offsetof(struct hfi1_tid_info, tidcnt);
			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
					 sizeof(tinfo.tidcnt) +
					 sizeof(tinfo.length)))
				ret = -EFAULT;
		}
		break;
	case HFI1_CMD_TID_INVAL_READ:
		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
		if (ret)
			break;
		addr = (unsigned long)cmd.addr +
			offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_CMD_TID_FREE:
		ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
		if (ret)
			break;
		addr = (unsigned long)cmd.addr +
			offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_CMD_RECV_CTRL:
		ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
		break;
	case HFI1_CMD_POLL_TYPE:
		uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
		break;
	case HFI1_CMD_ACK_EVENT:
		ret = user_event_ack(uctxt, fd->subctxt, user_val);
		break;
	case HFI1_CMD_SET_PKEY:
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
		else
			ret = -EPERM;
		break;
	case HFI1_CMD_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc) {
			ret = -EINVAL;
			break;
		}
		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED)) {
			ret = -ENOLCK;
			break;
		}
		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN) {
				ret = -ENOLCK;
				break;
			}
			if (dd->flags & HFI1_FORCED_FREEZE) {
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				ret = -ENODEV;
				break;
			}
			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
				     uctxt->ctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		ret = handle_eprom_command(fp, &cmd);
		break;
	}

	if (ret >= 0)
		ret = consumed;
bail:
	return ret;
}
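
/*
 * Hedged usage sketch (not part of the original driver): user space
 * drives the command interface above by write()ing a struct hfi1_cmd,
 * optionally pointing cmd.addr at a command-specific payload. For
 * example, assigning a context might look like this (the uinfo field
 * values are illustrative):
 *
 *	struct hfi1_user_info uinfo = { .userversion = ... };
 *	struct hfi1_cmd cmd = {
 *		.type = HFI1_CMD_ASSIGN_CTXT,
 *		.len  = sizeof(uinfo),
 *		.addr = (__u64)&uinfo,
 *	};
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		err(1, "HFI1_CMD_ASSIGN_CTXT");
 *
 * On success the driver returns the number of bytes it consumed (the
 * command itself plus any copied-in payload).
 */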
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int ret = 0, done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq) {
		ret = -EIO;
		goto done;
	}

	if (!iter_is_iovec(from) || !dim) {
		ret = -EINVAL;
		goto done;
	}

	hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
		  fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
		ret = -ENOSPC;
		goto done;
	}

	while (dim) {
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			kiocb->ki_filp, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret)
			goto done;
		dim -= count;
		done += count;
		reqs++;
	}
done:
	return ret ? ret : reqs;
}
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, pfn;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			(type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memaddr = dd->cr_base[uctxt->numa_id].pa +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memaddr = uctxt->rcvhdrq_phys;
		memlen = uctxt->rcvhdrq_size;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffer needs to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			ret = remap_pfn_range(
				vma, addr,
				uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
				uctxt->egrbufs.buffers[i].len,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += uctxt->egrbufs.buffers[i].len;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)(dd->events +
					  ((uctxt->ctxt - dd->first_user_ctxt) *
					   HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memaddr = uctxt->rcvhdrqtailaddr_phys;
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
	if (vmf) {
		vma->vm_pgoff = pfn;
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
					 vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}
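
/*
 * Hedged usage sketch (not part of the original driver): each region
 * above is selected purely by the mmap token, so user space maps one by
 * handing a token from struct hfi1_base_info back as the file offset:
 *
 *	void *events = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
 *			    fd, binfo.events_bufbase);
 *
 * Chip resources (mapio) are mapped up front via [io_]remap_pfn_range();
 * vmalloc'ed buffers (vmf) are instead faulted in page by page through
 * vma_fault() below.
 */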
/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}
static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
	dd = uctxt->dd;
	mutex_lock(&hfi1_mutex);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata);

	/* release the cpu */
	hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;

	if (--uctxt->cnt) {
		uctxt->active_slaves &= ~(1 << fdata->subctxt);
		uctxt->subpid[fdata->subctxt] = 0;
		mutex_unlock(&hfi1_mutex);
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
	/*
	 * Reset context integrity checks to default.
	 * (writes to CSRs probably belong in chip.c)
	 */
	write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
			hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
	sc_disable(uctxt->sc);
	uctxt->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	dd->rcd[uctxt->ctxt] = NULL;

	hfi1_user_exp_rcv_free(fdata);
	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);

	uctxt->rcvwait_to = 0;
	uctxt->piowait_to = 0;
	uctxt->rcvnowait = 0;
	uctxt->pionowait = 0;
	uctxt->event_flags = 0;

	hfi1_stats.sps_ctxts--;
	if (++dd->freectxts == dd->num_user_contexts)
		aspm_enable_all(dd);
	mutex_unlock(&hfi1_mutex);
	hfi1_free_ctxtdata(dd, uctxt);
done:
	kfree(fdata);
	return 0;
}
/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
	int i_minor, ret = 0;
	unsigned int swmajor, swminor;

	swmajor = uinfo->userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->userversion & 0xffff;

	mutex_lock(&hfi1_mutex);
	/* First, let's check if we need to set up a shared context. */
	if (uinfo->subctxt_cnt) {
		struct hfi1_filedata *fd = fp->private_data;

		ret = find_shared_ctxt(fp, uinfo);
		if (ret < 0)
			goto done_unlock;
		if (ret)
			fd->rec_cpu_num = hfi1_get_proc_affinity(
				fd->uctxt->dd, fd->uctxt->numa_id);
	}

	/*
	 * We execute the following block if we couldn't find a
	 * shared context or if context sharing is not required.
	 */
	if (!ret) {
		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
		ret = get_user_context(fp, uinfo, i_minor);
	}
done_unlock:
	mutex_unlock(&hfi1_mutex);
done:
	return ret;
}
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
			    int devno)
{
	struct hfi1_devdata *dd = NULL;
	int devmax, npresent, nup;

	devmax = hfi1_count_units(&npresent, &nup);
	if (devno < 0 || devno >= devmax)
		return -ENODEV;
	if (!npresent || !nup)
		return -ENXIO;

	dd = hfi1_lookup(devno);
	if (!dd)
		return -ENODEV;
	else if (!dd->freectxts)
		return -EBUSY;

	return allocate_ctxt(fp, dd, uinfo);
}
static int find_shared_ctxt(struct file *fp,
			    const struct hfi1_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;

	devmax = hfi1_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct hfi1_devdata *dd = hfi1_lookup(ndev);

		if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			struct hfi1_ctxtdata *uctxt = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!uctxt || !uctxt->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (memcmp(uctxt->uuid, uinfo->uuid,
				   sizeof(uctxt->uuid)) ||
			    uctxt->jkey != generate_jkey(current_uid()) ||
			    uctxt->subctxt_id != uinfo->subctxt_id ||
			    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
				continue;

			/* Verify the sharing process matches the master */
			if (uctxt->userversion != uinfo->userversion ||
			    uctxt->cnt >= uctxt->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			fd->uctxt = uctxt;
			fd->subctxt = uctxt->cnt++;
			uctxt->subpid[fd->subctxt] = current->pid;
			uctxt->active_slaves |= 1 << fd->subctxt;
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}
static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt;
	unsigned ctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen. It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt == dd->num_rcv_contexts)
		return -EBUSY;

	fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa);
	if (!uctxt) {
		dd_dev_err(dd,
			   "Unable to allocate ctxtdata memory, failing open\n");
		return -ENOMEM;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
			     uctxt->dd->node);
	if (!uctxt->sc)
		return -ENOMEM;

	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		return ret;
	/*
	 * Setup shared context resources if the user-level has requested
	 * shared contexts and this is the 'master' process.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper master.
	 */
	if (uinfo->subctxt_cnt && !fd->subctxt) {
		ret = init_subctxts(uctxt, uinfo);
		/*
		 * On error, we don't need to disable and de-allocate the
		 * send context because it will be done during file close
		 */
		if (ret)
			return ret;
	}
	uctxt->userversion = uinfo->userversion;
	uctxt->pid = current->pid;
	uctxt->flags = HFI1_CAP_UGET(MASK);
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	INIT_LIST_HEAD(&uctxt->sdma_queues);
	spin_lock_init(&uctxt->sdma_qlock);
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);
	fd->uctxt = uctxt;

	return 0;
}
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
			 const struct hfi1_user_info *uinfo)
{
	unsigned num_subctxts;

	num_subctxts = uinfo->subctxt_cnt;
	if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	uctxt->active_slaves = 1;
	uctxt->redirect_seq_cnt = 1;
	set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);

	return 0;
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	unsigned num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}
	goto bail;
bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;
bail:
	return ret;
}
static int user_init(struct file *fp)
{
	unsigned int rcvctrl_ops = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/* make sure that the context has already been setup */
	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
		return -EFAULT;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue. We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt. Therefore, add the rcvctrl op
	 * for both enable and disable.
	 */
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);

	/* Notify any waiting slaves */
	if (uctxt->subctxt_cnt) {
		clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
	}

	return 0;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;

	memset(&cinfo, 0, sizeof(cinfo));
	ret = hfi1_get_base_kinfo(uctxt, &cinfo);
	if (ret < 0)
		goto done;
	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
	if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
		ret = -EFAULT;
done:
	return ret;
}
static int setup_ctxt(struct file *fp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	/*
	 * Context should be set up only once, including allocation and
	 * programming of eager buffers. This is done if context sharing
	 * is not requested or by the master process.
	 */
	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		ret = hfi1_init_ctxt(uctxt->sc);
		if (ret)
			goto done;

		/* Now allocate the RcvHdr queue and eager buffers. */
		ret = hfi1_create_rcvhdrq(dd, uctxt);
		if (ret)
			goto done;
		ret = hfi1_setup_eagerbufs(uctxt);
		if (ret)
			goto done;
		if (uctxt->subctxt_cnt && !fd->subctxt) {
			ret = setup_subctxt(uctxt);
			if (ret)
				goto done;
		}
	} else {
		ret = wait_event_interruptible(uctxt->wait, !test_bit(
					       HFI1_CTXT_MASTER_UNINIT,
					       &uctxt->event_flags));
		if (ret)
			goto done;
	}

	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
	if (ret)
		goto done;
	/*
	 * Expected receive has to be setup for all processes (including
	 * shared contexts). However, it has to be done after the master
	 * context has been fully configured as it depends on the
	 * eager/expected split of the RcvArray entries.
	 * Setting it up here ensures that the subcontexts will be waiting
	 * (due to the above wait_event_interruptible()) until the master
	 * is set up.
	 */
	ret = hfi1_user_exp_rcv_init(fp);
	if (ret)
		goto done;

	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
	return ret;
}
static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	ssize_t sz;
	unsigned offset;
	int ret = 0;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt);

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].phys);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
		    HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
		  sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}
	sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
	if (copy_to_user(ubase, &binfo, sz))
		ret = -EFAULT;
	return ret;
}
static unsigned int poll_urgent(struct file *fp,
				struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
static unsigned int poll_next(struct file *fp,
			      struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
		pollflag = 0;
	} else {
		pollflag = POLLIN | POLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	if (!dd->events) {
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = dd->rcd[ctxt];
		if (uctxt) {
			unsigned long *evs = dd->events +
				(uctxt->ctxt - dd->first_user_ctxt) *
				HFI1_MAX_SHARED_CTXTS;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
done:
	return ret;
}
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (uctxt->rcvhdrtail_kvaddr)
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}
/*
 * clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
			  unsigned long events)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;

	if (!dd->events)
		return 0;

	evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			    HFI1_MAX_SHARED_CTXTS) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
			 u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i]) {
			intable = 1;
			break;
		}

	if (intable)
		ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
done:
	return ret;
}
static int ui_open(struct inode *inode, struct file *filp)
{
	struct hfi1_devdata *dd;

	dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
	filp->private_data = dd; /* for other methods */
	return 0;
}

static int ui_release(struct inode *inode, struct file *filp)
{
	/* nothing to do */
	return 0;
}

static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
	struct hfi1_devdata *dd = filp->private_data;

	return fixed_size_llseek(filp, offset, whence,
		(dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
}
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
		       loff_t *f_pos)
{
	struct hfi1_devdata *dd = filp->private_data;
	void __iomem *base = dd->kregbase;
	unsigned long total, csr_off,
		barlen = (dd->kregend - dd->kregbase);
	u64 data;

	/* only read 8 byte quantities */
	if ((count % 8) != 0)
		return -EINVAL;
	/* offset must be 8-byte aligned */
	if ((*f_pos % 8) != 0)
		return -EINVAL;
	/* destination buffer must be 8-byte aligned */
	if ((unsigned long)buf % 8 != 0)
		return -EINVAL;
	/* must be in range */
	if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
		return -EINVAL;
	/* only set the base if we are not starting past the BAR */
	if (*f_pos < barlen)
		base += *f_pos;
	csr_off = *f_pos;
	for (total = 0; total < count; total += 8, csr_off += 8) {
		/* accessing LCB CSRs requires more checks */
		if (is_lcb_offset(csr_off)) {
			if (read_lcb_csr(dd, csr_off, (u64 *)&data))
				break; /* failed */
		}
		/*
		 * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
		 * false parity error. Avoid the whole issue by not reading
		 * them. These registers are defined as having a read value
		 * of 0.
		 */
		else if (csr_off == ASIC_GPIO_CLEAR ||
			 csr_off == ASIC_GPIO_FORCE ||
			 csr_off == ASIC_QSFP1_CLEAR ||
			 csr_off == ASIC_QSFP1_FORCE ||
			 csr_off == ASIC_QSFP2_CLEAR ||
			 csr_off == ASIC_QSFP2_FORCE)
			data = 0;
		else if (csr_off >= barlen) {
			/*
			 * read_8051_data can read more than just 8 bytes at
			 * a time. However, folding this into the loop and
			 * handling the reads in 8 byte increments allows us
			 * to smoothly transition from chip memory to 8051
			 * memory.
			 */
			if (read_8051_data(dd,
					   (u32)(csr_off - barlen),
					   sizeof(data), &data))
				break; /* failed */
		} else
			data = readq(base + total);
		if (put_user(data, (unsigned long __user *)(buf + total)))
			break;
	}
	*f_pos += total;
	return total;
}
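
/*
 * Hedged usage sketch (not part of the original driver): the UI device
 * exposes the chip's CSR space as a flat, 8-byte-granular file, so a
 * diagnostic tool could read one 64-bit CSR with a suitably aligned
 * pread() ("csr_offset" is illustrative):
 *
 *	uint64_t val;
 *	if (pread(ui_fd, &val, sizeof(val), csr_offset) != sizeof(val))
 *		err(1, "CSR read");
 *
 * Offsets past the BAR fall through to 8051 data memory, as handled
 * above.
 */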
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_write(struct file *filp, const char __user *buf,
			size_t count, loff_t *f_pos)
{
	struct hfi1_devdata *dd = filp->private_data;
	void __iomem *base;
	unsigned long total, data, csr_off;
	int in_lcb;

	/* only write 8 byte quantities */
	if ((count % 8) != 0)
		return -EINVAL;
	/* offset must be 8-byte aligned */
	if ((*f_pos % 8) != 0)
		return -EINVAL;
	/* source buffer must be 8-byte aligned */
	if ((unsigned long)buf % 8 != 0)
		return -EINVAL;
	/* must be in range */
	if (*f_pos + count > dd->kregend - dd->kregbase)
		return -EINVAL;

	base = (void __iomem *)dd->kregbase + *f_pos;
	csr_off = *f_pos;
	in_lcb = 0;
	for (total = 0; total < count; total += 8, csr_off += 8) {
		if (get_user(data, (unsigned long __user *)(buf + total)))
			break;
		/* accessing LCB CSRs requires a special procedure */
		if (is_lcb_offset(csr_off)) {
			if (!in_lcb) {
				int ret = acquire_lcb_access(dd, 1);

				if (ret)
					break;
				in_lcb = 1;
			}
		} else {
			if (in_lcb) {
				release_lcb_access(dd, 1);
				in_lcb = 0;
			}
		}
		writeq(data, base + total);
	}
	if (in_lcb)
		release_lcb_access(dd, 1);
	*f_pos += total;
	return total;
}
static const struct file_operations ui_file_ops = {
	.owner = THIS_MODULE,
	.llseek = ui_lseek,
	.read = ui_read,
	.write = ui_write,
	.open = ui_open,
	.release = ui_release,
};
#define UI_OFFSET 192	/* device minor offset for UI devices */
static int create_ui = 1;

static void user_remove(struct hfi1_devdata *dd)
{
	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
	hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
}

static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true);
	if (ret)
		goto done;

	if (create_ui) {
		snprintf(name, sizeof(name),
			 "%s_ui%d", class_name(), dd->unit);
		ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
				     &dd->ui_cdev, &dd->ui_device,
				     false);
		if (ret)
			goto done;
	}
done:
	return ret;
}
/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	int r, ret;

	r = user_add(dd);
	ret = hfi1_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}

/*
 * Remove per-unit files in /dev
 * void, core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
	hfi1_diag_remove(dd);
}