/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * Minimum buffers we want to have per context, after those used by the driver.
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  A negative value (the default) means use one
 * user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, S_IRUGO);
MODULE_PARM_DESC(
        num_user_contexts, "Set max number of user contexts to use");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (2 << 20); /* 2MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;

/*
 * Common code for creating the receive context array.
 */
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
        unsigned i;
        int ret;

        /* The control context must always be context 0 */
        BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

        dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
                               GFP_KERNEL, dd->node);
        if (!dd->rcd)
                goto nomem;

        /* create one or more kernel contexts */
        for (i = 0; i < dd->first_user_ctxt; ++i) {
                struct hfi1_pportdata *ppd;
                struct hfi1_ctxtdata *rcd;

                ppd = dd->pport + (i % dd->num_pports);
                rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
                if (!rcd) {
                        dd_dev_err(dd,
                                   "Unable to allocate kernel receive context, failing\n");
                        goto nomem;
                }
                /*
                 * Set up the kernel context flags here and now because they
                 * use default values for all receive side memories.  User
                 * contexts will be handled as they are created.
                 */
                rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
                        HFI1_CAP_KGET(NODROP_RHQ_FULL) |
                        HFI1_CAP_KGET(NODROP_EGR_FULL) |
                        HFI1_CAP_KGET(DMA_RTAIL);

                /* Control context must use DMA_RTAIL */
                if (rcd->ctxt == HFI1_CTRL_CTXT)
                        rcd->flags |= HFI1_CAP_DMA_RTAIL;
                rcd->seq_cnt = 1;

                rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
                if (!rcd->sc) {
                        dd_dev_err(dd,
                                   "Unable to allocate kernel send context, failing\n");
                        dd->rcd[rcd->ctxt] = NULL;
                        hfi1_free_ctxtdata(dd, rcd);
                        goto nomem;
                }

                ret = hfi1_init_ctxt(rcd->sc);
                if (ret < 0) {
                        dd_dev_err(dd,
                                   "Failed to setup kernel receive context, failing\n");
                        sc_free(rcd->sc);
                        dd->rcd[rcd->ctxt] = NULL;
                        hfi1_free_ctxtdata(dd, rcd);
                        ret = -EFAULT;
                        goto bail;
                }
        }

        /*
         * Initialize ASPM, to be done after the gen3 transition and setting
         * up contexts, and before enabling interrupts.
         */
        aspm_init(dd);

        return 0;
nomem:
        ret = -ENOMEM;
bail:
        kfree(dd->rcd);
        dd->rcd = NULL;
        return ret;
}


/*
 * Common code for user and kernel context setup.
 */
struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
                                           int numa)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct hfi1_ctxtdata *rcd;
        unsigned kctxt_ngroups = 0;
        u32 base;

        if (dd->rcv_entries.nctxt_extra >
            dd->num_rcv_contexts - dd->first_user_ctxt)
                kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
                                 (dd->num_rcv_contexts - dd->first_user_ctxt));
        rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
        if (rcd) {
                u32 rcvtids, max_entries;

                hfi1_cdbg(PROC, "setting up context %u\n", ctxt);

                INIT_LIST_HEAD(&rcd->qp_wait_list);
                rcd->ppd = ppd;
                rcd->dd = dd;
                rcd->cnt = 1;
                rcd->ctxt = ctxt;
                dd->rcd[ctxt] = rcd;
                rcd->numa_id = numa;
                rcd->rcv_array_groups = dd->rcv_entries.ngroups;

                mutex_init(&rcd->exp_lock);

                /*
                 * Calculate the context's RcvArray entry starting point.
                 * We do this here because we have to take into account all
                 * the RcvArray entries that previous contexts have taken,
                 * and we have to account for any extra groups assigned to
                 * the kernel or user contexts.
                 */
                if (ctxt < dd->first_user_ctxt) {
                        if (ctxt < kctxt_ngroups) {
                                base = ctxt * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else {
                                base = kctxt_ngroups +
                                        (ctxt * dd->rcv_entries.ngroups);
                        }
                } else {
                        u16 ct = ctxt - dd->first_user_ctxt;

                        base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
                                kctxt_ngroups);
                        if (ct < dd->rcv_entries.nctxt_extra) {
                                base += ct * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else {
                                base += dd->rcv_entries.nctxt_extra +
                                        (ct * dd->rcv_entries.ngroups);
                        }
                }
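                /*
                 * In other words: the first kctxt_ngroups kernel contexts
                 * and the first nctxt_extra user contexts get one extra
                 * RcvArray group each, so their bases advance by
                 * (ngroups + 1); every other context advances by ngroups.
                 */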
                rcd->eager_base = base * dd->rcv_entries.group_size;

                /* Validate and initialize Rcv Hdr Q variables */
                if (rcvhdrcnt % HDRQ_INCREMENT) {
                        dd_dev_err(dd,
                                   "ctxt%u: header queue count %u must be divisible by %lu\n",
                                   rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
                        goto bail;
                }
                rcd->rcvhdrq_cnt = rcvhdrcnt;
                rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
                /*
                 * Simple Eager buffer allocation: we have already pre-allocated
                 * the number of RcvArray entry groups. Each ctxtdata structure
                 * holds the number of groups for that context.
                 *
                 * To follow CSR requirements and maintain cacheline alignment,
                 * make sure all sizes and bases are multiples of group_size.
                 *
                 * The expected entry count is what is left after assigning
                 * eager.
                 */
                max_entries = rcd->rcv_array_groups *
                        dd->rcv_entries.group_size;
                rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
                rcd->egrbufs.count = round_down(rcvtids,
                                                dd->rcv_entries.group_size);
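                /*
                 * With the default rcvarr_split of 25, for example, a
                 * quarter of max_entries is set aside for eager buffers,
                 * rounded down to a multiple of the RcvArray group size.
                 */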
                if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
                        dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
                                   rcd->ctxt);
                        rcd->egrbufs.count = MAX_EAGER_ENTRIES;
                }
                hfi1_cdbg(PROC,
                          "ctxt%u: max Eager buffer RcvArray entries: %u\n",
                          rcd->ctxt, rcd->egrbufs.count);

                /*
                 * Allocate array that will hold the eager buffer accounting
                 * data.
                 * This will allocate the maximum possible buffer count based
                 * on the value of the RcvArray split parameter.
                 * The resulting value will be rounded down to the closest
                 * multiple of dd->rcv_entries.group_size.
                 */
                rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
                                               sizeof(*rcd->egrbufs.buffers),
                                               GFP_KERNEL);
                if (!rcd->egrbufs.buffers)
                        goto bail;
                rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
                                               sizeof(*rcd->egrbufs.rcvtids),
                                               GFP_KERNEL);
                if (!rcd->egrbufs.rcvtids)
                        goto bail;
                rcd->egrbufs.size = eager_buffer_size;
                /*
                 * The size of the buffers programmed into the RcvArray
                 * entries needs to be big enough to handle the highest
                 * MTU supported.
                 */
                if (rcd->egrbufs.size < hfi1_max_mtu) {
                        rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
                        hfi1_cdbg(PROC,
                                  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
                                  rcd->ctxt, rcd->egrbufs.size);
                }
                rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

                if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
                        rcd->opstats = kzalloc(sizeof(*rcd->opstats),
                                GFP_KERNEL);
                        if (!rcd->opstats)
                                goto bail;
                }
        }
        return rcd;
bail:
        kfree(rcd->egrbufs.rcvtids);
        kfree(rcd->egrbufs.buffers);
        kfree(rcd);
        return NULL;
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
        /* there are only 3 valid receive header entry sizes */
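        /*
         * Sizes are in DWs: 2 DW = 8B, 16 DW = 64B, 32 DW = 128B (see
         * the hdrq_entsize module parameter); the values returned below
         * are the CSR encodings for those sizes.
         */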
        if (size == 2)
                return 1;
        if (size == 16)
                return 2;
        if (size == 32)
                return 4;
        return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct cc_state *cc_state;
        int i;
        u16 cce, ccti_limit, max_ccti = 0;
        u16 shift, mult;
        u64 src;
        u32 current_egress_rate; /* Mbits/sec */
        u32 max_pkt_time;
        /*
         * max_pkt_time is the maximum packet egress time in units
         * of the fabric clock period 1/(805 MHz).
         */

        cc_state = get_cc_state(ppd);

        if (!cc_state)
                /*
                 * This should _never_ happen - rcu_read_lock() is held,
                 * and set_link_ipg() should not be called if cc_state
                 * is NULL.
                 */
                return;

        for (i = 0; i < OPA_MAX_SLS; i++) {
                u16 ccti = ppd->cca_timer[i].ccti;

                if (ccti > max_ccti)
                        max_ccti = ccti;
        }

        ccti_limit = cc_state->cct.ccti_limit;
        if (max_ccti > ccti_limit)
                max_ccti = ccti_limit;

        cce = cc_state->cct.entries[max_ccti].entry;
        shift = (cce & 0xc000) >> 14;
        mult = (cce & 0x3fff);
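        /*
         * Each CCT entry packs a 2-bit shift (bits 15:14) and a 14-bit
         * multiplier (bits 13:0); the static rate control value computed
         * below is (max_pkt_time >> shift) * mult, in fabric clock units.
         */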

        current_egress_rate = active_egress_rate(ppd);

        max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

        src = (max_pkt_time >> shift) * mult;

        src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
        src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

        write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
        struct cca_timer *cca_timer;
        struct hfi1_pportdata *ppd;
        int sl;
        u16 ccti_timer, ccti_min;
        struct cc_state *cc_state;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        cca_timer = container_of(t, struct cca_timer, hrtimer);
        ppd = cca_timer->ppd;
        sl = cca_timer->sl;

        rcu_read_lock();

        cc_state = get_cc_state(ppd);

        if (!cc_state) {
                rcu_read_unlock();
                return HRTIMER_NORESTART;
        }

        /*
         * 1) decrement ccti for SL
         * 2) calculate IPG for link (set_link_ipg())
         * 3) restart timer, unless ccti is at min value
         */

        ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
        ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

        spin_lock_irqsave(&ppd->cca_timer_lock, flags);

        if (cca_timer->ccti > ccti_min) {
                cca_timer->ccti--;
                set_link_ipg(ppd);
        }

        if (cca_timer->ccti > ccti_min) {
                unsigned long nsec = 1024 * ccti_timer;
                /* ccti_timer is in units of 1.024 usec */
                hrtimer_forward_now(t, ns_to_ktime(nsec));
                ret = HRTIMER_RESTART;
        }

        spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
        rcu_read_unlock();
        return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
                         struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
        int i;
        uint default_pkey_idx;
        struct cc_state *cc_state;

        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */

        default_pkey_idx = 1;

        ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
        if (loopback) {
                hfi1_early_err(&pdev->dev,
                               "Faking data partition 0x8001 in idx %u\n",
                               !default_pkey_idx);
                ppd->pkeys[!default_pkey_idx] = 0x8001;
        }

        INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
        INIT_WORK(&ppd->link_up_work, handle_link_up);
        INIT_WORK(&ppd->link_down_work, handle_link_down);
        INIT_WORK(&ppd->freeze_work, handle_freeze);
        INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
        INIT_WORK(&ppd->sma_message_work, handle_sma_message);
        INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
        INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
        INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
        INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

        mutex_init(&ppd->hls_lock);
        spin_lock_init(&ppd->sdma_alllock);
        spin_lock_init(&ppd->qsfp_info.qsfp_lock);

        ppd->qsfp_info.ppd = ppd;
        ppd->sm_trap_qp = 0x0;
        ppd->sa_qp = 0x1;

        ppd->hfi1_wq = NULL;

        spin_lock_init(&ppd->cca_timer_lock);

        for (i = 0; i < OPA_MAX_SLS; i++) {
                hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                ppd->cca_timer[i].ppd = ppd;
                ppd->cca_timer[i].sl = i;
                ppd->cca_timer[i].ccti = 0;
                ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
        }

        ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

        spin_lock_init(&ppd->cc_state_lock);
        spin_lock_init(&ppd->cc_log_lock);
        cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
        RCU_INIT_POINTER(ppd->cc_state, cc_state);
        if (!cc_state)
                goto bail;
        return;

bail:
        hfi1_early_err(&pdev->dev,
                       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do device initialization that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
        return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit occurs (explicitly, in case the
 * reset failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
        int i;

        /*
         * Ensure chip does no sends or receives, tail updates, or
         * pioavail updates while we re-initialize.  This is mostly
         * for the driver data structures, not chip registers.
         */
        for (i = 0; i < dd->num_rcv_contexts; i++)
                hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
                                  HFI1_RCVCTRL_INTRAVAIL_DIS |
                                  HFI1_RCVCTRL_TAILUPD_DIS, i);
        pio_send_control(dd, PSC_GLOBAL_DISABLE);
        for (i = 0; i < dd->num_send_contexts; i++)
                sc_disable(dd->send_contexts[i].sc);

        return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
        u32 rcvmask;
        u32 i;

        /* enable PIO send */
        pio_send_control(dd, PSC_GLOBAL_ENABLE);

        /*
         * Enable kernel ctxts' receive and receive interrupt.
         * Other ctxts done as user opens and initializes them.
         */
        for (i = 0; i < dd->first_user_ctxt; ++i) {
                rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
                rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
                        HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
                if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
                        rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
                if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
                if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
                hfi1_rcvctrl(dd, rcvmask, i);
                sc_enable(dd->rcd[i]->sc);
        }
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
        int pidx;
        struct hfi1_pportdata *ppd;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (!ppd->hfi1_wq) {
                        ppd->hfi1_wq =
                                alloc_workqueue(
                                    "hfi%d_%d",
                                    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
                                    dd->num_sdma,
                                    dd->unit, pidx);
                        if (!ppd->hfi1_wq)
                                goto wq_error;
                }
        }
        return 0;
wq_error:
        pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
        }
        return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
        int ret = 0, pidx, lastfail = 0;
        unsigned i, len;
        struct hfi1_ctxtdata *rcd;
        struct hfi1_pportdata *ppd;

        /* Set up recv low level handlers */
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
                                                kdeth_process_expected;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
                                                kdeth_process_eager;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
                                                process_receive_error;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
                                                process_receive_bypass;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
                                                process_receive_invalid;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
                                                process_receive_invalid;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
                                                process_receive_invalid;
        dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

        /* Set up send low level handlers */
        dd->process_pio_send = hfi1_verbs_send_pio;
        dd->process_dma_send = hfi1_verbs_send_dma;
        dd->pio_inline_send = pio_copy;

        if (is_ax(dd)) {
                atomic_set(&dd->drop_packet, DROP_PACKET_ON);
                dd->do_drop = 1;
        } else {
                atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
                dd->do_drop = 0;
        }

        /* make sure the link is not "up" */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                ppd->linkup = 0;
        }

        if (reinit)
                ret = init_after_reset(dd);
        else
                ret = loadtime_init(dd);
        if (ret)
                goto done;

        /* allocate dummy tail memory for all receive contexts */
        dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
                &dd->pcidev->dev, sizeof(u64),
                &dd->rcvhdrtail_dummy_physaddr,
                GFP_KERNEL);

        if (!dd->rcvhdrtail_dummy_kvaddr) {
                dd_dev_err(dd, "cannot allocate dummy tail memory\n");
                ret = -ENOMEM;
                goto done;
        }

        /* dd->rcd can be NULL if early initialization failed */
        for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
                /*
                 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
                 * re-init, the simplest way to handle this is to free
                 * existing, and re-allocate.
                 * Need to re-create rest of ctxt 0 ctxtdata as well.
                 */
                rcd = dd->rcd[i];
                if (!rcd)
                        continue;

                rcd->do_interrupt = &handle_receive_interrupt;

                lastfail = hfi1_create_rcvhdrq(dd, rcd);
                if (!lastfail)
                        lastfail = hfi1_setup_eagerbufs(rcd);
                if (lastfail) {
                        dd_dev_err(dd,
                                   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
                        ret = lastfail;
                }
        }

        /* Allocate enough memory for user event notification. */
        len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
                         sizeof(*dd->events));
        dd->events = vmalloc_user(len);
        if (!dd->events)
                dd_dev_err(dd, "Failed to allocate user events page\n");
        /*
         * Allocate a page for device and port status.
         * Page will be shared amongst all user processes.
         */
        dd->status = vmalloc_user(PAGE_SIZE);
        if (!dd->status)
                dd_dev_err(dd, "Failed to allocate dev status page\n");
        else
                dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
                                             sizeof(dd->status->freezemsg));
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (dd->status)
                        /* Currently, we only have one port */
                        ppd->statusp = &dd->status->port;

                set_mtu(ppd);
        }

        /* enable chip even if we have an error, so we can debug cause */
        enable_chip(dd);

done:
        /*
         * Set status even if port serdes is not initialized
         * so that diags will work.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
                        HFI1_STATUS_INITTED;
        if (!ret) {
                /* enable all interrupts from the chip */
                set_intr_state(dd, 1);

                /* chip is OK for user apps; mark it as initialized */
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;

                        /*
                         * start the serdes - must be after interrupts are
                         * enabled so we are notified when the link goes up
                         */
                        lastfail = bringup_serdes(ppd);
                        if (lastfail)
                                dd_dev_info(dd,
                                            "Failed to bring up port %u\n",
                                            ppd->port);

                        /*
                         * Set status even if port serdes is not initialized
                         * so that diags will work.
                         */
                        if (ppd->statusp)
                                *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
                                                        HFI1_STATUS_INITTED;
                        if (!ppd->link_speed_enabled)
                                continue;
                }
        }

        /* if ret is non-zero, we probably should do some cleanup here... */
        return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
        return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
        struct hfi1_devdata *dd;
        unsigned long flags;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        dd = __hfi1_lookup(unit);
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);

        return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        int pidx;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->led_override_timer.data) {
                        del_timer_sync(&ppd->led_override_timer);
                        atomic_set(&ppd->led_override_timer_active, 0);
                }
        }
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        unsigned pidx;
        int i;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                ppd->linkup = 0;
                if (ppd->statusp)
                        *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
                                           HFI1_STATUS_IB_READY);
        }
        dd->flags &= ~HFI1_INITTED;

        /* mask interrupts, but not errors */
        set_intr_state(dd, 0);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                for (i = 0; i < dd->num_rcv_contexts; i++)
                        hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
                                          HFI1_RCVCTRL_CTXT_DIS |
                                          HFI1_RCVCTRL_INTRAVAIL_DIS |
                                          HFI1_RCVCTRL_PKEY_DIS |
                                          HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
                /*
                 * Gracefully stop all sends allowing any in progress to
                 * trickle out first.
                 */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_flush(dd->send_contexts[i].sc);
        }

        /*
         * Enough for anything that's going to trickle out to have actually
         * done so.
         */
        udelay(20);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                /* disable all contexts */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_disable(dd->send_contexts[i].sc);
                /* disable the send device */
                pio_send_control(dd, PSC_GLOBAL_DISABLE);

                shutdown_led_override(ppd);

                /*
                 * Clear SerdesEnable.
                 * We can't count on interrupts since we are stopping.
                 */
                hfi1_quiet_serdes(ppd);

                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
        }
        sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        unsigned e;

        if (!rcd)
                return;

        if (rcd->rcvhdrq) {
                dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
                                  rcd->rcvhdrq, rcd->rcvhdrq_phys);
                rcd->rcvhdrq = NULL;
                if (rcd->rcvhdrtail_kvaddr) {
                        dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                          (void *)rcd->rcvhdrtail_kvaddr,
                                          rcd->rcvhdrqtailaddr_phys);
                        rcd->rcvhdrtail_kvaddr = NULL;
                }
        }

        /* all the RcvArray entries should have been cleared by now */
        kfree(rcd->egrbufs.rcvtids);

        for (e = 0; e < rcd->egrbufs.alloced; e++) {
                if (rcd->egrbufs.buffers[e].phys)
                        dma_free_coherent(&dd->pcidev->dev,
                                          rcd->egrbufs.buffers[e].len,
                                          rcd->egrbufs.buffers[e].addr,
                                          rcd->egrbufs.buffers[e].phys);
        }
        kfree(rcd->egrbufs.buffers);

        sc_free(rcd->sc);
        vfree(rcd->user_event_mask);
        vfree(rcd->subctxt_uregbase);
        vfree(rcd->subctxt_rcvegrbuf);
        vfree(rcd->subctxt_rcvhdr_base);
        kfree(rcd->opstats);
        kfree(rcd);
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
        struct hfi1_asic_data *ad;
        int other;

        if (!dd->asic_data)
                return NULL;
        dd->asic_data->dds[dd->hfi1_id] = NULL;
        other = dd->hfi1_id ? 0 : 1;
        ad = dd->asic_data;
        dd->asic_data = NULL;
        /* return NULL if the other dd still has a link */
        return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
                               struct hfi1_asic_data *ad)
{
        clean_up_i2c(dd, ad);
        kfree(ad);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
        struct hfi1_devdata *dd =
                container_of(kobj, struct hfi1_devdata, kobj);
        struct hfi1_asic_data *ad;
        unsigned long flags;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        idr_remove(&hfi1_unit_table, dd->unit);
        list_del(&dd->list);
        ad = release_asic_data(dd);
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        if (ad)
                finalize_asic_data(dd, ad);
        free_platform_config(dd);
        rcu_barrier(); /* wait for rcu callbacks to complete */
        free_percpu(dd->int_counter);
        free_percpu(dd->rcv_limit);
        free_percpu(dd->send_schedule);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
        .release = __hfi1_free_devdata,
};

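/*
 * Drop a reference to the device data; when the last reference is
 * released, the kobject core invokes __hfi1_free_devdata() via the
 * .release hook above to tear everything down.
 */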
void hfi1_free_devdata(struct hfi1_devdata *dd)
{
        kobject_put(&dd->kobj);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
        unsigned long flags;
        struct hfi1_devdata *dd;
        int ret, nports;

        /* "extra" is sizeof(struct hfi1_pportdata) * number of ports */
        nports = extra / sizeof(struct hfi1_pportdata);

        dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
                                                     nports);
        if (!dd)
                return ERR_PTR(-ENOMEM);
        dd->num_pports = nports;
        dd->pport = (struct hfi1_pportdata *)(dd + 1);

        INIT_LIST_HEAD(&dd->list);
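        /*
         * Standard idr_preload()/GFP_NOWAIT pairing: preallocate IDR
         * memory while sleeping is still allowed, then take the unit
         * number atomically under the irq-disabled hfi1_devs_lock.
         */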
        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&hfi1_devs_lock, flags);

        ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
        if (ret >= 0) {
                dd->unit = ret;
                list_add(&dd->list, &hfi1_dev_list);
        }

        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        idr_preload_end();

        if (ret < 0) {
                hfi1_early_err(&pdev->dev,
                               "Could not allocate unit ID: error %d\n", -ret);
                goto bail;
        }
        /*
         * Initialize all locks for the device. This needs to be as early as
         * possible so locks are usable.
         */
        spin_lock_init(&dd->sc_lock);
        spin_lock_init(&dd->sendctrl_lock);
        spin_lock_init(&dd->rcvctrl_lock);
        spin_lock_init(&dd->uctxt_lock);
        spin_lock_init(&dd->hfi1_diag_trans_lock);
        spin_lock_init(&dd->sc_init_lock);
        spin_lock_init(&dd->dc8051_lock);
        spin_lock_init(&dd->dc8051_memlock);
        seqlock_init(&dd->sc2vl_lock);
        spin_lock_init(&dd->sde_map_lock);
        spin_lock_init(&dd->pio_map_lock);
        init_waitqueue_head(&dd->event_queue);

        dd->int_counter = alloc_percpu(u64);
        if (!dd->int_counter) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu int_counter\n");
                goto bail;
        }

        dd->rcv_limit = alloc_percpu(u64);
        if (!dd->rcv_limit) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu rcv_limit\n");
                goto bail;
        }

        dd->send_schedule = alloc_percpu(u64);
        if (!dd->send_schedule) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu send_schedule\n");
                goto bail;
        }

        if (!hfi1_cpulist_count) {
                u32 count = num_online_cpus();

                hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
                                       GFP_KERNEL);
                if (hfi1_cpulist)
                        hfi1_cpulist_count = count;
                else
                        hfi1_early_err(
                        &pdev->dev,
                        "Could not alloc cpulist info, cpu affinity might be wrong\n");
        }
        kobject_init(&dd->kobj, &hfi1_devdata_type);
        return dd;

bail:
        if (!list_empty(&dd->list))
                list_del_init(&dd->list);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
        return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
        if (dd->flags & HFI1_INITTED) {
                u32 pidx;

                dd->flags &= ~HFI1_INITTED;
                if (dd->pport)
                        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                                struct hfi1_pportdata *ppd;

                                ppd = dd->pport + pidx;
                                if (dd->flags & HFI1_PRESENT)
                                        set_link_state(ppd, HLS_DN_DISABLE);

                                if (ppd->statusp)
                                        *ppd->statusp &= ~HFI1_STATUS_IB_READY;
                        }
        }

        /*
         * Mark as having had an error for driver, and also
         * for /sys and status word mapped to user programs.
         * This marks unit as not usable, until reset.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
        .name = DRIVER_NAME,
        .probe = init_one,
        .remove = remove_one,
        .id_table = hfi1_pci_tbl,
        .err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
        int i;

        for (i = 0; i < krcvqsset; i++)
                n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
        int ret;

        ret = dev_init();
        if (ret)
                goto bail;

        ret = node_affinity_init();
        if (ret)
                goto bail;

        /* validate max MTU before any devices start */
        if (!valid_opa_max_mtu(hfi1_max_mtu)) {
                pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
                       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
                hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
        }
        /* valid CUs run from 1-128 in powers of 2 */
        if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
                hfi1_cu = 1;
        /* valid credit return threshold is 0-100, variable is unsigned */
        if (user_credit_return_threshold > 100)
                user_credit_return_threshold = 100;

        compute_krcvqs();
        /*
         * Sanitize the receive interrupt count now; the timeout must
         * wait until after the hardware type is known.
         */
        if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
                rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
        /* reject invalid combinations */
        if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
                pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
                rcv_intr_count = 1;
        }
        if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
                /*
                 * Avoid indefinite packet delivery by requiring a timeout
                 * if count is > 1.
                 */
                pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
                rcv_intr_timeout = 1;
        }
        if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
                /*
                 * The dynamic algorithm expects a non-zero timeout
                 * and a count > 1.
                 */
                pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
                rcv_intr_dynamic = 0;
        }

        /* sanitize link CRC options */
        link_crc_mask &= SUPPORTED_CRCS;

        /*
         * These must be called before the driver is registered with
         * the PCI subsystem.
         */
        idr_init(&hfi1_unit_table);

        hfi1_dbg_init();
        ret = hfi1_wss_init();
        if (ret < 0)
                goto bail_wss;
        ret = pci_register_driver(&hfi1_pci_driver);
        if (ret < 0) {
                pr_err("Unable to register driver: error %d\n", -ret);
                goto bail_dev;
        }
        goto bail; /* all OK */

bail_dev:
        hfi1_wss_exit();
bail_wss:
        hfi1_dbg_exit();
        idr_destroy(&hfi1_unit_table);
        dev_cleanup();
bail:
        return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
        pci_unregister_driver(&hfi1_pci_driver);
        node_affinity_destroy();
        hfi1_wss_exit();
        hfi1_dbg_exit();
        hfi1_cpulist_count = 0;
        kfree(hfi1_cpulist);

        idr_destroy(&hfi1_unit_table);
        dispose_firmware();     /* asymmetric with obtain_firmware() */
        dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
        int ctxt;
        int pidx;
        struct hfi1_ctxtdata **tmp;
        unsigned long flags;

        /* users can't do anything more with chip */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                struct hfi1_pportdata *ppd = &dd->pport[pidx];
                struct cc_state *cc_state;
                int i;

                if (ppd->statusp)
                        *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

                for (i = 0; i < OPA_MAX_SLS; i++)
                        hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

                spin_lock(&ppd->cc_state_lock);
                cc_state = get_cc_state_protected(ppd);
                RCU_INIT_POINTER(ppd->cc_state, NULL);
                spin_unlock(&ppd->cc_state_lock);

                if (cc_state)
                        kfree_rcu(cc_state, rcu);
        }

        free_credit_return(dd);

        /*
         * Free any resources still in use (usually just kernel contexts)
         * at unload; we iterate over ctxtcnt, because that's what we
         * allocate.  We acquire the lock to be really paranoid that rcd
         * isn't being accessed from some interrupt-related code (that
         * should not happen, but best to be sure).
         */
        spin_lock_irqsave(&dd->uctxt_lock, flags);
        tmp = dd->rcd;
        dd->rcd = NULL;
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        if (dd->rcvhdrtail_dummy_kvaddr) {
                dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
                                  (void *)dd->rcvhdrtail_dummy_kvaddr,
                                  dd->rcvhdrtail_dummy_physaddr);
                dd->rcvhdrtail_dummy_kvaddr = NULL;
        }

        for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
                struct hfi1_ctxtdata *rcd = tmp[ctxt];

                tmp[ctxt] = NULL; /* debugging paranoia */
                if (rcd) {
                        hfi1_clear_tids(rcd);
                        hfi1_free_ctxtdata(dd, rcd);
                }
        }
        kfree(tmp);
        free_pio_map(dd);
        /* must follow rcv context free - need to remove rcv's hooks */
        for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
                sc_free(dd->send_contexts[ctxt].sc);
        dd->num_send_contexts = 0;
        kfree(dd->send_contexts);
        dd->send_contexts = NULL;
        kfree(dd->hw_to_sw);
        dd->hw_to_sw = NULL;
        kfree(dd->boardname);
        vfree(dd->events);
        vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
        hfi1_start_cleanup(dd);

        hfi1_pcie_ddcleanup(dd);
        hfi1_pcie_cleanup(dd->pcidev);

        cleanup_device_data(dd);

        hfi1_free_devdata(dd);
}
1400
1401 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1402 {
1403         int ret = 0, j, pidx, initfail;
1404         struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
1405         struct hfi1_pportdata *ppd;
1406
1407         /* First, lock the non-writable module parameters */
1408         HFI1_CAP_LOCK();
1409
1410         /* Validate some global module parameters */
1411         if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
1412                 hfi1_early_err(&pdev->dev, "Header queue  count too small\n");
1413                 ret = -EINVAL;
1414                 goto bail;
1415         }
        if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
                hfi1_early_err(&pdev->dev,
                               "Receive header queue count cannot be greater than %u\n",
                               HFI1_MAX_HDRQ_EGRBUF_CNT);
                ret = -EINVAL;
                goto bail;
        }
        /* use the encoding function as a sanitization check */
        if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
                hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
                               hfi1_hdrq_entsize);
                ret = -EINVAL;
                goto bail;
        }
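        /*
         * (encode_rcv_header_entry_size() returns 0 for entry sizes the
         * chip cannot encode, so only the small fixed set of DW sizes
         * the hardware supports passes this check.)
         */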

        /* The receive eager buffer size must be set before the receive
         * contexts are created.
         *
         * Set the eager buffer size.  Validate that it falls in a range
         * allowed by the hardware - all powers of 2 between the min and
         * max.  The maximum valid MTU is within the eager buffer range
         * so we do not need to cap the max_mtu by an eager buffer size
         * setting.
         */
        if (eager_buffer_size) {
                if (!is_power_of_2(eager_buffer_size))
                        eager_buffer_size =
                                roundup_pow_of_two(eager_buffer_size);
                eager_buffer_size =
                        clamp_val(eager_buffer_size,
                                  MIN_EAGER_BUFFER * 8,
                                  MAX_EAGER_BUFFER_TOTAL);
                hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
                                eager_buffer_size);
        } else {
                hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
                ret = -EINVAL;
                goto bail;
        }
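        /*
         * Worked example (values illustrative): a requested size of
         * 3 MB first rounds up to the 4 MB power of two, then is
         * clamped into [MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL].
         */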

        /* restrict value of hfi1_rcvarr_split */
        hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

        ret = hfi1_pcie_init(pdev, ent);
        if (ret)
                goto bail;

        /*
         * Do device-specific initialization, function table setup, dd
         * allocation, etc.
         */
        switch (ent->device) {
        case PCI_DEVICE_ID_INTEL0:
        case PCI_DEVICE_ID_INTEL1:
                dd = hfi1_init_dd(pdev, ent);
                break;
        default:
                hfi1_early_err(&pdev->dev,
                               "Failing on unknown Intel deviceid 0x%x\n",
                               ent->device);
                ret = -ENODEV;
        }

        if (IS_ERR(dd))
                ret = PTR_ERR(dd);
        if (ret)
                goto clean_bail; /* error already printed */

        ret = create_workqueues(dd);
        if (ret)
                goto clean_bail;

        /* do the generic initialization */
        initfail = hfi1_init(dd, 0);

        ret = hfi1_register_ib_device(dd);

        /*
         * Now ready for use.  This should be cleared whenever we
         * detect a reset, or initiate one.  If an earlier failure
         * occurred, we still create the devices, so diags, etc. can
         * be used to determine the cause of the problem.
         */
        if (!initfail && !ret) {
                dd->flags |= HFI1_INITTED;
                /* create debugfs files after init and ib register */
                hfi1_dbg_ibdev_init(&dd->verbs_dev);
        }

        j = hfi1_device_create(dd);
        if (j)
                dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

        if (initfail || ret) {
                stop_timers(dd);
                flush_workqueue(ib_wq);
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        hfi1_quiet_serdes(ppd);
                        if (ppd->hfi1_wq) {
                                destroy_workqueue(ppd->hfi1_wq);
                                ppd->hfi1_wq = NULL;
                        }
                }
                if (!j)
                        hfi1_device_remove(dd);
                if (!ret)
                        hfi1_unregister_ib_device(dd);
                postinit_cleanup(dd);
                if (initfail)
                        ret = initfail;
                goto bail;      /* everything already cleaned */
        }

        sdma_start(dd);

        return 0;

clean_bail:
        hfi1_pcie_cleanup(pdev);
bail:
        return ret;
}

static void remove_one(struct pci_dev *pdev)
{
        struct hfi1_devdata *dd = pci_get_drvdata(pdev);

        /* close debugfs files before ib unregister */
        hfi1_dbg_ibdev_exit(&dd->verbs_dev);
        /* unregister from IB core */
        hfi1_unregister_ib_device(dd);

        /*
         * Disable the IB link, disable interrupts on the device,
         * clear dma engines, etc.
         */
        shutdown_device(dd);

        stop_timers(dd);

        /* wait until all of our (qsfp) queue_work() calls complete */
        flush_workqueue(ib_wq);

        hfi1_device_remove(dd);

        postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        unsigned amt;
        u64 reg;

        if (!rcd->rcvhdrq) {
                dma_addr_t phys_hdrqtail;
                gfp_t gfp_flags;

                /*
                 * rcvhdrqentsize is in DWs, so we have to convert to bytes
                 * (* sizeof(u32)).
                 */
                amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
                                 sizeof(u32));
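                /*
                 * Worked example (counts illustrative; the real values
                 * come from module parameters): 2048 entries * 32
                 * DWs/entry * 4 bytes/DW = 256 KiB, which is already
                 * page aligned.
                 */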

                gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
                        GFP_USER : GFP_KERNEL;
                rcd->rcvhdrq = dma_zalloc_coherent(
                        &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
                        gfp_flags | __GFP_COMP);

                if (!rcd->rcvhdrq) {
                        dd_dev_err(dd,
                                   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
                                   amt, rcd->ctxt);
                        goto bail;
                }

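                /*
                 * With the DMA_RTAIL capability the chip DMAs the
                 * receive header queue tail index into host memory,
                 * so allocate a page to hold it; the driver can then
                 * poll RAM instead of reading a CSR.
                 */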
                if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                        rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
                                &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
                                gfp_flags);
                        if (!rcd->rcvhdrtail_kvaddr)
                                goto bail_free;
                        rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
                }

                rcd->rcvhdrq_size = amt;
        }
        /*
         * These values are per-context:
         *      RcvHdrCnt
         *      RcvHdrEntSize
         *      RcvHdrSize
         */
        reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
                        & RCV_HDR_CNT_CNT_MASK)
                << RCV_HDR_CNT_CNT_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
        reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
                        & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
                << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
        reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
                << RCV_HDR_SIZE_HDR_SIZE_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
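        /*
         * Note that the header queue count is programmed in hardware
         * units of 2^HDRQ_SIZE_SHIFT entries; e.g., if HDRQ_SIZE_SHIFT
         * were 5, a count of 2048 would be written as 64.
         */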

        /*
         * Program dummy tail address for every receive context
         * before enabling any receive context
         */
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
                        dd->rcvhdrtail_dummy_physaddr);

        return 0;

bail_free:
        dd_dev_err(dd,
                   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
                   rcd->ctxt);
        vfree(rcd->user_event_mask);
        rcd->user_event_mask = NULL;
        dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
                          rcd->rcvhdrq_phys);
        rcd->rcvhdrq = NULL;
bail:
        return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls, since asking for too much per call gets the OOM code involved,
 * with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
        struct hfi1_devdata *dd = rcd->dd;
        u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
        gfp_t gfp_flags;
        u16 order;
        int ret = 0;
        u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

        /*
         * GFP_USER, but without GFP_FS, so the buffer cache can be
         * coalesced (we hope); otherwise, even at order 4, heavy
         * filesystem activity makes these allocations fail.
         * __GFP_COMP lets us use compound pages.
         */
        gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
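        /*
         * Note: __GFP_RECLAIM | __GFP_IO is equivalent to GFP_NOFS,
         * i.e. the allocation may reclaim and do I/O but will not
         * recurse into filesystem code.
         */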

        /*
         * The minimum size of the eager buffers is one group of
         * MTU-sized buffers.
         * The global eager_buffer_size parameter is checked against the
         * theoretical lower limit of the value.  Here, we check against
         * the MTU.
         */
        if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
                rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
        /*
         * If using one-pkt-per-egr-buffer, lower the eager buffer
         * size to the max MTU (page-aligned).
         */
        if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
                rcd->egrbufs.rcvtid_size = round_mtu;

        /*
         * Eager buffer sizes of 1MB or less require smaller TID sizes
         * to satisfy the "multiple of 8 RcvArray entries" requirement.
         */
        if (rcd->egrbufs.size <= (1 << 20))
                rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
                        rounddown_pow_of_two(rcd->egrbufs.size / 8));
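        /*
         * Worked example: with egrbufs.size == 1 MB,
         * rounddown_pow_of_two(1 MB / 8) == 128 KB, so rcvtid_size
         * becomes max(round_mtu, 128 KB).
         */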

        while (alloced_bytes < rcd->egrbufs.size &&
               rcd->egrbufs.alloced < rcd->egrbufs.count) {
                rcd->egrbufs.buffers[idx].addr =
                        dma_zalloc_coherent(&dd->pcidev->dev,
                                            rcd->egrbufs.rcvtid_size,
                                            &rcd->egrbufs.buffers[idx].phys,
                                            gfp_flags);
                if (rcd->egrbufs.buffers[idx].addr) {
                        rcd->egrbufs.buffers[idx].len =
                                rcd->egrbufs.rcvtid_size;
                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
                                rcd->egrbufs.buffers[idx].addr;
                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
                                rcd->egrbufs.buffers[idx].phys;
                        rcd->egrbufs.alloced++;
                        alloced_bytes += rcd->egrbufs.rcvtid_size;
                        idx++;
                } else {
                        u32 new_size, i, j;
                        u64 offset = 0;

                        /*
                         * Fail the eager buffer allocation if:
                         *   - we are already using the lowest acceptable size
                         *   - we are using one-pkt-per-egr-buffer (this implies
                         *     that we are accepting only one size)
                         */
                        if (rcd->egrbufs.rcvtid_size == round_mtu ||
                            !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
                                dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
                                           rcd->ctxt);
                                goto bail_rcvegrbuf_phys;
                        }

                        new_size = rcd->egrbufs.rcvtid_size / 2;

                        /*
                         * If the first attempt to allocate memory
                         * failed, don't fail the whole request;
                         * continue with the next lower size.
                         */
                        if (idx == 0) {
                                rcd->egrbufs.rcvtid_size = new_size;
                                continue;
                        }

                        /*
                         * Re-partition already allocated buffers to a smaller
                         * size.
                         */
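                        /*
                         * E.g. (values illustrative), two 64 KB buffers
                         * re-partitioned at a new_size of 32 KB yield
                         * four 32 KB rcvtid entries.
                         */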
                        rcd->egrbufs.alloced = 0;
                        for (i = 0, j = 0, offset = 0; j < idx; i++) {
                                if (i >= rcd->egrbufs.count)
                                        break;
                                rcd->egrbufs.rcvtids[i].phys =
                                        rcd->egrbufs.buffers[j].phys + offset;
                                rcd->egrbufs.rcvtids[i].addr =
                                        rcd->egrbufs.buffers[j].addr + offset;
                                rcd->egrbufs.alloced++;
                                if ((rcd->egrbufs.buffers[j].phys + offset +
                                     new_size) ==
                                    (rcd->egrbufs.buffers[j].phys +
                                     rcd->egrbufs.buffers[j].len)) {
                                        j++;
                                        offset = 0;
                                } else {
                                        offset += new_size;
                                }
                        }
                        rcd->egrbufs.rcvtid_size = new_size;
                }
        }
        rcd->egrbufs.numbufs = idx;
        rcd->egrbufs.size = alloced_bytes;

        hfi1_cdbg(PROC,
                  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
                  rcd->ctxt, rcd->egrbufs.alloced,
                  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

        /*
         * Set the context's rcv array head update threshold to the
         * closest power of 2 (so we can use a mask instead of modulo)
         * below half the allocated entries.
         */
        rcd->egrbufs.threshold =
                rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
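        /* E.g., 100 allocated entries: 100 / 2 = 50 -> threshold = 32. */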
        /*
         * Compute the expected RcvArray entry base. This is done after
         * allocating the eager buffers in order to maximize the
         * expected RcvArray entries for the context.
         */
        max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
        egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
        rcd->expected_count = max_entries - egrtop;
        if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
                rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
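        /*
         * Illustrative numbers: with a group size of 8, 100 allocated
         * eager entries round up to egrtop = 104; with 64 RcvArray
         * groups, max_entries = 512 and expected_count = 408 (subject
         * to the MAX_TID_PAIR_ENTRIES * 2 cap).
         */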

        rcd->expected_base = rcd->eager_base + egrtop;
        hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
                  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
                  rcd->eager_base, rcd->expected_base);

        if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
                hfi1_cdbg(PROC,
                          "ctxt%u: current Eager buffer size is invalid %u\n",
                          rcd->ctxt, rcd->egrbufs.rcvtid_size);
                ret = -EINVAL;
                goto bail;
        }


        for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
                hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
                             rcd->egrbufs.rcvtids[idx].phys, order);
                cond_resched();
        }
        goto bail;

bail_rcvegrbuf_phys:
        for (idx = 0; idx < rcd->egrbufs.alloced &&
             rcd->egrbufs.buffers[idx].addr;
             idx++) {
                dma_free_coherent(&dd->pcidev->dev,
                                  rcd->egrbufs.buffers[idx].len,
                                  rcd->egrbufs.buffers[idx].addr,
                                  rcd->egrbufs.buffers[idx].phys);
                rcd->egrbufs.buffers[idx].addr = NULL;
                rcd->egrbufs.buffers[idx].phys = 0;
                rcd->egrbufs.buffers[idx].len = 0;
        }
bail:
        return ret;
}