/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
			/ (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))

#include "hfi.h"
#include "mad.h"
#include "trace.h"

/* the reset value from the FM is supposed to be 0xffff, handle both */
#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
#define OPA_LINK_WIDTH_RESET 0xffff

static int reply(struct ib_mad_hdr *smp)
{
        /*
         * The verbs framework will handle the directed/LID route
         * packet changes.
         */
        smp->method = IB_MGMT_METHOD_GET_RESP;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                smp->status |= IB_SMP_DIRECTION;
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
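
/*
 * Note: for directed-route SMPs, IB_SMP_DIRECTION sets the D (direction)
 * bit in the status field, marking the reply as traveling back from the
 * destination toward the requester.
 */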

static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_agent *agent;
        struct opa_smp *smp;
        int ret;
        unsigned long flags;
        unsigned long timeout;
        int pkey_idx;
        u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;

        agent = ibp->send_agent;
        if (!agent)
                return;

        /* o14-3.2.1 */
        if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE)
                return;

        /* o14-2 */
        if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
                return;

        pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
        if (pkey_idx < 0) {
                pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
                        __func__, hfi1_get_pkey(ibp, 1));
                pkey_idx = 1;
        }

        send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
                                      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                                      GFP_ATOMIC, IB_MGMT_BASE_VERSION);
        if (IS_ERR(send_buf))
                return;

        smp = send_buf->mad;
        smp->base_version = OPA_MGMT_BASE_VERSION;
        smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        smp->class_version = OPA_SMI_CLASS_VERSION;
        smp->method = IB_MGMT_METHOD_TRAP;
        ibp->tid++;
        smp->tid = cpu_to_be64(ibp->tid);
        smp->attr_id = IB_SMP_ATTR_NOTICE;
        /* o14-1: smp->mkey = 0; */
        memcpy(smp->route.lid.data, data, len);

        spin_lock_irqsave(&ibp->lock, flags);
        if (!ibp->sm_ah) {
                if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
                        struct ib_ah *ah;

                        ah = hfi1_create_qp0_ah(ibp, ibp->sm_lid);
                        if (IS_ERR(ah)) {
                                ret = PTR_ERR(ah);
                        } else {
                                send_buf->ah = ah;
                                ibp->sm_ah = to_iah(ah);
                                ret = 0;
                        }
                } else {
                        ret = -EINVAL;
                }
        } else {
                send_buf->ah = &ibp->sm_ah->ibah;
                ret = 0;
        }
        spin_unlock_irqrestore(&ibp->lock, flags);

        if (!ret)
                ret = ib_post_send_mad(send_buf, NULL);
        if (!ret) {
                /* 4.096 usec. */
                timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
                ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
        } else {
                ib_free_send_mad(send_buf);
                ibp->trap_timeout = 0;
        }
}
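
/*
 * Worked example of the o14-2 trap pacing above: with subnet_timeout = 18,
 * the next trap is held off for 4096 ns * 2^18 / 1000 = 1073741 usec,
 * i.e. roughly one second.
 */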

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                    u32 qp1, u32 qp2, u16 lid1, u16 lid2)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;
        u32 _lid1 = lid1;
        u32 _lid2 = lid2;

        memset(&data, 0, sizeof(data));

        if (trap_num == OPA_TRAP_BAD_P_KEY)
                ibp->pkey_violations++;
        else
                ibp->qkey_violations++;
        ibp->n_pkt_drops++;

        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_257_258.lid1 = cpu_to_be32(_lid1);
        data.ntc_257_258.lid2 = cpu_to_be32(_lid2);
        data.ntc_257_258.key = cpu_to_be32(key);
        data.ntc_257_258.sl = sl << 3;
        data.ntc_257_258.qp1 = cpu_to_be32(qp1);
        data.ntc_257_258.qp2 = cpu_to_be32(qp2);

        send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
                     __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;

        memset(&data, 0, sizeof(data));
        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = OPA_TRAP_BAD_M_KEY;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_256.lid = data.issuer_lid;
        data.ntc_256.method = mad->method;
        data.ntc_256.attr_id = mad->attr_id;
        data.ntc_256.attr_mod = mad->attr_mod;
        data.ntc_256.mkey = mkey;
        if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                data.ntc_256.dr_slid = dr_slid;
                data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
                if (hop_cnt > ARRAY_SIZE(data.ntc_256.dr_rtn_path)) {
                        data.ntc_256.dr_trunc_hop |=
                                IB_NOTICE_TRAP_DR_TRUNC;
                        hop_cnt = ARRAY_SIZE(data.ntc_256.dr_rtn_path);
                }
                data.ntc_256.dr_trunc_hop |= hop_cnt;
                memcpy(data.ntc_256.dr_rtn_path, return_path,
                       hop_cnt);
        }

        send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void hfi1_cap_mask_chg(struct hfi1_ibport *ibp)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;

        memset(&data, 0, sizeof(data));

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_144.lid = data.issuer_lid;
        data.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

        send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;

        memset(&data, 0, sizeof(data));

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = OPA_TRAP_CHANGE_SYSGUID;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
        data.ntc_145.lid = data.issuer_lid;

        send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;

        memset(&data, 0, sizeof(data));

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_144.lid = data.issuer_lid;
        data.ntc_144.change_flags =
                cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);

        send_trap(ibp, &data, sizeof(data));
}

static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
                                   u8 *data, struct ib_device *ibdev,
                                   u8 port, u32 *resp_len)
{
        struct opa_node_description *nd;

        if (am) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        nd = (struct opa_node_description *)data;

        memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));

        if (resp_len)
                *resp_len += sizeof(*nd);

        return reply((struct ib_mad_hdr *)smp);
}

static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
                                   struct ib_device *ibdev, u8 port,
                                   u32 *resp_len)
{
        struct opa_node_info *ni;
        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
        unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */

        ni = (struct opa_node_info *)data;

        /* GUID 0 is illegal */
        if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
        ni->base_version = OPA_MGMT_BASE_VERSION;
        ni->class_version = OPA_SMI_CLASS_VERSION;
        ni->node_type = 1;     /* channel adapter */
        ni->num_ports = ibdev->phys_port_cnt;
        /* This is already in network order */
        ni->system_image_guid = ib_hfi1_sys_image_guid;
        /* Use first-port GUID as node */
        ni->node_guid = cpu_to_be64(dd->pport->guid);
        ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
        ni->device_id = cpu_to_be16(dd->pcidev->device);
        ni->revision = cpu_to_be32(dd->minrev);
        ni->local_port_num = port;
        ni->vendor_id[0] = dd->oui1;
        ni->vendor_id[1] = dd->oui2;
        ni->vendor_id[2] = dd->oui3;

        if (resp_len)
                *resp_len += sizeof(*ni);

        return reply((struct ib_mad_hdr *)smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
        unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */

        /* GUID 0 is illegal */
        if (smp->attr_mod || pidx >= dd->num_pports ||
            dd->pport[pidx].guid == 0)
                smp->status |= IB_SMP_INVALID_FIELD;
        else
                nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);

        nip->base_version = OPA_MGMT_BASE_VERSION;
        nip->class_version = OPA_SMI_CLASS_VERSION;
        nip->node_type = 1;     /* channel adapter */
        nip->num_ports = ibdev->phys_port_cnt;
        /* This is already in network order */
        nip->sys_guid = ib_hfi1_sys_image_guid;
        /* Use first-port GUID as node */
        nip->node_guid = cpu_to_be64(dd->pport->guid);
        nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
        nip->device_id = cpu_to_be16(dd->pcidev->device);
        nip->revision = cpu_to_be32(dd->minrev);
        nip->local_port_num = port;
        nip->vendor_id[0] = dd->oui1;
        nip->vendor_id[1] = dd->oui2;
        nip->vendor_id[2] = dd->oui3;

        return reply((struct ib_mad_hdr *)smp);
}

static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
{
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
}

static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
{
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
}

static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
{
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
}

static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
                      int mad_flags, __be64 mkey, __be32 dr_slid,
                      u8 return_path[], u8 hop_cnt)
{
        int valid_mkey = 0;
        int ret = 0;

        /* Is the mkey in the process of expiring? */
        if (ibp->mkey_lease_timeout &&
            time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
                /* Clear timeout and mkey protection field. */
                ibp->mkey_lease_timeout = 0;
                ibp->mkeyprot = 0;
        }

        if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
            ibp->mkey == mkey)
                valid_mkey = 1;

        /* Unset lease timeout on any valid Get/Set/TrapRepress */
        if (valid_mkey && ibp->mkey_lease_timeout &&
            (mad->method == IB_MGMT_METHOD_GET ||
             mad->method == IB_MGMT_METHOD_SET ||
             mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
                ibp->mkey_lease_timeout = 0;

        if (!valid_mkey) {
                switch (mad->method) {
                case IB_MGMT_METHOD_GET:
                        /* Bad mkey not a violation below level 2 */
                        if (ibp->mkeyprot < 2)
                                break;
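                        /* fall through: at mkeyprot >= 2, Get is also a violation */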
                case IB_MGMT_METHOD_SET:
                case IB_MGMT_METHOD_TRAP_REPRESS:
                        if (ibp->mkey_violations != 0xFFFF)
                                ++ibp->mkey_violations;
                        if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
                                ibp->mkey_lease_timeout = jiffies +
                                        ibp->mkey_lease_period * HZ;
                        /* Generate a trap notice. */
                        bad_mkey(ibp, mad, mkey, dr_slid, return_path,
                                 hop_cnt);
                        ret = 1;
                }
        }

        return ret;
}

/*
 * The SMA caches reads from LCB registers in case the LCB is unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {
        u32 off;
        u64 val;
};

static struct lcb_datum lcb_cache[] = {
        { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
};

static int write_lcb_cache(u32 off, u64 val)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
                if (lcb_cache[i].off == off) {
                        lcb_cache[i].val = val;
                        return 0;
                }
        }

        pr_warn("%s bad offset 0x%x\n", __func__, off);
        return -1;
}

static int read_lcb_cache(u32 off, u64 *val)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
                if (lcb_cache[i].off == off) {
                        *val = lcb_cache[i].val;
                        return 0;
                }
        }

        pr_warn("%s bad offset 0x%x\n", __func__, off);
        return -1;
}

void read_ltp_rtt(struct hfi1_devdata *dd)
{
        u64 reg;

        if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
                dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
        else
                write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
}
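
/*
 * Usage note: read_ltp_rtt() refreshes the cache while the LCB is
 * readable; __subn_get_opa_portinfo() below then consults
 * read_lcb_cache(), so a PortInfo query still reports the last known
 * round-trip LTP count even when the LCB is inaccessible.
 */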

static u8 __opa_porttype(struct hfi1_pportdata *ppd)
{
        if (qsfp_mod_present(ppd)) {
                if (ppd->qsfp_info.cache_valid)
                        return OPA_PORT_TYPE_STANDARD;
                return OPA_PORT_TYPE_DISCONNECTED;
        }
        return OPA_PORT_TYPE_UNKNOWN;
}

static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
                                   struct ib_device *ibdev, u8 port,
                                   u32 *resp_len)
{
        int i;
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        struct hfi1_ibport *ibp;
        struct opa_port_info *pi = (struct opa_port_info *)data;
        u8 mtu;
        u8 credit_rate;
        u32 state;
        u32 num_ports = OPA_AM_NPORT(am);
        u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
        u32 buffer_units;
        u64 tmp = 0;

        if (num_ports != 1) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hw from 0 */
        ppd = dd->pport + (port - 1);
        ibp = &ppd->ibport_data;

        if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
                ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        pi->lid = cpu_to_be32(ppd->lid);

        /* Only return the mkey if the protection field allows it. */
        if (!(smp->method == IB_MGMT_METHOD_GET &&
              ibp->mkey != smp->mkey &&
              ibp->mkeyprot == 1))
                pi->mkey = ibp->mkey;

        pi->subnet_prefix = ibp->gid_prefix;
        pi->sm_lid = cpu_to_be32(ibp->sm_lid);
        pi->ib_cap_mask = cpu_to_be32(ibp->port_cap_flags);
        pi->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
        pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
        pi->sa_qp = cpu_to_be32(ppd->sa_qp);

        pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
        pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
        pi->link_width.active = cpu_to_be16(ppd->link_width_active);

        pi->link_width_downgrade.supported =
                        cpu_to_be16(ppd->link_width_downgrade_supported);
        pi->link_width_downgrade.enabled =
                        cpu_to_be16(ppd->link_width_downgrade_enabled);
        pi->link_width_downgrade.tx_active =
                        cpu_to_be16(ppd->link_width_downgrade_tx_active);
        pi->link_width_downgrade.rx_active =
                        cpu_to_be16(ppd->link_width_downgrade_rx_active);

        pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
        pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
        pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);

        state = driver_lstate(ppd);

        if (start_of_sm_config && (state == IB_PORT_INIT))
                ppd->is_sm_config_started = 1;

        pi->port_phys_conf = __opa_porttype(ppd) & 0xf;

#if PI_LED_ENABLE_SUP
        pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
        pi->port_states.ledenable_offlinereason |=
                ppd->is_sm_config_started << 5;
        pi->port_states.ledenable_offlinereason |=
                ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON;
#else
        pi->port_states.offline_reason = ppd->neighbor_normal << 4;
        pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
        pi->port_states.offline_reason |= ppd->offline_disabled_reason &
                                                OPA_PI_MASK_OFFLINE_REASON;
#endif /* PI_LED_ENABLE_SUP */

        pi->port_states.portphysstate_portstate =
                (hfi1_ibphys_portstate(ppd) << 4) | state;

        pi->mkeyprotect_lmc = (ibp->mkeyprot << 6) | ppd->lmc;

        memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
        for (i = 0; i < ppd->vls_supported; i++) {
                mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
                if ((i % 2) == 0)
                        pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
                else
                        pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
        }
        /* don't forget VL 15 */
        mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
        pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
        pi->smsl = ibp->sm_sl & OPA_PI_MASK_SMSL;
        pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
        pi->partenforce_filterraw |=
                (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
        if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
                pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
        if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
                pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
        pi->mkey_violations = cpu_to_be16(ibp->mkey_violations);
        /* P_KeyViolations are counted by hardware. */
        pi->pkey_violations = cpu_to_be16(ibp->pkey_violations);
        pi->qkey_violations = cpu_to_be16(ibp->qkey_violations);

        pi->vl.cap = ppd->vls_supported;
        pi->vl.high_limit = cpu_to_be16(ibp->vl_high_limit);
        pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
        pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);

        pi->clientrereg_subnettimeout = ibp->subnet_timeout;

        pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
                                         OPA_PORT_LINK_MODE_OPA << 5 |
                                         OPA_PORT_LINK_MODE_OPA);

        pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);

        pi->port_mode = cpu_to_be16(
                                ppd->is_active_optimize_enabled ?
                                        OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);

        pi->port_packet_format.supported =
                cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
        pi->port_packet_format.enabled =
                cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);

        /* flit_control.interleave is (OPA V1, version .76):
         * bits         use
         * ----         ---
         * 2            res
         * 2            DistanceSupported
         * 2            DistanceEnabled
         * 5            MaxNestLevelTxEnabled
         * 5            MaxNestLevelRxSupported
         *
         * HFI supports only "distance mode 1" (see OPA V1, version .76,
         * section 9.6.2), so set DistanceSupported, DistanceEnabled
         * to 0x1.
         */
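        /*
         * Bit check (reading the layout above MSB-first):
         * 0x1400 = 0b00 01 01 00000 00000, i.e. res = 0,
         * DistanceSupported = 1, DistanceEnabled = 1, and both
         * MaxNestLevel fields 0.
         */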
        pi->flit_control.interleave = cpu_to_be16(0x1400);

        pi->link_down_reason = ppd->local_link_down_reason.sma;
        pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
        pi->port_error_action = cpu_to_be32(ppd->port_error_action);
        pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);

        /* 32.768 usec. response time (guessing) */
        pi->resptimevalue = 3;

        pi->local_port_num = port;

        /* buffer info for FM */
        pi->overall_buffer_space = cpu_to_be16(dd->link_credits);

        pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
        pi->neigh_port_num = ppd->neighbor_port_number;
        pi->port_neigh_mode =
                (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
                (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
                (ppd->neighbor_fm_security ?
                        OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);

        /* HFIs shall always return VL15 credits to their
         * neighbor in a timely manner, without any credit return pacing.
         */
        credit_rate = 0;
        buffer_units  = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
        buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
        buffer_units |= (credit_rate << 6) &
                                OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
        buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
        pi->buffer_units = cpu_to_be32(buffer_units);

        pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported);

        /* HFI supports a replay buffer 128 LTPs in size */
        pi->replay_depth.buffer = 0x80;
        /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
        read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);

        /* this counter is 16 bits wide, but the replay_depth.wire
         * variable is only 8 bits */
        if (tmp > 0xff)
                tmp = 0xff;
        pi->replay_depth.wire = tmp;

        if (resp_len)
                *resp_len += sizeof(struct opa_port_info);

        return reply((struct ib_mad_hdr *)smp);
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
        struct hfi1_pportdata *ppd = dd->pport + port - 1;

        memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));

        return 0;
}

static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
                                    struct ib_device *ibdev, u8 port,
                                    u32 *resp_len)
{
        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
        u32 n_blocks_req = OPA_AM_NBLK(am);
        u32 start_block = am & 0x7ff;
        __be16 *p;
        u16 *q;
        int i;
        u16 n_blocks_avail;
        unsigned npkeys = hfi1_get_npkeys(dd);
        size_t size;

        if (n_blocks_req == 0) {
                pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
                        port, start_block, n_blocks_req);
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

        size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);

        if (start_block + n_blocks_req > n_blocks_avail ||
            n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
                pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
                        start_block, n_blocks_req, n_blocks_avail,
                        OPA_NUM_PKEY_BLOCKS_PER_SMP);
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        p = (__be16 *)data;
        q = (u16 *)data;
        /* get the real pkeys if we are requesting the first block */
        if (start_block == 0) {
                get_pkeys(dd, port, q);
                for (i = 0; i < npkeys; i++)
                        p[i] = cpu_to_be16(q[i]);
                if (resp_len)
                        *resp_len += size;
        } else {
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        return reply((struct ib_mad_hdr *)smp);
}
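
/*
 * Block math example (hypothetical values): if OPA_PARTITION_TABLE_BLK_SIZE
 * is 32 and hfi1_get_npkeys() returns 16, then n_blocks_avail is
 * 16 / 32 + 1 = 1, so only a request for start_block 0 with
 * n_blocks_req 1 passes the range check above.
 */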

enum {
        HFI_TRANSITION_DISALLOWED,
        HFI_TRANSITION_IGNORED,
        HFI_TRANSITION_ALLOWED,
        HFI_TRANSITION_UNDEFINED,
};

/*
 * Use shortened names to improve readability of
 * {logical,physical}_state_transitions
 */
enum {
        __D = HFI_TRANSITION_DISALLOWED,
        __I = HFI_TRANSITION_IGNORED,
        __A = HFI_TRANSITION_ALLOWED,
        __U = HFI_TRANSITION_UNDEFINED,
};

/*
 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
 * represented in physical_state_transitions.
 */
#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)

/*
 * Within physical_state_transitions, rows represent "old" states,
 * columns "new" states, and physical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 6-4).
 */
static const struct {
        u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
} physical_state_transitions = {
        {
                /* 2    3    4    5    6    7    8    9   10   11 */
        /* 2 */ { __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
        /* 3 */ { __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
        /* 4 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
        /* 5 */ { __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
        /* 6 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
        /* 7 */ { __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
        /* 8 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
        /* 9 */ { __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
        /*10 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
        /*11 */ { __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
        }
};

/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
 * in logical_state_transitions.
 */

#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)

/*
 * Within logical_state_transitions rows represent "old" states,
 * columns "new" states, and logical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 9-12).
 */
static const struct {
        u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
} logical_state_transitions = {
        {
                /* 1    2    3    4    5 */
        /* 1 */ { __I, __D, __D, __D, __U},
        /* 2 */ { __D, __I, __A, __D, __U},
        /* 3 */ { __D, __D, __I, __A, __U},
        /* 4 */ { __D, __D, __I, __I, __U},
        /* 5 */ { __U, __U, __U, __U, __U},
        }
};

static int logical_transition_allowed(int old, int new)
{
        if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
            new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
                pr_warn("invalid logical state(s) (old %d new %d)\n",
                        old, new);
                return HFI_TRANSITION_UNDEFINED;
        }

        if (new == IB_PORT_NOP)
                return HFI_TRANSITION_ALLOWED; /* always allowed */

        /* adjust states for indexing into logical_state_transitions */
        old -= IB_PORT_DOWN;
        new -= IB_PORT_DOWN;

        if (old < 0 || new < 0)
                return HFI_TRANSITION_UNDEFINED;
        return logical_state_transitions.allowed[old][new];
}
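
/*
 * Example: logical_transition_allowed(IB_PORT_INIT, IB_PORT_ARMED)
 * indexes allowed[1][2] (row 2, column 3 in the table above) and
 * returns HFI_TRANSITION_ALLOWED.
 */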

static int physical_transition_allowed(int old, int new)
{
        if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
            new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
                pr_warn("invalid physical state(s) (old %d new %d)\n",
                        old, new);
                return HFI_TRANSITION_UNDEFINED;
        }

        if (new == IB_PORTPHYSSTATE_NOP)
                return HFI_TRANSITION_ALLOWED; /* always allowed */

        /* adjust states for indexing into physical_state_transitions */
        old -= IB_PORTPHYSSTATE_POLLING;
        new -= IB_PORTPHYSSTATE_POLLING;

        if (old < 0 || new < 0)
                return HFI_TRANSITION_UNDEFINED;
        return physical_state_transitions.allowed[old][new];
}
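
/*
 * Example: a Polling (2) to Disabled (3) request indexes allowed[0][1]
 * (row 2, column 3 in the physical table above) and returns
 * HFI_TRANSITION_ALLOWED.
 */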

static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
                                          u32 logical_new, u32 physical_new)
{
        u32 physical_old = driver_physical_state(ppd);
        u32 logical_old = driver_logical_state(ppd);
        int ret, logical_allowed, physical_allowed;

        logical_allowed = ret =
                logical_transition_allowed(logical_old, logical_new);

        if (ret == HFI_TRANSITION_DISALLOWED ||
            ret == HFI_TRANSITION_UNDEFINED) {
                pr_warn("invalid logical state transition %s -> %s\n",
                        opa_lstate_name(logical_old),
                        opa_lstate_name(logical_new));
                return ret;
        }

        physical_allowed = ret =
                physical_transition_allowed(physical_old, physical_new);

        if (ret == HFI_TRANSITION_DISALLOWED ||
            ret == HFI_TRANSITION_UNDEFINED) {
                pr_warn("invalid physical state transition %s -> %s\n",
                        opa_pstate_name(physical_old),
                        opa_pstate_name(physical_new));
                return ret;
        }

        if (logical_allowed == HFI_TRANSITION_IGNORED &&
            physical_allowed == HFI_TRANSITION_IGNORED)
                return HFI_TRANSITION_IGNORED;

        /*
         * Either physical_allowed or logical_allowed is
         * HFI_TRANSITION_ALLOWED.
         */
        return HFI_TRANSITION_ALLOWED;
}

static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
                           u32 logical_state, u32 phys_state,
                           int suppress_idle_sma)
{
        struct hfi1_devdata *dd = ppd->dd;
        u32 link_state;
        int ret;

        ret = port_states_transition_allowed(ppd, logical_state, phys_state);
        if (ret == HFI_TRANSITION_DISALLOWED ||
            ret == HFI_TRANSITION_UNDEFINED) {
                /* error message emitted above */
                smp->status |= IB_SMP_INVALID_FIELD;
                return 0;
        }

        if (ret == HFI_TRANSITION_IGNORED)
                return 0;

        if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
            !(logical_state == IB_PORT_DOWN ||
              logical_state == IB_PORT_NOP)) {
                pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
                        logical_state, phys_state);
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        /*
         * Logical state changes are summarized in OPAv1g1 spec.,
         * Table 9-12; physical state changes are summarized in
         * OPAv1g1 spec., Table 6-4.
         */
        switch (logical_state) {
        case IB_PORT_NOP:
                if (phys_state == IB_PORTPHYSSTATE_NOP)
                        break;
                /* FALLTHROUGH */
        case IB_PORT_DOWN:
                if (phys_state == IB_PORTPHYSSTATE_NOP) {
                        link_state = HLS_DN_DOWNDEF;
                } else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
                        link_state = HLS_DN_POLL;
                        set_link_down_reason(ppd,
                             OPA_LINKDOWN_REASON_FM_BOUNCE, 0,
                             OPA_LINKDOWN_REASON_FM_BOUNCE);
                } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
                        link_state = HLS_DN_DISABLE;
                } else {
                        pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
                                phys_state);
                        smp->status |= IB_SMP_INVALID_FIELD;
                        break;
                }

                set_link_state(ppd, link_state);
                if (link_state == HLS_DN_DISABLE &&
                    (ppd->offline_disabled_reason >
                     OPA_LINKDOWN_REASON_SMA_DISABLED ||
                     ppd->offline_disabled_reason ==
                     OPA_LINKDOWN_REASON_NONE))
                        ppd->offline_disabled_reason =
                        OPA_LINKDOWN_REASON_SMA_DISABLED;
                /*
                 * Don't send a reply if the response would be sent
                 * through the disabled port.
                 */
                if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
                        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
                break;
        case IB_PORT_ARMED:
                ret = set_link_state(ppd, HLS_UP_ARMED);
                if ((ret == 0) && (suppress_idle_sma == 0))
                        send_idle_sma(dd, SMA_IDLE_ARM);
                break;
        case IB_PORT_ACTIVE:
                if (ppd->neighbor_normal) {
                        ret = set_link_state(ppd, HLS_UP_ACTIVE);
                        if (ret == 0)
                                send_idle_sma(dd, SMA_IDLE_ACTIVE);
                } else {
                        pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
                        smp->status |= IB_SMP_INVALID_FIELD;
                }
                break;
        default:
                pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
                        logical_state);
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        return 0;
}

/**
 * __subn_set_opa_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 */
1037 static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
1038                                    struct ib_device *ibdev, u8 port,
1039                                    u32 *resp_len)
1040 {
1041         struct opa_port_info *pi = (struct opa_port_info *)data;
1042         struct ib_event event;
1043         struct hfi1_devdata *dd;
1044         struct hfi1_pportdata *ppd;
1045         struct hfi1_ibport *ibp;
1046         u8 clientrereg;
1047         unsigned long flags;
1048         u32 smlid, opa_lid; /* tmp vars to hold LID values */
1049         u16 lid;
1050         u8 ls_old, ls_new, ps_new;
1051         u8 vls;
1052         u8 msl;
1053         u8 crc_enabled;
1054         u16 lse, lwe, mtu;
1055         u32 num_ports = OPA_AM_NPORT(am);
1056         u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1057         int ret, i, invalid = 0, call_set_mtu = 0;
1058         int call_link_downgrade_policy = 0;
1059
1060         if (num_ports != 1) {
1061                 smp->status |= IB_SMP_INVALID_FIELD;
1062                 return reply((struct ib_mad_hdr *)smp);
1063         }
1064
1065         opa_lid = be32_to_cpu(pi->lid);
1066         if (opa_lid & 0xFFFF0000) {
1067                 pr_warn("OPA_PortInfo lid out of range: %X\n", opa_lid);
1068                 smp->status |= IB_SMP_INVALID_FIELD;
1069                 goto get_only;
1070         }
1071
1072         lid = (u16)(opa_lid & 0x0000FFFF);
1073
1074         smlid = be32_to_cpu(pi->sm_lid);
1075         if (smlid & 0xFFFF0000) {
1076                 pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
1077                 smp->status |= IB_SMP_INVALID_FIELD;
1078                 goto get_only;
1079         }
1080         smlid &= 0x0000FFFF;
1081
1082         clientrereg = (pi->clientrereg_subnettimeout &
1083                         OPA_PI_MASK_CLIENT_REREGISTER);
1084
1085         dd = dd_from_ibdev(ibdev);
1086         /* IB numbers ports from 1, hw from 0 */
1087         ppd = dd->pport + (port - 1);
1088         ibp = &ppd->ibport_data;
1089         event.device = ibdev;
1090         event.element.port_num = port;
1091
1092         ls_old = driver_lstate(ppd);
1093
1094         ibp->mkey = pi->mkey;
1095         ibp->gid_prefix = pi->subnet_prefix;
1096         ibp->mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
1097
1098         /* Must be a valid unicast LID address. */
1099         if ((lid == 0 && ls_old > IB_PORT_INIT) ||
1100              lid >= HFI1_MULTICAST_LID_BASE) {
1101                 smp->status |= IB_SMP_INVALID_FIELD;
1102                 pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
1103                         lid);
1104         } else if (ppd->lid != lid ||
1105                  ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
1106                 if (ppd->lid != lid)
1107                         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
1108                 if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
1109                         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
1110                 hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
1111                 event.event = IB_EVENT_LID_CHANGE;
1112                 ib_dispatch_event(&event);
1113         }
1114
1115         msl = pi->smsl & OPA_PI_MASK_SMSL;
1116         if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
1117                 ppd->linkinit_reason =
1118                         (pi->partenforce_filterraw &
1119                          OPA_PI_MASK_LINKINIT_REASON);
1120         /* enable/disable SW pkey checking as per FM control */
1121         if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_IN)
1122                 ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
1123         else
1124                 ppd->part_enforce &= ~HFI1_PART_ENFORCE_IN;
1125
1126         if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_OUT)
1127                 ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
1128         else
1129                 ppd->part_enforce &= ~HFI1_PART_ENFORCE_OUT;
1130
1131         /* Must be a valid unicast LID address. */
1132         if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
1133              smlid >= HFI1_MULTICAST_LID_BASE) {
1134                 smp->status |= IB_SMP_INVALID_FIELD;
1135                 pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
1136         } else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
1137                 pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
1138                 spin_lock_irqsave(&ibp->lock, flags);
1139                 if (ibp->sm_ah) {
1140                         if (smlid != ibp->sm_lid)
1141                                 ibp->sm_ah->attr.dlid = smlid;
1142                         if (msl != ibp->sm_sl)
1143                                 ibp->sm_ah->attr.sl = msl;
1144                 }
1145                 spin_unlock_irqrestore(&ibp->lock, flags);
1146                 if (smlid != ibp->sm_lid)
1147                         ibp->sm_lid = smlid;
1148                 if (msl != ibp->sm_sl)
1149                         ibp->sm_sl = msl;
1150                 event.event = IB_EVENT_SM_CHANGE;
1151                 ib_dispatch_event(&event);
1152         }
1153
1154         if (pi->link_down_reason == 0) {
1155                 ppd->local_link_down_reason.sma = 0;
1156                 ppd->local_link_down_reason.latest = 0;
1157         }
1158
1159         if (pi->neigh_link_down_reason == 0) {
1160                 ppd->neigh_link_down_reason.sma = 0;
1161                 ppd->neigh_link_down_reason.latest = 0;
1162         }
1163
1164         ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
1165         ppd->sa_qp = be32_to_cpu(pi->sa_qp);
1166
1167         ppd->port_error_action = be32_to_cpu(pi->port_error_action);
1168         lwe = be16_to_cpu(pi->link_width.enabled);
1169         if (lwe) {
1170                 if (lwe == OPA_LINK_WIDTH_RESET
1171                                 || lwe == OPA_LINK_WIDTH_RESET_OLD)
1172                         set_link_width_enabled(ppd, ppd->link_width_supported);
1173                 else if ((lwe & ~ppd->link_width_supported) == 0)
1174                         set_link_width_enabled(ppd, lwe);
1175                 else
1176                         smp->status |= IB_SMP_INVALID_FIELD;
1177         }
1178         lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
1179         /* LWD.E is always applied - 0 means "disabled" */
1180         if (lwe == OPA_LINK_WIDTH_RESET
1181                         || lwe == OPA_LINK_WIDTH_RESET_OLD) {
1182                 set_link_width_downgrade_enabled(ppd,
1183                                 ppd->link_width_downgrade_supported);
1184         } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
1185                 /* only set and apply if something changed */
1186                 if (lwe != ppd->link_width_downgrade_enabled) {
1187                         set_link_width_downgrade_enabled(ppd, lwe);
1188                         call_link_downgrade_policy = 1;
1189                 }
1190         } else
1191                 smp->status |= IB_SMP_INVALID_FIELD;
1192
1193         lse = be16_to_cpu(pi->link_speed.enabled);
1194         if (lse) {
1195                 if (lse & be16_to_cpu(pi->link_speed.supported))
1196                         set_link_speed_enabled(ppd, lse);
1197                 else
1198                         smp->status |= IB_SMP_INVALID_FIELD;
1199         }
1200
1201         ibp->mkeyprot = (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
1202         ibp->vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
1203         (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
1204                                     ibp->vl_high_limit);
1205
1206         if (ppd->vls_supported/2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
1207                 ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
1208                 smp->status |= IB_SMP_INVALID_FIELD;
1209                 return reply((struct ib_mad_hdr *)smp);
1210         }
1211         for (i = 0; i < ppd->vls_supported; i++) {
1212                 if ((i % 2) == 0)
1213                         mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i/2] >> 4)
1214                                           & 0xF);
1215                 else
1216                         mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i/2] & 0xF);
1217                 if (mtu == 0xffff) {
1218                         pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
1219                                 mtu,
1220                                 (pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
1221                         smp->status |= IB_SMP_INVALID_FIELD;
1222                         mtu = hfi1_max_mtu; /* use a valid MTU */
1223                 }
1224                 if (dd->vld[i].mtu != mtu) {
1225                         dd_dev_info(dd,
1226                                 "MTU change on vl %d from %d to %d\n",
1227                                 i, dd->vld[i].mtu, mtu);
1228                         dd->vld[i].mtu = mtu;
1229                         call_set_mtu++;
1230                 }
1231         }
1232         /* As per OPAV1 spec: VL15 must support and be configured
1233          * for operation with a 2048 or larger MTU.
1234          */
1235         mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15/2] & 0xF);
1236         if (mtu < 2048 || mtu == 0xffff)
1237                 mtu = 2048;
1238         if (dd->vld[15].mtu != mtu) {
1239                 dd_dev_info(dd,
1240                         "MTU change on vl 15 from %d to %d\n",
1241                         dd->vld[15].mtu, mtu);
1242                 dd->vld[15].mtu = mtu;
1243                 call_set_mtu++;
1244         }
1245         if (call_set_mtu)
1246                 set_mtu(ppd);
1247
1248         /* Set operational VLs */
1249         vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
1250         if (vls) {
1251                 if (vls > ppd->vls_supported) {
1252                         pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
1253                                 pi->operational_vls);
1254                         smp->status |= IB_SMP_INVALID_FIELD;
1255                 } else {
1256                         if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
1257                                                 vls) == -EINVAL)
1258                                 smp->status |= IB_SMP_INVALID_FIELD;
1259                 }
1260         }
1261
1262         if (pi->mkey_violations == 0)
1263                 ibp->mkey_violations = 0;
1264
1265         if (pi->pkey_violations == 0)
1266                 ibp->pkey_violations = 0;
1267
1268         if (pi->qkey_violations == 0)
1269                 ibp->qkey_violations = 0;
1270
1271         ibp->subnet_timeout =
1272                 pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;
1273
1274         crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
1275         crc_enabled >>= 4;
1276         crc_enabled &= 0xf;
1277
1278         if (crc_enabled != 0)
1279                 ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);
1280
1281         ppd->is_active_optimize_enabled =
1282                         !!(be16_to_cpu(pi->port_mode)
1283                                         & OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);
1284
1285         ls_new = pi->port_states.portphysstate_portstate &
1286                         OPA_PI_MASK_PORT_STATE;
1287         ps_new = (pi->port_states.portphysstate_portstate &
1288                         OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
1289
1290         if (ls_old == IB_PORT_INIT) {
1291                 if (start_of_sm_config) {
1292                         if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
1293                                 ppd->is_sm_config_started = 1;
1294                 } else if (ls_new == IB_PORT_ARMED) {
1295                         if (ppd->is_sm_config_started == 0)
1296                                 invalid = 1;
1297                 }
1298         }
1299
1300         /* Handle CLIENT_REREGISTER event b/c SM asked us for it */
1301         if (clientrereg) {
1302                 event.event = IB_EVENT_CLIENT_REREGISTER;
1303                 ib_dispatch_event(&event);
1304         }
1305
1306         /*
1307          * Do the port state change now that the other link parameters
1308          * have been set.
1309          * Changing the port physical state only makes sense if the link
1310          * is down or is being set to down.
1311          */
1312
1313         ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
1314         if (ret)
1315                 return ret;
1316
1317         ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
1318
1319         /* restore re-reg bit per o14-12.2.1 */
1320         pi->clientrereg_subnettimeout |= clientrereg;
1321
1322         /*
1323          * Apply the new link downgrade policy.  This may result in a link
1324          * bounce.  Do this after everything else so things are settled.
1325          * Possible problem: if setting the port state above fails, then
1326          * the policy change is not applied.
1327          */
1328         if (call_link_downgrade_policy)
1329                 apply_link_downgrade_policy(ppd, 0);
1330
1331         return ret;
1332
1333 get_only:
1334         return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
1335 }
1336
1337 /**
1338  * set_pkeys - set the PKEY table for ctxt 0
1339  * @dd: the hfi1_ib device
1340  * @port: the IB port number
1341  * @pkeys: the PKEY table
1342  */
1343 static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
1344 {
1345         struct hfi1_pportdata *ppd;
1346         int i;
1347         int changed = 0;
1348         int update_includes_mgmt_partition = 0;
1349
1350         /*
1351          * IB port one/two always maps to context zero/one,
1352          * always a kernel context, no locking needed
1353          * If we get here with ppd setup, no need to check
1354          * that rcd is valid.
1355          */
1356         ppd = dd->pport + (port - 1);
1357         /*
1358          * If the update does not include the management pkey, don't do it.
1359          */
1360         for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
1361                 if (pkeys[i] == LIM_MGMT_P_KEY) {
1362                         update_includes_mgmt_partition = 1;
1363                         break;
1364                 }
1365         }
1366
1367         if (!update_includes_mgmt_partition)
1368                 return 1;
1369
1370         for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
1371                 u16 key = pkeys[i];
1372                 u16 okey = ppd->pkeys[i];
1373
1374                 if (key == okey)
1375                         continue;
1376                 /*
1377                  * The SM gives us the complete PKey table. We have
1378                  * to ensure that we put the PKeys in the matching
1379                  * slots.
1380                  */
1381                 ppd->pkeys[i] = key;
1382                 changed = 1;
1383         }
1384
1385         if (changed) {
1386                 struct ib_event event;
1387
1388                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
1389
1390                 event.event = IB_EVENT_PKEY_CHANGE;
1391                 event.device = &dd->verbs_dev.ibdev;
1392                 event.element.port_num = port;
1393                 ib_dispatch_event(&event);
1394         }
1395         return 0;
1396 }
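
/*
 * Illustrative caller sketch for set_pkeys() (assumed values, not taken
 * from an actual SM exchange; the table is sized to match ppd->pkeys and
 * must carry LIM_MGMT_P_KEY somewhere or the update is rejected):
 *
 *	u16 pkeys[ARRAY_SIZE(ppd->pkeys)] = { 0 };
 *
 *	pkeys[0] = LIM_MGMT_P_KEY;
 *	pkeys[1] = 0x8001;		  (example full-member pkey)
 *	if (set_pkeys(dd, port, pkeys))	  (nonzero: mgmt pkey missing)
 *		smp->status |= IB_SMP_INVALID_FIELD;
 */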
1397
1398 static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
1399                                     struct ib_device *ibdev, u8 port,
1400                                     u32 *resp_len)
1401 {
1402         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1403         u32 n_blocks_sent = OPA_AM_NBLK(am);
1404         u32 start_block = am & 0x7ff;
1405         u16 *p = (u16 *) data;
1406         __be16 *q = (__be16 *)data;
1407         int i;
1408         u16 n_blocks_avail;
1409         unsigned npkeys = hfi1_get_npkeys(dd);
1410
1411         if (n_blocks_sent == 0) {
1412                 pr_warn("OPA Set PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
1413                         port, start_block, n_blocks_sent);
1414                 smp->status |= IB_SMP_INVALID_FIELD;
1415                 return reply((struct ib_mad_hdr *)smp);
1416         }
1417
1418         n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
1419
1420         if (start_block + n_blocks_sent > n_blocks_avail ||
1421             n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
1422                 pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
1423                         start_block, n_blocks_sent, n_blocks_avail,
1424                         OPA_NUM_PKEY_BLOCKS_PER_SMP);
1425                 smp->status |= IB_SMP_INVALID_FIELD;
1426                 return reply((struct ib_mad_hdr *)smp);
1427         }
1428
1429         for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
1430                 p[i] = be16_to_cpu(q[i]);
1431
1432         if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
1433                 smp->status |= IB_SMP_INVALID_FIELD;
1434                 return reply((struct ib_mad_hdr *)smp);
1435         }
1436
1437         return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len);
1438 }
1439
1440 static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
1441 {
1442         u64 *val = data;
1443
1444         *val++ = read_csr(dd, SEND_SC2VLT0);
1445         *val++ = read_csr(dd, SEND_SC2VLT1);
1446         *val++ = read_csr(dd, SEND_SC2VLT2);
1447         *val++ = read_csr(dd, SEND_SC2VLT3);
1448         return 0;
1449 }
1450
1451 #define ILLEGAL_VL 12
1452 /*
1453  * filter_sc2vlt remaps any SC-to-VL15 mapping to ILLEGAL_VL (except
1454  * for SC15, which must map to VL15). If we don't remap things this
1455  * way it is possible for VL15 counters to increment when we try to
1456  * send on an SC which is mapped to an invalid VL.
1457  */
1458 static void filter_sc2vlt(void *data)
1459 {
1460         int i;
1461         u8 *pd = data;
1462
1463         for (i = 0; i < OPA_MAX_SCS; i++) {
1464                 if (i == 15)
1465                         continue;
1466                 if ((pd[i] & 0x1f) == 0xf)
1467                         pd[i] = ILLEGAL_VL;
1468         }
1469 }
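
/*
 * Worked example for filter_sc2vlt() (illustrative values): an SC9
 * entry of 0x0f (VL15) is rewritten to ILLEGAL_VL, while the SC15
 * entry keeps its required VL15 mapping:
 *
 *	before: pd[9] = 0x0f, pd[15] = 0x0f
 *	after:  pd[9] = 12,   pd[15] = 0x0f
 */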
1470
1471 static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
1472 {
1473         u64 *val = data;
1474
1475         filter_sc2vlt(data);
1476
1477         write_csr(dd, SEND_SC2VLT0, *val++);
1478         write_csr(dd, SEND_SC2VLT1, *val++);
1479         write_csr(dd, SEND_SC2VLT2, *val++);
1480         write_csr(dd, SEND_SC2VLT3, *val++);
1481         write_seqlock_irq(&dd->sc2vl_lock);
1482         memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
1483         write_sequnlock_irq(&dd->sc2vl_lock);
1484         return 0;
1485 }
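
/*
 * The seqlock above guards readers of dd->sc2vl. A minimal read-side
 * sketch (illustrative only; sc is a service-class index, 0..31, and
 * the driver's actual readers may differ):
 *
 *	unsigned seq;
 *	u8 vl;
 *
 *	do {
 *		seq = read_seqbegin(&dd->sc2vl_lock);
 *		vl = ((u8 *)dd->sc2vl)[sc];
 *	} while (read_seqretry(&dd->sc2vl_lock, seq));
 */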
1486
1487 static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
1488                                    struct ib_device *ibdev, u8 port,
1489                                    u32 *resp_len)
1490 {
1491         struct hfi1_ibport *ibp = to_iport(ibdev, port);
1492         u8 *p = data;
1493         size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
1494         unsigned i;
1495
1496         if (am) {
1497                 smp->status |= IB_SMP_INVALID_FIELD;
1498                 return reply((struct ib_mad_hdr *)smp);
1499         }
1500
1501         for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
1502                 *p++ = ibp->sl_to_sc[i];
1503
1504         if (resp_len)
1505                 *resp_len += size;
1506
1507         return reply((struct ib_mad_hdr *)smp);
1508 }
1509
1510 static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
1511                                    struct ib_device *ibdev, u8 port,
1512                                    u32 *resp_len)
1513 {
1514         struct hfi1_ibport *ibp = to_iport(ibdev, port);
1515         u8 *p = data;
1516         int i;
1517
1518         if (am) {
1519                 smp->status |= IB_SMP_INVALID_FIELD;
1520                 return reply((struct ib_mad_hdr *)smp);
1521         }
1522
1523         for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
1524                 ibp->sl_to_sc[i] = *p++;
1525
1526         return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
1527 }
1528
1529 static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
1530                                    struct ib_device *ibdev, u8 port,
1531                                    u32 *resp_len)
1532 {
1533         struct hfi1_ibport *ibp = to_iport(ibdev, port);
1534         u8 *p = data;
1535         size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
1536         unsigned i;
1537
1538         if (am) {
1539                 smp->status |= IB_SMP_INVALID_FIELD;
1540                 return reply((struct ib_mad_hdr *)smp);
1541         }
1542
1543         for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
1544                 *p++ = ibp->sc_to_sl[i];
1545
1546         if (resp_len)
1547                 *resp_len += size;
1548
1549         return reply((struct ib_mad_hdr *)smp);
1550 }
1551
1552 static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
1553                                    struct ib_device *ibdev, u8 port,
1554                                    u32 *resp_len)
1555 {
1556         struct hfi1_ibport *ibp = to_iport(ibdev, port);
1557         u8 *p = data;
1558         int i;
1559
1560         if (am) {
1561                 smp->status |= IB_SMP_INVALID_FIELD;
1562                 return reply((struct ib_mad_hdr *)smp);
1563         }
1564
1565         for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
1566                 ibp->sc_to_sl[i] = *p++;
1567
1568         return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len);
1569 }
1570
1571 static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
1572                                     struct ib_device *ibdev, u8 port,
1573                                     u32 *resp_len)
1574 {
1575         u32 n_blocks = OPA_AM_NBLK(am);
1576         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1577         void *vp = (void *) data;
1578         size_t size = 4 * sizeof(u64);
1579
1580         if (n_blocks != 1) {
1581                 smp->status |= IB_SMP_INVALID_FIELD;
1582                 return reply((struct ib_mad_hdr *)smp);
1583         }
1584
1585         get_sc2vlt_tables(dd, vp);
1586
1587         if (resp_len)
1588                 *resp_len += size;
1589
1590         return reply((struct ib_mad_hdr *)smp);
1591 }
1592
1593 static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
1594                                     struct ib_device *ibdev, u8 port,
1595                                     u32 *resp_len)
1596 {
1597         u32 n_blocks = OPA_AM_NBLK(am);
1598         int async_update = OPA_AM_ASYNC(am);
1599         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1600         void *vp = (void *) data;
1601         struct hfi1_pportdata *ppd;
1602         int lstate;
1603
1604         if (n_blocks != 1 || async_update) {
1605                 smp->status |= IB_SMP_INVALID_FIELD;
1606                 return reply((struct ib_mad_hdr *)smp);
1607         }
1608
1609         /* IB numbers ports from 1, hw from 0 */
1610         ppd = dd->pport + (port - 1);
1611         lstate = driver_lstate(ppd);
1612         /* it's known that async_update is 0 by this point, but include
1613          * the explicit check for clarity */
1614         if (!async_update &&
1615             (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
1616                 smp->status |= IB_SMP_INVALID_FIELD;
1617                 return reply((struct ib_mad_hdr *)smp);
1618         }
1619
1620         set_sc2vlt_tables(dd, vp);
1621
1622         return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len);
1623 }
1624
1625 static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
1626                                      struct ib_device *ibdev, u8 port,
1627                                      u32 *resp_len)
1628 {
1629         u32 n_blocks = OPA_AM_NPORT(am);
1630         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1631         struct hfi1_pportdata *ppd;
1632         void *vp = (void *) data;
1633         int size;
1634
1635         if (n_blocks != 1) {
1636                 smp->status |= IB_SMP_INVALID_FIELD;
1637                 return reply((struct ib_mad_hdr *)smp);
1638         }
1639
1640         ppd = dd->pport + (port - 1);
1641
1642         size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
1643
1644         if (resp_len)
1645                 *resp_len += size;
1646
1647         return reply((struct ib_mad_hdr *)smp);
1648 }
1649
1650 static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
1651                                      struct ib_device *ibdev, u8 port,
1652                                      u32 *resp_len)
1653 {
1654         u32 n_blocks = OPA_AM_NPORT(am);
1655         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1656         struct hfi1_pportdata *ppd;
1657         void *vp = (void *) data;
1658         int lstate;
1659
1660         if (n_blocks != 1) {
1661                 smp->status |= IB_SMP_INVALID_FIELD;
1662                 return reply((struct ib_mad_hdr *)smp);
1663         }
1664
1665         /* IB numbers ports from 1, hw from 0 */
1666         ppd = dd->pport + (port - 1);
1667         lstate = driver_lstate(ppd);
1668         if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
1669                 smp->status |= IB_SMP_INVALID_FIELD;
1670                 return reply((struct ib_mad_hdr *)smp);
1671         }
1672
1675         fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
1676
1677         return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
1678                                          resp_len);
1679 }
1680
1681 static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
1682                               struct ib_device *ibdev, u8 port,
1683                               u32 *resp_len)
1684 {
1685         u32 nports = OPA_AM_NPORT(am);
1686         u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1687         u32 lstate;
1688         struct hfi1_ibport *ibp;
1689         struct hfi1_pportdata *ppd;
1690         struct opa_port_state_info *psi = (struct opa_port_state_info *) data;
1691
1692         if (nports != 1) {
1693                 smp->status |= IB_SMP_INVALID_FIELD;
1694                 return reply((struct ib_mad_hdr *)smp);
1695         }
1696
1697         ibp = to_iport(ibdev, port);
1698         ppd = ppd_from_ibp(ibp);
1699
1700         lstate = driver_lstate(ppd);
1701
1702         if (start_of_sm_config && (lstate == IB_PORT_INIT))
1703                 ppd->is_sm_config_started = 1;
1704
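        /*
         * Byte layout written below (inferred from the shifts and masks
         * used, offered as a reading aid): bit 4 carries neighbor_normal,
         * bit 5 carries is_sm_config_started, and the low bits carry
         * offline_disabled_reason masked by OPA_PI_MASK_OFFLINE_REASON.
         */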
1705 #if PI_LED_ENABLE_SUP
1706         psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
1707         psi->port_states.ledenable_offlinereason |=
1708                 ppd->is_sm_config_started << 5;
1709         psi->port_states.ledenable_offlinereason |=
1710                 ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON;
1711 #else
1712         psi->port_states.offline_reason = ppd->neighbor_normal << 4;
1713         psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
1714         psi->port_states.offline_reason |= ppd->offline_disabled_reason &
1715                                 OPA_PI_MASK_OFFLINE_REASON;
1716 #endif /* PI_LED_ENABLE_SUP */
1717
1718         psi->port_states.portphysstate_portstate =
1719                 (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
1720         psi->link_width_downgrade_tx_active =
1721                 cpu_to_be16(ppd->link_width_downgrade_tx_active);
1722         psi->link_width_downgrade_rx_active =
1723                 cpu_to_be16(ppd->link_width_downgrade_rx_active);
1724         if (resp_len)
1725                 *resp_len += sizeof(struct opa_port_state_info);
1726
1727         return reply((struct ib_mad_hdr *)smp);
1728 }
1729
1730 static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
1731                               struct ib_device *ibdev, u8 port,
1732                               u32 *resp_len)
1733 {
1734         u32 nports = OPA_AM_NPORT(am);
1735         u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1736         u32 ls_old;
1737         u8 ls_new, ps_new;
1738         struct hfi1_ibport *ibp;
1739         struct hfi1_pportdata *ppd;
1740         struct opa_port_state_info *psi = (struct opa_port_state_info *) data;
1741         int ret, invalid = 0;
1742
1743         if (nports != 1) {
1744                 smp->status |= IB_SMP_INVALID_FIELD;
1745                 return reply((struct ib_mad_hdr *)smp);
1746         }
1747
1748         ibp = to_iport(ibdev, port);
1749         ppd = ppd_from_ibp(ibp);
1750
1751         ls_old = driver_lstate(ppd);
1752
1753         ls_new = port_states_to_logical_state(&psi->port_states);
1754         ps_new = port_states_to_phys_state(&psi->port_states);
1755
1756         if (ls_old == IB_PORT_INIT) {
1757                 if (start_of_sm_config) {
1758                         if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
1759                                 ppd->is_sm_config_started = 1;
1760                 } else if (ls_new == IB_PORT_ARMED) {
1761                         if (ppd->is_sm_config_started == 0)
1762                                 invalid = 1;
1763                 }
1764         }
1765
1766         ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
1767         if (ret)
1768                 return ret;
1769
1770         if (invalid)
1771                 smp->status |= IB_SMP_INVALID_FIELD;
1772
1773         return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len);
1774 }
1775
1776 static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
1777                                      struct ib_device *ibdev, u8 port,
1778                                      u32 *resp_len)
1779 {
1780         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1781         u32 addr = OPA_AM_CI_ADDR(am);
1782         u32 len = OPA_AM_CI_LEN(am) + 1;
1783         int ret;
1784
1785 #define __CI_PAGE_SIZE (1 << 7) /* 128 bytes */
1786 #define __CI_PAGE_MASK (~(__CI_PAGE_SIZE - 1))
1787 #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
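
        /*
         * Worked example for the page check below (illustrative numbers):
         * addr = 120, len = 16 spans bytes 120..135; __CI_PAGE_NUM(120)
         * is 0 but __CI_PAGE_NUM(135) is 128, so the request crosses a
         * 128-byte page and is rejected.
         */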
1788
1789         /* check that addr is within spec, and
1790          * addr and (addr + len - 1) are on the same "page" */
1791         if (addr >= 4096 ||
1792                 (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
1793                 smp->status |= IB_SMP_INVALID_FIELD;
1794                 return reply((struct ib_mad_hdr *)smp);
1795         }
1796
1797         ret = get_cable_info(dd, port, addr, len, data);
1798
1799         if (ret == -ENODEV) {
1800                 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1801                 return reply((struct ib_mad_hdr *)smp);
1802         }
1803
1804         /* The address range for the CableInfo SMA query is wider than the
1805          * memory available on the QSFP cable. We want to return a valid
1806          * response, albeit zeroed out, for address ranges beyond available
1807          * memory but that are within the CableInfo query spec
1808          */
1809         if (ret < 0 && ret != -ERANGE) {
1810                 smp->status |= IB_SMP_INVALID_FIELD;
1811                 return reply((struct ib_mad_hdr *)smp);
1812         }
1813
1814         if (resp_len)
1815                 *resp_len += len;
1816
1817         return reply((struct ib_mad_hdr *)smp);
1818 }
1819
1820 static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
1821                               struct ib_device *ibdev, u8 port, u32 *resp_len)
1822 {
1823         u32 num_ports = OPA_AM_NPORT(am);
1824         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1825         struct hfi1_pportdata *ppd;
1826         struct buffer_control *p = (struct buffer_control *) data;
1827         int size;
1828
1829         if (num_ports != 1) {
1830                 smp->status |= IB_SMP_INVALID_FIELD;
1831                 return reply((struct ib_mad_hdr *)smp);
1832         }
1833
1834         ppd = dd->pport + (port - 1);
1835         size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
1836         trace_bct_get(dd, p);
1837         if (resp_len)
1838                 *resp_len += size;
1839
1840         return reply((struct ib_mad_hdr *)smp);
1841 }
1842
1843 static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
1844                               struct ib_device *ibdev, u8 port, u32 *resp_len)
1845 {
1846         u32 num_ports = OPA_AM_NPORT(am);
1847         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1848         struct hfi1_pportdata *ppd;
1849         struct buffer_control *p = (struct buffer_control *) data;
1850
1851         if (num_ports != 1) {
1852                 smp->status |= IB_SMP_INVALID_FIELD;
1853                 return reply((struct ib_mad_hdr *)smp);
1854         }
1855         ppd = dd->pport + (port - 1);
1856         trace_bct_set(dd, p);
1857         if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
1858                 smp->status |= IB_SMP_INVALID_FIELD;
1859                 return reply((struct ib_mad_hdr *)smp);
1860         }
1861
1862         return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len);
1863 }
1864
1865 static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
1866                                  struct ib_device *ibdev, u8 port,
1867                                  u32 *resp_len)
1868 {
1869         struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1870         u32 num_ports = OPA_AM_NPORT(am);
1871         u8 section = (am & 0x00ff0000) >> 16;
1872         u8 *p = data;
1873         int size = 0;
1874
1875         if (num_ports != 1) {
1876                 smp->status |= IB_SMP_INVALID_FIELD;
1877                 return reply((struct ib_mad_hdr *)smp);
1878         }
1879
1880         switch (section) {
1881         case OPA_VLARB_LOW_ELEMENTS:
1882                 size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
1883                 break;
1884         case OPA_VLARB_HIGH_ELEMENTS:
1885                 size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
1886                 break;
1887         case OPA_VLARB_PREEMPT_ELEMENTS:
1888                 size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
1889                 break;
1890         case OPA_VLARB_PREEMPT_MATRIX:
1891                 size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
1892                 break;
1893         default:
1894                 pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
1895                         be32_to_cpu(smp->attr_mod));
1896                 smp->status |= IB_SMP_INVALID_FIELD;
1897                 break;
1898         }
1899
1900         if (size > 0 && resp_len)
1901                 *resp_len += size;
1902
1903         return reply((struct ib_mad_hdr *)smp);
1904 }
1905
1906 static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
1907                                  struct ib_device *ibdev, u8 port,
1908                                  u32 *resp_len)
1909 {
1910         struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1911         u32 num_ports = OPA_AM_NPORT(am);
1912         u8 section = (am & 0x00ff0000) >> 16;
1913         u8 *p = data;
1914
1915         if (num_ports != 1) {
1916                 smp->status |= IB_SMP_INVALID_FIELD;
1917                 return reply((struct ib_mad_hdr *)smp);
1918         }
1919
1920         switch (section) {
1921         case OPA_VLARB_LOW_ELEMENTS:
1922                 (void) fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
1923                 break;
1924         case OPA_VLARB_HIGH_ELEMENTS:
1925                 (void) fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
1926                 break;
1927         /* neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
1928          * can be changed from the default values */
1929         case OPA_VLARB_PREEMPT_ELEMENTS:
1930                 /* FALLTHROUGH */
1931         case OPA_VLARB_PREEMPT_MATRIX:
1932                 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1933                 break;
1934         default:
1935                 pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
1936                         be32_to_cpu(smp->attr_mod));
1937                 smp->status |= IB_SMP_INVALID_FIELD;
1938                 break;
1939         }
1940
1941         return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len);
1942 }
1943
1944 struct opa_pma_mad {
1945         struct ib_mad_hdr mad_hdr;
1946         u8 data[2024];
1947 } __packed;
1948
1949 struct opa_class_port_info {
1950         u8 base_version;
1951         u8 class_version;
1952         __be16 cap_mask;
1953         __be32 cap_mask2_resp_time;
1954
1955         u8 redirect_gid[16];
1956         __be32 redirect_tc_fl;
1957         __be32 redirect_lid;
1958         __be32 redirect_sl_qp;
1959         __be32 redirect_qkey;
1960
1961         u8 trap_gid[16];
1962         __be32 trap_tc_fl;
1963         __be32 trap_lid;
1964         __be32 trap_hl_qp;
1965         __be32 trap_qkey;
1966
1967         __be16 trap_pkey;
1968         __be16 redirect_pkey;
1969
1970         u8 trap_sl_rsvd;
1971         u8 reserved[3];
1972 } __packed;
1973
1974 struct opa_port_status_req {
1975         __u8 port_num;
1976         __u8 reserved[3];
1977         __be32 vl_select_mask;
1978 };
1979
1980 #define VL_MASK_ALL             0x000080ff
1981
1982 struct opa_port_status_rsp {
1983         __u8 port_num;
1984         __u8 reserved[3];
1985         __be32  vl_select_mask;
1986
1987         /* Data counters */
1988         __be64 port_xmit_data;
1989         __be64 port_rcv_data;
1990         __be64 port_xmit_pkts;
1991         __be64 port_rcv_pkts;
1992         __be64 port_multicast_xmit_pkts;
1993         __be64 port_multicast_rcv_pkts;
1994         __be64 port_xmit_wait;
1995         __be64 sw_port_congestion;
1996         __be64 port_rcv_fecn;
1997         __be64 port_rcv_becn;
1998         __be64 port_xmit_time_cong;
1999         __be64 port_xmit_wasted_bw;
2000         __be64 port_xmit_wait_data;
2001         __be64 port_rcv_bubble;
2002         __be64 port_mark_fecn;
2003         /* Error counters */
2004         __be64 port_rcv_constraint_errors;
2005         __be64 port_rcv_switch_relay_errors;
2006         __be64 port_xmit_discards;
2007         __be64 port_xmit_constraint_errors;
2008         __be64 port_rcv_remote_physical_errors;
2009         __be64 local_link_integrity_errors;
2010         __be64 port_rcv_errors;
2011         __be64 excessive_buffer_overruns;
2012         __be64 fm_config_errors;
2013         __be32 link_error_recovery;
2014         __be32 link_downed;
2015         u8 uncorrectable_errors;
2016
2017         u8 link_quality_indicator; /* 5res, 3bit */
2018         u8 res2[6];
2019         struct _vls_pctrs {
2020                 /* per-VL Data counters */
2021                 __be64 port_vl_xmit_data;
2022                 __be64 port_vl_rcv_data;
2023                 __be64 port_vl_xmit_pkts;
2024                 __be64 port_vl_rcv_pkts;
2025                 __be64 port_vl_xmit_wait;
2026                 __be64 sw_port_vl_congestion;
2027                 __be64 port_vl_rcv_fecn;
2028                 __be64 port_vl_rcv_becn;
2029                 __be64 port_xmit_time_cong;
2030                 __be64 port_vl_xmit_wasted_bw;
2031                 __be64 port_vl_xmit_wait_data;
2032                 __be64 port_vl_rcv_bubble;
2033                 __be64 port_vl_mark_fecn;
2034                 __be64 port_vl_xmit_discards;
2035         } vls[0]; /* real array size defined by # bits set in vl_select_mask */
2036 };
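
/*
 * vls[0] above is a flexible array member; a response carrying data for
 * num_vls VLs occupies (a sizing sketch mirroring the check in
 * pma_get_opa_portstatus()):
 *
 *	sizeof(struct opa_port_status_rsp) +
 *		num_vls * sizeof(struct _vls_pctrs)
 */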
2037
2038 enum counter_selects {
2039         CS_PORT_XMIT_DATA                       = (1 << 31),
2040         CS_PORT_RCV_DATA                        = (1 << 30),
2041         CS_PORT_XMIT_PKTS                       = (1 << 29),
2042         CS_PORT_RCV_PKTS                        = (1 << 28),
2043         CS_PORT_MCAST_XMIT_PKTS                 = (1 << 27),
2044         CS_PORT_MCAST_RCV_PKTS                  = (1 << 26),
2045         CS_PORT_XMIT_WAIT                       = (1 << 25),
2046         CS_SW_PORT_CONGESTION                   = (1 << 24),
2047         CS_PORT_RCV_FECN                        = (1 << 23),
2048         CS_PORT_RCV_BECN                        = (1 << 22),
2049         CS_PORT_XMIT_TIME_CONG                  = (1 << 21),
2050         CS_PORT_XMIT_WASTED_BW                  = (1 << 20),
2051         CS_PORT_XMIT_WAIT_DATA                  = (1 << 19),
2052         CS_PORT_RCV_BUBBLE                      = (1 << 18),
2053         CS_PORT_MARK_FECN                       = (1 << 17),
2054         CS_PORT_RCV_CONSTRAINT_ERRORS           = (1 << 16),
2055         CS_PORT_RCV_SWITCH_RELAY_ERRORS         = (1 << 15),
2056         CS_PORT_XMIT_DISCARDS                   = (1 << 14),
2057         CS_PORT_XMIT_CONSTRAINT_ERRORS          = (1 << 13),
2058         CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS      = (1 << 12),
2059         CS_LOCAL_LINK_INTEGRITY_ERRORS          = (1 << 11),
2060         CS_PORT_RCV_ERRORS                      = (1 << 10),
2061         CS_EXCESSIVE_BUFFER_OVERRUNS            = (1 << 9),
2062         CS_FM_CONFIG_ERRORS                     = (1 << 8),
2063         CS_LINK_ERROR_RECOVERY                  = (1 << 7),
2064         CS_LINK_DOWNED                          = (1 << 6),
2065         CS_UNCORRECTABLE_ERRORS                 = (1 << 5),
2066 };
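
/*
 * counter_select_mask values are built by ORing selects together; e.g.
 * a ClearPortStatus touching only the xmit/rcv data counters might carry
 * (an illustrative value, not taken from the spec text):
 *
 *	__be32 csm = cpu_to_be32(CS_PORT_XMIT_DATA | CS_PORT_RCV_DATA);
 */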
2067
2068 struct opa_clear_port_status {
2069         __be64 port_select_mask[4];
2070         __be32 counter_select_mask;
2071 };
2072
2073 struct opa_aggregate {
2074         __be16 attr_id;
2075         __be16 err_reqlength;   /* 1 bit, 8 res, 7 bit */
2076         __be32 attr_mod;
2077         u8 data[0];
2078 };
2079
2080 #define MSK_LLI 0x000000f0
2081 #define MSK_LLI_SFT 4
2082 #define MSK_LER 0x0000000f
2083 #define MSK_LER_SFT 0
2084 #define ADD_LLI 8
2085 #define ADD_LER 2
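
/*
 * Resolution decoding, as applied in pma_get_opa_datacounters(): each
 * field selects a right-shift for the corresponding error count, and the
 * ADD_* offset is applied only when the field is nonzero. A worked
 * example with resolution = 0x00000021:
 *
 *	res_lli = (0x21 & MSK_LLI) >> MSK_LLI_SFT = 2, +ADD_LLI -> 10
 *	res_ler = (0x21 & MSK_LER) >> MSK_LER_SFT = 1, +ADD_LER -> 3
 *
 * so local link integrity is scaled by 2^-10 and link error recovery
 * by 2^-3 before being summed.
 */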
2086
2087 /* Request contains first three fields, response contains those plus the rest */
2088 struct opa_port_data_counters_msg {
2089         __be64 port_select_mask[4];
2090         __be32 vl_select_mask;
2091         __be32 resolution;
2092
2093         /* Response fields follow */
2094         struct _port_dctrs {
2095                 u8 port_number;
2096                 u8 reserved2[3];
2097                 __be32 link_quality_indicator; /* 29res, 3bit */
2098
2099                 /* Data counters */
2100                 __be64 port_xmit_data;
2101                 __be64 port_rcv_data;
2102                 __be64 port_xmit_pkts;
2103                 __be64 port_rcv_pkts;
2104                 __be64 port_multicast_xmit_pkts;
2105                 __be64 port_multicast_rcv_pkts;
2106                 __be64 port_xmit_wait;
2107                 __be64 sw_port_congestion;
2108                 __be64 port_rcv_fecn;
2109                 __be64 port_rcv_becn;
2110                 __be64 port_xmit_time_cong;
2111                 __be64 port_xmit_wasted_bw;
2112                 __be64 port_xmit_wait_data;
2113                 __be64 port_rcv_bubble;
2114                 __be64 port_mark_fecn;
2115
2116                 __be64 port_error_counter_summary;
2117                 /* Sum of error counts/port */
2118
2119                 struct _vls_dctrs {
2120                         /* per-VL Data counters */
2121                         __be64 port_vl_xmit_data;
2122                         __be64 port_vl_rcv_data;
2123                         __be64 port_vl_xmit_pkts;
2124                         __be64 port_vl_rcv_pkts;
2125                         __be64 port_vl_xmit_wait;
2126                         __be64 sw_port_vl_congestion;
2127                         __be64 port_vl_rcv_fecn;
2128                         __be64 port_vl_rcv_becn;
2129                         __be64 port_xmit_time_cong;
2130                         __be64 port_vl_xmit_wasted_bw;
2131                         __be64 port_vl_xmit_wait_data;
2132                         __be64 port_vl_rcv_bubble;
2133                         __be64 port_vl_mark_fecn;
2134                 } vls[0];
2135                 /* array size defined by #bits set in vl_select_mask */
2136         } port[1]; /* array size defined by #ports in attribute modifier */
2137 };
2138
2139 struct opa_port_error_counters64_msg {
2140         /* Request contains first two fields, response contains the
2141          * entire structure */
2142         __be64 port_select_mask[4];
2143         __be32 vl_select_mask;
2144
2145         /* Response-only fields follow */
2146         __be32 reserved1;
2147         struct _port_ectrs {
2148                 u8 port_number;
2149                 u8 reserved2[7];
2150                 __be64 port_rcv_constraint_errors;
2151                 __be64 port_rcv_switch_relay_errors;
2152                 __be64 port_xmit_discards;
2153                 __be64 port_xmit_constraint_errors;
2154                 __be64 port_rcv_remote_physical_errors;
2155                 __be64 local_link_integrity_errors;
2156                 __be64 port_rcv_errors;
2157                 __be64 excessive_buffer_overruns;
2158                 __be64 fm_config_errors;
2159                 __be32 link_error_recovery;
2160                 __be32 link_downed;
2161                 u8 uncorrectable_errors;
2162                 u8 reserved3[7];
2163                 struct _vls_ectrs {
2164                         __be64 port_vl_xmit_discards;
2165                 } vls[0];
2166                 /* array size defined by #bits set in vl_select_mask */
2167         } port[1]; /* array size defined by #ports in attribute modifier */
2168 };
2169
2170 struct opa_port_error_info_msg {
2171         __be64 port_select_mask[4];
2172         __be32 error_info_select_mask;
2173         __be32 reserved1;
2174         struct _port_ei {
2175
2176                 u8 port_number;
2177                 u8 reserved2[7];
2178
2179                 /* PortRcvErrorInfo */
2180                 struct {
2181                         u8 status_and_code;
2182                         union {
2183                                 u8 raw[17];
2184                                 struct {
2185                                         /* EI1to12 format */
2186                                         u8 packet_flit1[8];
2187                                         u8 packet_flit2[8];
2188                                         u8 remaining_flit_bits12;
2189                                 } ei1to12;
2190                                 struct {
2191                                         u8 packet_bytes[8];
2192                                         u8 remaining_flit_bits;
2193                                 } ei13;
2194                         } ei;
2195                         u8 reserved3[6];
2196                 } __packed port_rcv_ei;
2197
2198                 /* ExcessiveBufferOverrunInfo */
2199                 struct {
2200                         u8 status_and_sc;
2201                         u8 reserved4[7];
2202                 } __packed excessive_buffer_overrun_ei;
2203
2204                 /* PortXmitConstraintErrorInfo */
2205                 struct {
2206                         u8 status;
2207                         u8 reserved5;
2208                         __be16 pkey;
2209                         __be32 slid;
2210                 } __packed port_xmit_constraint_ei;
2211
2212                 /* PortRcvConstraintErrorInfo */
2213                 struct {
2214                         u8 status;
2215                         u8 reserved6;
2216                         __be16 pkey;
2217                         __be32 slid;
2218                 } __packed port_rcv_constraint_ei;
2219
2220                 /* PortRcvSwitchRelayErrorInfo */
2221                 struct {
2222                         u8 status_and_code;
2223                         u8 reserved7[3];
2224                         __u32 error_info;
2225                 } __packed port_rcv_switch_relay_ei;
2226
2227                 /* UncorrectableErrorInfo */
2228                 struct {
2229                         u8 status_and_code;
2230                         u8 reserved8;
2231                 } __packed uncorrectable_ei;
2232
2233                 /* FMConfigErrorInfo */
2234                 struct {
2235                         u8 status_and_code;
2236                         u8 error_info;
2237                 } __packed fm_config_ei;
2238                 __u32 reserved9;
2239         } port[1]; /* actual array size defined by #ports in attr modifier */
2240 };
2241
2242 /* opa_port_error_info_msg error_info_select_mask bit definitions */
2243 enum error_info_selects {
2244         ES_PORT_RCV_ERROR_INFO                  = (1 << 31),
2245         ES_EXCESSIVE_BUFFER_OVERRUN_INFO        = (1 << 30),
2246         ES_PORT_XMIT_CONSTRAINT_ERROR_INFO      = (1 << 29),
2247         ES_PORT_RCV_CONSTRAINT_ERROR_INFO       = (1 << 28),
2248         ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO     = (1 << 27),
2249         ES_UNCORRECTABLE_ERROR_INFO             = (1 << 26),
2250         ES_FM_CONFIG_ERROR_INFO                 = (1 << 25)
2251 };
2252
2253 static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
2254                                 struct ib_device *ibdev, u32 *resp_len)
2255 {
2256         struct opa_class_port_info *p =
2257                 (struct opa_class_port_info *)pmp->data;
2258
2259         memset(pmp->data, 0, sizeof(pmp->data));
2260
2261         if (pmp->mad_hdr.attr_mod != 0)
2262                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2263
2264         p->base_version = OPA_MGMT_BASE_VERSION;
2265         p->class_version = OPA_SMI_CLASS_VERSION;
2266         /*
2267          * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
2268          */
2269         p->cap_mask2_resp_time = cpu_to_be32(18);
2270
2271         if (resp_len)
2272                 *resp_len += sizeof(*p);
2273
2274         return reply((struct ib_mad_hdr *)pmp);
2275 }
2276
2277 static void a0_portstatus(struct hfi1_pportdata *ppd,
2278                           struct opa_port_status_rsp *rsp, u32 vl_select_mask)
2279 {
2280         if (!is_bx(ppd->dd)) {
2281                 unsigned long vl;
2282                 u64 sum_vl_xmit_wait = 0;
2283                 u32 vl_all_mask = VL_MASK_ALL;
2284
2285                 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2286                                  8 * sizeof(vl_all_mask)) {
2287                         u64 tmp = sum_vl_xmit_wait +
2288                                   read_port_cntr(ppd, C_TX_WAIT_VL,
2289                                                  idx_from_vl(vl));
2290                         if (tmp < sum_vl_xmit_wait) {
2291                                 /* we wrapped */
2292                                 sum_vl_xmit_wait = (u64)~0;
2293                                 break;
2294                         }
2295                         sum_vl_xmit_wait = tmp;
2296                 }
2297                 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2298                         rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2299         }
2300 }
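
/*
 * The wrap check above implements a saturating 64-bit sum; the same
 * pattern recurs in the counter aggregation below. Distilled (a sketch,
 * not an existing driver helper):
 *
 *	static inline u64 sat_add64(u64 a, u64 b)
 *	{
 *		u64 s = a + b;
 *
 *		return s < a ? (u64)~0 : s;
 *	}
 */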
2301
2303 static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
2304                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2305 {
2306         struct opa_port_status_req *req =
2307                 (struct opa_port_status_req *)pmp->data;
2308         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2309         struct opa_port_status_rsp *rsp;
2310         u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2311         unsigned long vl;
2312         size_t response_data_size;
2313         u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2314         u8 port_num = req->port_num;
2315         u8 num_vls = hweight32(vl_select_mask);
2316         struct _vls_pctrs *vlinfo;
2317         struct hfi1_ibport *ibp = to_iport(ibdev, port);
2318         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2319         int vfi;
2320         u64 tmp, tmp2;
2321
2322         response_data_size = sizeof(struct opa_port_status_rsp) +
2323                                 num_vls * sizeof(struct _vls_pctrs);
2324         if (response_data_size > sizeof(pmp->data)) {
2325                 pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
2326                 return reply((struct ib_mad_hdr *)pmp);
2327         }
2328
2329         if (nports != 1 || (port_num && port_num != port) ||
2330             num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
2331                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2332                 return reply((struct ib_mad_hdr *)pmp);
2333         }
2334
2335         memset(pmp->data, 0, sizeof(pmp->data));
2336
2337         rsp = (struct opa_port_status_rsp *)pmp->data;
2338         if (port_num)
2339                 rsp->port_num = port_num;
2340         else
2341                 rsp->port_num = port;
2342
2343         rsp->port_rcv_constraint_errors =
2344                 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2345                                            CNTR_INVALID_VL));
2346
2347         hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
2348
2349         rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
2350         rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2351                                           CNTR_INVALID_VL));
2352         rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2353                                          CNTR_INVALID_VL));
2354         rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2355                                           CNTR_INVALID_VL));
2356         rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2357                                          CNTR_INVALID_VL));
2358         rsp->port_multicast_xmit_pkts =
2359                 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2360                                         CNTR_INVALID_VL));
2361         rsp->port_multicast_rcv_pkts =
2362                 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2363                                           CNTR_INVALID_VL));
2364         rsp->port_xmit_wait =
2365                 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2366         rsp->port_rcv_fecn =
2367                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2368         rsp->port_rcv_becn =
2369                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2370         rsp->port_xmit_discards =
2371                 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2372                                            CNTR_INVALID_VL));
2373         rsp->port_xmit_constraint_errors =
2374                 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2375                                            CNTR_INVALID_VL));
2376         rsp->port_rcv_remote_physical_errors =
2377                 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2378                                           CNTR_INVALID_VL));
2379         tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2380         tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2381         if (tmp2 < tmp) {
2382                 /* overflow/wrapped */
2383                 rsp->local_link_integrity_errors = cpu_to_be64(~0);
2384         } else {
2385                 rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
2386         }
2387         tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2388         tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2389                                         CNTR_INVALID_VL);
2390         if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2391                 /* overflow/wrapped */
2392                 rsp->link_error_recovery = cpu_to_be32(~0);
2393         } else {
2394                 rsp->link_error_recovery = cpu_to_be32(tmp2);
2395         }
2396         rsp->port_rcv_errors =
2397                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2398         rsp->excessive_buffer_overruns =
2399                 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2400         rsp->fm_config_errors =
2401                 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2402                                           CNTR_INVALID_VL));
2403         rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2404                                           CNTR_INVALID_VL));
2405
2406         /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
2407         tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2408         rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2409
2410         vlinfo = &(rsp->vls[0]);
2411         vfi = 0;
2412         /* The vl_select_mask has been checked above, and we know
2413          * that it contains only entries which represent valid VLs.
2414          * So in the for_each_set_bit() loop below, we don't need
2415          * any additional checks for vl.
2416          */
2417         for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2418                          8 * sizeof(vl_select_mask)) {
2419                 memset(vlinfo, 0, sizeof(*vlinfo));
2420
2421                 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
2422                 rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
2423
2424                 rsp->vls[vfi].port_vl_rcv_pkts =
2425                         cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2426                                         idx_from_vl(vl)));
2427
2428                 rsp->vls[vfi].port_vl_xmit_data =
2429                         cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2430                                         idx_from_vl(vl)));
2431
2432                 rsp->vls[vfi].port_vl_xmit_pkts =
2433                         cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2434                                         idx_from_vl(vl)));
2435
2436                 rsp->vls[vfi].port_vl_xmit_wait =
2437                         cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
2438                                         idx_from_vl(vl)));
2439
2440                 rsp->vls[vfi].port_vl_rcv_fecn =
2441                         cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2442                                         idx_from_vl(vl)));
2443
2444                 rsp->vls[vfi].port_vl_rcv_becn =
2445                         cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2446                                         idx_from_vl(vl)));
2447
2448                 vlinfo++;
2449                 vfi++;
2450         }
2451
2452         a0_portstatus(ppd, rsp, vl_select_mask);
2453
2454         if (resp_len)
2455                 *resp_len += response_data_size;
2456
2457         return reply((struct ib_mad_hdr *)pmp);
2458 }
2459
2460 static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
2461                                      u8 res_lli, u8 res_ler)
2462 {
2463         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2464         struct hfi1_ibport *ibp = to_iport(ibdev, port);
2465         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2466         u64 error_counter_summary = 0, tmp;
2467
2468         error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2469                                                 CNTR_INVALID_VL);
2470         /* port_rcv_switch_relay_errors is 0 for HFIs */
2471         error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
2472                                                 CNTR_INVALID_VL);
2473         error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2474                                                 CNTR_INVALID_VL);
2475         error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2476                                                 CNTR_INVALID_VL);
2477         /* local link integrity must be right-shifted by the lli resolution */
2478         tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2479         tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2480         error_counter_summary += (tmp >> res_lli);
2481         /* link error recovery must be right-shifted by the ler resolution */
2482         tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2483         tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
2484         error_counter_summary += (tmp >> res_ler);
2485         error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
2486                                                 CNTR_INVALID_VL);
2487         error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
2488         error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2489                                                 CNTR_INVALID_VL);
2490         /* ppd->link_downed is a 32-bit value */
2491         error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
2492                                                 CNTR_INVALID_VL);
2493         tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2494         /* this is an 8-bit quantity */
2495         error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
2496
2497         return error_counter_summary;
2498 }
2499
2500 static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
2501                             u32 vl_select_mask)
2502 {
2503         if (!is_bx(ppd->dd)) {
2504                 unsigned long vl;
2505                 u64 sum_vl_xmit_wait = 0;
2506                 u32 vl_all_mask = VL_MASK_ALL;
2507
2508                 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2509                                  8 * sizeof(vl_all_mask)) {
2510                         u64 tmp = sum_vl_xmit_wait +
2511                                   read_port_cntr(ppd, C_TX_WAIT_VL,
2512                                                  idx_from_vl(vl));
2513                         if (tmp < sum_vl_xmit_wait) {
2514                                 /* we wrapped */
2515                                 sum_vl_xmit_wait = (u64)~0;
2516                                 break;
2517                         }
2518                         sum_vl_xmit_wait = tmp;
2519                 }
2520                 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2521                         rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2522         }
2523 }
2524
2525 static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2526                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2527 {
2528         struct opa_port_data_counters_msg *req =
2529                 (struct opa_port_data_counters_msg *)pmp->data;
2530         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2531         struct hfi1_ibport *ibp = to_iport(ibdev, port);
2532         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2533         struct _port_dctrs *rsp;
2534         struct _vls_dctrs *vlinfo;
2535         size_t response_data_size;
2536         u32 num_ports;
2537         u8 num_pslm;
2538         u8 lq, num_vls;
2539         u8 res_lli, res_ler;
2540         u64 port_mask;
2541         unsigned long port_num;
2542         unsigned long vl;
2543         u32 vl_select_mask;
2544         int vfi;
2545
2546         num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2547         num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2548         num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2549         vl_select_mask = be32_to_cpu(req->vl_select_mask);
2550         res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
2551         res_lli = res_lli ? res_lli + ADD_LLI : 0;
2552         res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
2553         res_ler = res_ler ? res_ler + ADD_LER : 0;
2554
2555         if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
2556                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2557                 return reply((struct ib_mad_hdr *)pmp);
2558         }
2559
2560         /* Sanity check */
2561         response_data_size = sizeof(struct opa_port_data_counters_msg) +
2562                                 num_vls * sizeof(struct _vls_dctrs);
2563
2564         if (response_data_size > sizeof(pmp->data)) {
2565                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2566                 return reply((struct ib_mad_hdr *)pmp);
2567         }
2568
2569         /*
2570          * The bit set in the mask needs to be consistent with the
2571          * port the request came in on.
2572          */
2573         port_mask = be64_to_cpu(req->port_select_mask[3]);
2574         port_num = find_first_bit((unsigned long *)&port_mask,
2575                                   8 * sizeof(port_mask));
2576
2577         if ((u8)port_num != port) {
2578                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2579                 return reply((struct ib_mad_hdr *)pmp);
2580         }
2581
2582         rsp = &req->port[0];
2583         memset(rsp, 0, sizeof(*rsp));
2584
2585         rsp->port_number = port;
2586         /*
2587          * Note that link_quality_indicator is a 32 bit quantity in
2588          * 'datacounters' queries (as opposed to 'portinfo' queries,
2589          * where it's a byte).
2590          */
2591         hfi1_read_link_quality(dd, &lq);
2592         rsp->link_quality_indicator = cpu_to_be32((u32)lq);
2593
2594         /* rsp->sw_port_congestion is 0 for HFIs */
2595         /* rsp->port_xmit_time_cong is 0 for HFIs */
2596         /* rsp->port_xmit_wasted_bw ??? */
2597         /* rsp->port_xmit_wait_data ??? */
2598         /* rsp->port_mark_fecn is 0 for HFIs */
2599
2600         rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2601                                                 CNTR_INVALID_VL));
2602         rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2603                                                 CNTR_INVALID_VL));
2604         rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2605                                                 CNTR_INVALID_VL));
2606         rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2607                                                 CNTR_INVALID_VL));
2608         rsp->port_multicast_xmit_pkts =
2609                 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2610                                                 CNTR_INVALID_VL));
2611         rsp->port_multicast_rcv_pkts =
2612                 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2613                                                 CNTR_INVALID_VL));
2614         rsp->port_xmit_wait =
2615                 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2616         rsp->port_rcv_fecn =
2617                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2618         rsp->port_rcv_becn =
2619                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2620
2621         rsp->port_error_counter_summary =
2622                 cpu_to_be64(get_error_counter_summary(ibdev, port,
2623                                                       res_lli, res_ler));
2624
2625         vlinfo = &(rsp->vls[0]);
2626         vfi = 0;
2627         /* The vl_select_mask has been checked above, and we know
2628          * that it contains only entries which represent valid VLs.
2629          * So in the for_each_set_bit() loop below, we don't need
2630          * any additional checks for vl.
2631          */
2632         for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2633                  8 * sizeof(req->vl_select_mask)) {
2634                 memset(vlinfo, 0, sizeof(*vlinfo));
2635
2636                 rsp->vls[vfi].port_vl_xmit_data =
2637                         cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2638                                                         idx_from_vl(vl)));
2639
2640                 rsp->vls[vfi].port_vl_rcv_data =
2641                         cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
2642                                                         idx_from_vl(vl)));
2643
2644                 rsp->vls[vfi].port_vl_xmit_pkts =
2645                         cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2646                                                         idx_from_vl(vl)));
2647
2648                 rsp->vls[vfi].port_vl_rcv_pkts =
2649                         cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2650                                                         idx_from_vl(vl)));
2651
2652                 rsp->vls[vfi].port_vl_xmit_wait =
2653                         cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
2654                                                         idx_from_vl(vl)));
2655
2656                 rsp->vls[vfi].port_vl_rcv_fecn =
2657                         cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2658                                                         idx_from_vl(vl)));
2659                 rsp->vls[vfi].port_vl_rcv_becn =
2660                         cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2661                                                         idx_from_vl(vl)));
2662
2663                 /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
2664                 /* rsp->port_vl_xmit_wasted_bw ??? */
2665                 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
2666                  * does this differ from rsp->vls[vfi].port_vl_xmit_wait */
2667                 /*rsp->vls[vfi].port_vl_mark_fecn =
2668                         cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
2669                                 + offset));
2670                 */
2671                 vlinfo++;
2672                 vfi++;
2673         }
2674
2675         a0_datacounters(ppd, rsp, vl_select_mask);
2676
2677         if (resp_len)
2678                 *resp_len += response_data_size;
2679
2680         return reply((struct ib_mad_hdr *)pmp);
2681 }
2682
2683 static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
2684                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2685 {
2686         size_t response_data_size;
2687         struct _port_ectrs *rsp;
2688         unsigned long port_num;
2689         struct opa_port_error_counters64_msg *req;
2690         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2691         u32 num_ports;
2692         u8 num_pslm;
2693         u8 num_vls;
2694         struct hfi1_ibport *ibp;
2695         struct hfi1_pportdata *ppd;
2696         struct _vls_ectrs *vlinfo;
2697         unsigned long vl;
2698         u64 port_mask, tmp, tmp2;
2699         unsigned long vl_select_mask;
2700         int vfi;
2701
2702         req = (struct opa_port_error_counters64_msg *)pmp->data;
2703
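        /* the number of requested ports is carried in the top byte of attr_mod */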
2704         num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2705
2706         num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2707         num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2708
2709         if (num_ports != 1 || num_ports != num_pslm) {
2710                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2711                 return reply((struct ib_mad_hdr *)pmp);
2712         }
2713
2714         response_data_size = sizeof(struct opa_port_error_counters64_msg) +
2715                                 num_vls * sizeof(struct _vls_ectrs);
2716
2717         if (response_data_size > sizeof(pmp->data)) {
2718                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2719                 return reply((struct ib_mad_hdr *)pmp);
2720         }
2721         /*
2722          * The bit set in the mask needs to be consistent with the
2723          * port the request came in on.
2724          */
2725         port_mask = be64_to_cpu(req->port_select_mask[3]);
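        /* find_first_bit() takes its size argument in bits, not bytes */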
2726         port_num = find_first_bit((unsigned long *)&port_mask,
2727                                   sizeof(port_mask) * 8);
2728
2729         if ((u8)port_num != port) {
2730                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2731                 return reply((struct ib_mad_hdr *)pmp);
2732         }
2733
2734         rsp = &req->port[0];
2735
2736         ibp = to_iport(ibdev, port_num);
2737         ppd = ppd_from_ibp(ibp);
2738
2739         memset(rsp, 0, sizeof(*rsp));
2740         rsp->port_number = (u8)port_num;
2741
2742         rsp->port_rcv_constraint_errors =
2743                 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2744                                            CNTR_INVALID_VL));
2745         /* port_rcv_switch_relay_errors is 0 for HFIs */
2746         rsp->port_xmit_discards =
2747                 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2748                                                 CNTR_INVALID_VL));
2749         rsp->port_rcv_remote_physical_errors =
2750                 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2751                                                 CNTR_INVALID_VL));
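        /*
         * LocalLinkIntegrityErrors is synthesized as the sum of the RX and
         * TX replay counters; saturate instead of wrapping on overflow.
         */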
2752         tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2753         tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2754         if (tmp2 < tmp) {
2755                 /* overflow/wrapped */
2756                 rsp->local_link_integrity_errors = cpu_to_be64(~0);
2757         } else {
2758                 rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
2759         }
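        /*
         * Likewise, LinkErrorRecovery is the sum of the sequence CRC and
         * reinit-from-peer counters, saturated to 32 bits.
         */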
2760         tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2761         tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2762                                         CNTR_INVALID_VL);
2763         if (tmp2 > UINT_MAX || tmp2 < tmp) {
2764                 /* overflow/wrapped */
2765                 rsp->link_error_recovery = cpu_to_be32(~0);
2766         } else {
2767                 rsp->link_error_recovery = cpu_to_be32(tmp2);
2768         }
2769         rsp->port_xmit_constraint_errors =
2770                 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2771                                            CNTR_INVALID_VL));
2772         rsp->excessive_buffer_overruns =
2773                 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2774         rsp->fm_config_errors =
2775                 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2776                                                 CNTR_INVALID_VL));
2777         rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2778                                                 CNTR_INVALID_VL));
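        /* UncorrectableErrors is reported as an 8-bit saturating value */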
2779         tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2780         rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2781
2782         vlinfo = &rsp->vls[0];
2783         vfi = 0;
2784         vl_select_mask = be32_to_cpu(req->vl_select_mask);
2785         for_each_set_bit(vl, &vl_select_mask,
2786                          8 * sizeof(req->vl_select_mask)) {
2787                 memset(vlinfo, 0, sizeof(*vlinfo));
2788                 /* vlinfo->vls[vfi].port_vl_xmit_discards ??? */
2789                 vlinfo += 1;
2790                 vfi++;
2791         }
2792
2793         if (resp_len)
2794                 *resp_len += response_data_size;
2795
2796         return reply((struct ib_mad_hdr *)pmp);
2797 }
2798
2799 static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
2800                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2801 {
2802         size_t response_data_size;
2803         struct _port_ei *rsp;
2804         struct opa_port_error_info_msg *req;
2805         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2806         u64 port_mask;
2807         u32 num_ports;
2808         unsigned long port_num;
2809         u8 num_pslm;
2810         u64 reg;
2811
2812         req = (struct opa_port_error_info_msg *)pmp->data;
2813         rsp = &req->port[0];
2814
2815         num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
2816         num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2817
2818         memset(rsp, 0, sizeof(*rsp));
2819
2820         if (num_ports != 1 || num_ports != num_pslm) {
2821                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2822                 return reply((struct ib_mad_hdr *)pmp);
2823         }
2824
2825         /* Sanity check */
2826         response_data_size = sizeof(struct opa_port_error_info_msg);
2827
2828         if (response_data_size > sizeof(pmp->data)) {
2829                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2830                 return reply((struct ib_mad_hdr *)pmp);
2831         }
2832
2833         /*
2834          * The bit set in the mask needs to be consistent with the port
2835          * the request came in on.
2836          */
2837         port_mask = be64_to_cpu(req->port_select_mask[3]);
2838         port_num = find_first_bit((unsigned long *)&port_mask,
2839                                   sizeof(port_mask) * 8);
2840
2841         if ((u8)port_num != port) {
2842                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2843                 return reply((struct ib_mad_hdr *)pmp);
2844         }
2845
2846         /* PortRcvErrorInfo */
2847         rsp->port_rcv_ei.status_and_code =
2848                 dd->err_info_rcvport.status_and_code;
2849         memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
2850                 &dd->err_info_rcvport.packet_flit1, sizeof(u64));
2851         memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
2852                 &dd->err_info_rcvport.packet_flit2, sizeof(u64));
2853
2854         /* ExcessiveBufferOverrunInfo */
2855         reg = read_csr(dd, RCV_ERR_INFO);
2856         if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
2857                 /* if the RcvExcessBufferOverrun bit is set, save SC of
2858                  * first pkt that encountered an excess buffer overrun */
2859                 u8 tmp = (u8)reg;
2860
2861                 tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
2862                 tmp <<= 2;
2863                 rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
2864                 /* set the status bit */
2865                 rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
2866         }
2867
2868         rsp->port_xmit_constraint_ei.status =
2869                 dd->err_info_xmit_constraint.status;
2870         rsp->port_xmit_constraint_ei.pkey =
2871                 cpu_to_be16(dd->err_info_xmit_constraint.pkey);
2872         rsp->port_xmit_constraint_ei.slid =
2873                 cpu_to_be32(dd->err_info_xmit_constraint.slid);
2874
2875         rsp->port_rcv_constraint_ei.status =
2876                 dd->err_info_rcv_constraint.status;
2877         rsp->port_rcv_constraint_ei.pkey =
2878                 cpu_to_be16(dd->err_info_rcv_constraint.pkey);
2879         rsp->port_rcv_constraint_ei.slid =
2880                 cpu_to_be32(dd->err_info_rcv_constraint.slid);
2881
2882         /* UncorrectableErrorInfo */
2883         rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
2884
2885         /* FMConfigErrorInfo */
2886         rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
2887
2888         if (resp_len)
2889                 *resp_len += response_data_size;
2890
2891         return reply((struct ib_mad_hdr *)pmp);
2892 }
2893
2894 static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
2895                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2896 {
2897         struct opa_clear_port_status *req =
2898                 (struct opa_clear_port_status *)pmp->data;
2899         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2900         struct hfi1_ibport *ibp = to_iport(ibdev, port);
2901         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2902         u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2903         u64 portn = be64_to_cpu(req->port_select_mask[3]);
2904         u32 counter_select = be32_to_cpu(req->counter_select_mask);
2905         unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
2906         unsigned long vl;
2907
2908         if ((nports != 1) || (portn != 1ULL << port)) {
2909                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2910                 return reply((struct ib_mad_hdr *)pmp);
2911         }
2912         /*
2913          * only counters returned by pma_get_opa_portstatus() are
2914          * handled, so when pma_get_opa_portstatus() gets a fix,
2915          * the corresponding change should be made here as well.
2916          */
2917
2918         if (counter_select & CS_PORT_XMIT_DATA)
2919                 write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
2920
2921         if (counter_select & CS_PORT_RCV_DATA)
2922                 write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
2923
2924         if (counter_select & CS_PORT_XMIT_PKTS)
2925                 write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
2926
2927         if (counter_select & CS_PORT_RCV_PKTS)
2928                 write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
2929
2930         if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
2931                 write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
2932
2933         if (counter_select & CS_PORT_MCAST_RCV_PKTS)
2934                 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
2935
2936         if (counter_select & CS_PORT_XMIT_WAIT)
2937                 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
2938
2939         /* ignore cs_sw_portCongestion for HFIs */
2940
2941         if (counter_select & CS_PORT_RCV_FECN)
2942                 write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
2943
2944         if (counter_select & CS_PORT_RCV_BECN)
2945                 write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
2946
2947         /* ignore cs_port_xmit_time_cong for HFIs */
2948         /* ignore cs_port_xmit_wasted_bw for now */
2949         /* ignore cs_port_xmit_wait_data for now */
2950         if (counter_select & CS_PORT_RCV_BUBBLE)
2951                 write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
2952
2953         /* Only applicable for switch */
2954         /*if (counter_select & CS_PORT_MARK_FECN)
2955                 write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);*/
2956
2957         if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
2958                 write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
2959
2960         /* ignore cs_port_rcv_switch_relay_errors for HFIs */
2961         if (counter_select & CS_PORT_XMIT_DISCARDS)
2962                 write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
2963
2964         if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
2965                 write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
2966
2967         if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
2968                 write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
2969
2970         if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) {
2971                 write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
2972                 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
2973         }
2974
2975         if (counter_select & CS_LINK_ERROR_RECOVERY) {
2976                 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
2977                 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2978                                                 CNTR_INVALID_VL, 0);
2979         }
2980
2981         if (counter_select & CS_PORT_RCV_ERRORS)
2982                 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
2983
2984         if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
2985                 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
2986                 dd->rcv_ovfl_cnt = 0;
2987         }
2988
2989         if (counter_select & CS_FM_CONFIG_ERRORS)
2990                 write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
2991
2992         if (counter_select & CS_LINK_DOWNED)
2993                 write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
2994
2995         if (counter_select & CS_UNCORRECTABLE_ERRORS)
2996                 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
2997
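        /* clear the per-VL counters for every VL; vl_select_mask is VL_MASK_ALL */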
2998         for_each_set_bit(vl, &vl_select_mask,
2999                          8 * sizeof(vl_select_mask)) {
3000
3001                 if (counter_select & CS_PORT_XMIT_DATA)
3002                         write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
3003
3004                 if (counter_select & CS_PORT_RCV_DATA)
3005                         write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
3006
3007                 if (counter_select & CS_PORT_XMIT_PKTS)
3008                         write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
3009
3010                 if (counter_select & CS_PORT_RCV_PKTS)
3011                         write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
3012
3013                 if (counter_select & CS_PORT_XMIT_WAIT)
3014                         write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
3015
3016                 /* sw_port_vl_congestion is 0 for HFIs */
3017                 if (counter_select & CS_PORT_RCV_FECN)
3018                         write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
3019
3020                 if (counter_select & CS_PORT_RCV_BECN)
3021                         write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
3022
3023                 /* port_vl_xmit_time_cong is 0 for HFIs */
3024                 /* port_vl_xmit_wasted_bw ??? */
3025                 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
3026                 if (counter_select & CS_PORT_RCV_BUBBLE)
3027                         write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
3028
3029                 /*if (counter_select & CS_PORT_MARK_FECN)
3030                      write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
3031                 */
3032                 /* port_vl_xmit_discards ??? */
3033         }
3034
3035         if (resp_len)
3036                 *resp_len += sizeof(*req);
3037
3038         return reply((struct ib_mad_hdr *)pmp);
3039 }
3040
3041 static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
3042                         struct ib_device *ibdev, u8 port, u32 *resp_len)
3043 {
3044         struct _port_ei *rsp;
3045         struct opa_port_error_info_msg *req;
3046         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3047         u64 port_mask;
3048         u32 num_ports;
3049         unsigned long port_num;
3050         u8 num_pslm;
3051         u32 error_info_select;
3052
3053         req = (struct opa_port_error_info_msg *)pmp->data;
3054         rsp = &req->port[0];
3055
3056         num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3057         num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3058
3059         memset(rsp, 0, sizeof(*rsp));
3060
3061         if (num_ports != 1 || num_ports != num_pslm) {
3062                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3063                 return reply((struct ib_mad_hdr *)pmp);
3064         }
3065
3066         /*
3067          * The bit set in the mask needs to be consistent with the port
3068          * the request came in on.
3069          */
3070         port_mask = be64_to_cpu(req->port_select_mask[3]);
3071         port_num = find_first_bit((unsigned long *)&port_mask,
3072                                   sizeof(port_mask) * 8);
3073
3074         if ((u8)port_num != port) {
3075                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3076                 return reply((struct ib_mad_hdr *)pmp);
3077         }
3078
3079         error_info_select = be32_to_cpu(req->error_info_select_mask);
3080
3081         /* PortRcvErrorInfo */
3082         if (error_info_select & ES_PORT_RCV_ERROR_INFO)
3083                 /* turn off status bit */
3084                 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3085
3086         /* ExcessiveBufferOverrunInfo */
3087         if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
3088                 /* status bit is essentially kept in the h/w - bit 5 of
3089                  * RCV_ERR_INFO */
3090                 write_csr(dd, RCV_ERR_INFO,
3091                           RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
3092
3093         if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
3094                 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3095
3096         if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
3097                 dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
3098
3099         /* UncorrectableErrorInfo */
3100         if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
3101                 /* turn off status bit */
3102                 dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
3103
3104         /* FMConfigErrorInfo */
3105         if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
3106                 /* turn off status bit */
3107                 dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
3108
3109         if (resp_len)
3110                 *resp_len += sizeof(*req);
3111
3112         return reply((struct ib_mad_hdr *)pmp);
3113 }
3114
3115 struct opa_congestion_info_attr {
3116         __be16 congestion_info;
3117         u8 control_table_cap;   /* Multiple of 64 entry unit CCTs */
3118         u8 congestion_log_length;
3119 } __packed;
3120
3121 static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
3122                                     struct ib_device *ibdev, u8 port,
3123                                     u32 *resp_len)
3124 {
3125         struct opa_congestion_info_attr *p =
3126                 (struct opa_congestion_info_attr *)data;
3127         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3128         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3129
3130         p->congestion_info = 0;
3131         p->control_table_cap = ppd->cc_max_table_entries;
3132         p->congestion_log_length = OPA_CONG_LOG_ELEMS;
3133
3134         if (resp_len)
3135                 *resp_len += sizeof(*p);
3136
3137         return reply((struct ib_mad_hdr *)smp);
3138 }
3139
3140 static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
3141                                              u8 *data,
3142                                              struct ib_device *ibdev,
3143                                              u8 port, u32 *resp_len)
3144 {
3145         int i;
3146         struct opa_congestion_setting_attr *p =
3147                 (struct opa_congestion_setting_attr *)data;
3148         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3149         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3150         struct opa_congestion_setting_entry_shadow *entries;
3151         struct cc_state *cc_state;
3152
3153         rcu_read_lock();
3154
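        /* cc_state is RCU-protected; if none has been allocated yet, reply with no data */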
3155         cc_state = get_cc_state(ppd);
3156
3157         if (!cc_state) {
3158                 rcu_read_unlock();
3159                 return reply((struct ib_mad_hdr *)smp);
3160         }
3161
3162         entries = cc_state->cong_setting.entries;
3163         p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
3164         p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
3165         for (i = 0; i < OPA_MAX_SLS; i++) {
3166                 p->entries[i].ccti_increase = entries[i].ccti_increase;
3167                 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
3168                 p->entries[i].trigger_threshold =
3169                         entries[i].trigger_threshold;
3170                 p->entries[i].ccti_min = entries[i].ccti_min;
3171         }
3172
3173         rcu_read_unlock();
3174
3175         if (resp_len)
3176                 *resp_len += sizeof(*p);
3177
3178         return reply((struct ib_mad_hdr *)smp);
3179 }
3180
3181 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3182                                        struct ib_device *ibdev, u8 port,
3183                                        u32 *resp_len)
3184 {
3185         struct opa_congestion_setting_attr *p =
3186                 (struct opa_congestion_setting_attr *)data;
3187         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3188         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3189         struct opa_congestion_setting_entry_shadow *entries;
3190         int i;
3191
3192         ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3193
3194         entries = ppd->congestion_entries;
3195         for (i = 0; i < OPA_MAX_SLS; i++) {
3196                 entries[i].ccti_increase = p->entries[i].ccti_increase;
3197                 entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
3198                 entries[i].trigger_threshold =
3199                         p->entries[i].trigger_threshold;
3200                 entries[i].ccti_min = p->entries[i].ccti_min;
3201         }
3202
3203         return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
3204                                            resp_len);
3205 }
3206
3207 static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
3208                                         u8 *data, struct ib_device *ibdev,
3209                                         u8 port, u32 *resp_len)
3210 {
3211         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3212         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3213         struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
3214         s64 ts;
3215         int i;
3216
3217         if (am != 0) {
3218                 smp->status |= IB_SMP_INVALID_FIELD;
3219                 return reply((struct ib_mad_hdr *)smp);
3220         }
3221
3222         spin_lock_irq(&ppd->cc_log_lock);
3223
3224         cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
3225         cong_log->congestion_flags = 0;
3226         cong_log->threshold_event_counter =
3227                 cpu_to_be16(ppd->threshold_event_counter);
3228         memcpy(cong_log->threshold_cong_event_map,
3229                ppd->threshold_cong_event_map,
3230                sizeof(cong_log->threshold_cong_event_map));
3231         /* keep timestamp in units of 1.024 usec */
3232         ts = ktime_to_ns(ktime_get()) / 1024;
3233         cong_log->current_time_stamp = cpu_to_be32(ts);
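        /* walk cc_events as a circular buffer, starting at cc_mad_idx and wrapping */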
3234         for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
3235                 struct opa_hfi1_cong_log_event_internal *cce =
3236                         &ppd->cc_events[ppd->cc_mad_idx++];
3237                 if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
3238                         ppd->cc_mad_idx = 0;
3239                 /*
3240                  * Entries which are older than twice the time
3241                  * required to wrap the counter are supposed to
3242                  * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
3243                  */
3244                 if ((u64)(ts - cce->timestamp) > (2ULL * UINT_MAX))
3245                         continue;
3246                 memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
3247                 memcpy(cong_log->events[i].remote_qp_number_cn_entry,
3248                         &cce->rqpn, 3);
3249                 cong_log->events[i].sl_svc_type_cn_entry =
3250                         ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
3251                 cong_log->events[i].remote_lid_cn_entry =
3252                         cpu_to_be32(cce->rlid);
3253                 cong_log->events[i].timestamp_cn_entry =
3254                         cpu_to_be32(cce->timestamp);
3255         }
3256
3257         /*
3258          * Reset threshold_cong_event_map, and threshold_event_counter
3259          * to 0 when log is read.
3260          */
3261         memset(ppd->threshold_cong_event_map, 0x0,
3262                sizeof(ppd->threshold_cong_event_map));
3263         ppd->threshold_event_counter = 0;
3264
3265         spin_unlock_irq(&ppd->cc_log_lock);
3266
3267         if (resp_len)
3268                 *resp_len += sizeof(struct opa_hfi1_cong_log);
3269
3270         return reply((struct ib_mad_hdr *)smp);
3271 }
3272
3273 static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3274                                    struct ib_device *ibdev, u8 port,
3275                                    u32 *resp_len)
3276 {
3277         struct ib_cc_table_attr *cc_table_attr =
3278                 (struct ib_cc_table_attr *)data;
3279         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3280         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3281         u32 start_block = OPA_AM_START_BLK(am);
3282         u32 n_blocks = OPA_AM_NBLK(am);
3283         struct ib_cc_table_entry_shadow *entries;
3284         int i, j;
3285         u32 sentry, eentry;
3286         struct cc_state *cc_state;
3287
3288         /* sanity check n_blocks, start_block */
3289         if (n_blocks == 0 ||
3290             start_block + n_blocks > ppd->cc_max_table_entries) {
3291                 smp->status |= IB_SMP_INVALID_FIELD;
3292                 return reply((struct ib_mad_hdr *)smp);
3293         }
3294
3295         rcu_read_lock();
3296
3297         cc_state = get_cc_state(ppd);
3298
3299         if (!cc_state) {
3300                 rcu_read_unlock();
3301                 return reply((struct ib_mad_hdr *)smp);
3302         }
3303
3304         sentry = start_block * IB_CCT_ENTRIES;
3305         eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
3306
3307         cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
3308
3309         entries = cc_state->cct.entries;
3310
3311         /* return n_blocks, though the last block may not be full */
3312         for (j = 0, i = sentry; i < eentry; j++, i++)
3313                 cc_table_attr->ccti_entries[j].entry =
3314                         cpu_to_be16(entries[i].entry);
3315
3316         rcu_read_unlock();
3317
3318         if (resp_len)
3319                 *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
3320
3321         return reply((struct ib_mad_hdr *)smp);
3322 }
3323
3324 void cc_state_reclaim(struct rcu_head *rcu)
3325 {
3326         struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
3327
3328         kfree(cc_state);
3329 }
3330
3331 static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3332                                    struct ib_device *ibdev, u8 port,
3333                                    u32 *resp_len)
3334 {
3335         struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
3336         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3337         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3338         u32 start_block = OPA_AM_START_BLK(am);
3339         u32 n_blocks = OPA_AM_NBLK(am);
3340         struct ib_cc_table_entry_shadow *entries;
3341         int i, j;
3342         u32 sentry, eentry;
3343         u16 ccti_limit;
3344         struct cc_state *old_cc_state, *new_cc_state;
3345
3346         /* sanity check n_blocks, start_block */
3347         if (n_blocks == 0 ||
3348             start_block + n_blocks > ppd->cc_max_table_entries) {
3349                 smp->status |= IB_SMP_INVALID_FIELD;
3350                 return reply((struct ib_mad_hdr *)smp);
3351         }
3352
3353         sentry = start_block * IB_CCT_ENTRIES;
3354         eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
3355                  (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
3356
3357         /* sanity check ccti_limit */
3358         ccti_limit = be16_to_cpu(p->ccti_limit);
3359         if (ccti_limit + 1 > eentry) {
3360                 smp->status |= IB_SMP_INVALID_FIELD;
3361                 return reply((struct ib_mad_hdr *)smp);
3362         }
3363
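        /*
         * Standard RCU update: copy the current cc_state into a fresh
         * allocation, modify the copy, publish it with rcu_assign_pointer(),
         * and free the old state after a grace period via call_rcu().
         */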
3364         new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3365         if (!new_cc_state)
3366                 goto getit;
3367
3368         spin_lock(&ppd->cc_state_lock);
3369
3370         old_cc_state = get_cc_state(ppd);
3371
3372         if (!old_cc_state) {
3373                 spin_unlock(&ppd->cc_state_lock);
3374                 kfree(new_cc_state);
3375                 return reply((struct ib_mad_hdr *)smp);
3376         }
3377
3378         *new_cc_state = *old_cc_state;
3379
3380         new_cc_state->cct.ccti_limit = ccti_limit;
3381
3382         entries = ppd->ccti_entries;
3383         ppd->total_cct_entry = ccti_limit + 1;
3384
3385         for (j = 0, i = sentry; i < eentry; j++, i++)
3386                 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
3387
3388         memcpy(new_cc_state->cct.entries, entries,
3389                eentry * sizeof(struct ib_cc_table_entry));
3390
3391         new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3392         new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3393         memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3394                OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3395
3396         rcu_assign_pointer(ppd->cc_state, new_cc_state);
3397
3398         spin_unlock(&ppd->cc_state_lock);
3399
3400         call_rcu(&old_cc_state->rcu, cc_state_reclaim);
3401
3402 getit:
3403         return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
3404 }
3405
3406 struct opa_led_info {
3407         __be32 rsvd_led_mask;
3408         __be32 rsvd;
3409 };
3410
3411 #define OPA_LED_SHIFT   31
3412 #define OPA_LED_MASK    (1 << OPA_LED_SHIFT)
3413
3414 static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3415                                    struct ib_device *ibdev, u8 port,
3416                                    u32 *resp_len)
3417 {
3418         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3419         struct opa_led_info *p = (struct opa_led_info *)data;
3420         u32 nport = OPA_AM_NPORT(am);
3421         u64 reg;
3422
3423         if (nport != 1) {
3424                 smp->status |= IB_SMP_INVALID_FIELD;
3425                 return reply((struct ib_mad_hdr *)smp);
3426         }
3427
3428         reg = read_csr(dd, DCC_CFG_LED_CNTRL);
3429         if ((reg & DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK) &&
3430             ((reg & DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK) == 0xf))
3431                 p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK);
3432
3433         if (resp_len)
3434                 *resp_len += sizeof(struct opa_led_info);
3435
3436         return reply((struct ib_mad_hdr *)smp);
3437 }
3438
3439 static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3440                                    struct ib_device *ibdev, u8 port,
3441                                    u32 *resp_len)
3442 {
3443         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3444         struct opa_led_info *p = (struct opa_led_info *)data;
3445         u32 nport = OPA_AM_NPORT(am);
3446         int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
3447
3448         if (nport != 1) {
3449                 smp->status |= IB_SMP_INVALID_FIELD;
3450                 return reply((struct ib_mad_hdr *)smp);
3451         }
3452
3453         setextled(dd, on);
3454
3455         return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
3456 }
3457
3458 static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3459                             u8 *data, struct ib_device *ibdev, u8 port,
3460                             u32 *resp_len)
3461 {
3462         int ret;
3463         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3464
3465         switch (attr_id) {
3466         case IB_SMP_ATTR_NODE_DESC:
3467                 ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
3468                                               resp_len);
3469                 break;
3470         case IB_SMP_ATTR_NODE_INFO:
3471                 ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
3472                                               resp_len);
3473                 break;
3474         case IB_SMP_ATTR_PORT_INFO:
3475                 ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
3476                                               resp_len);
3477                 break;
3478         case IB_SMP_ATTR_PKEY_TABLE:
3479                 ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
3480                                                resp_len);
3481                 break;
3482         case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3483                 ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
3484                                               resp_len);
3485                 break;
3486         case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3487                 ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
3488                                               resp_len);
3489                 break;
3490         case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3491                 ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
3492                                                resp_len);
3493                 break;
3494         case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3495                 ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
3496                                                resp_len);
3497                 break;
3498         case OPA_ATTRIB_ID_PORT_STATE_INFO:
3499                 ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
3500                                          resp_len);
3501                 break;
3502         case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3503                 ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
3504                                          resp_len);
3505                 break;
3506         case OPA_ATTRIB_ID_CABLE_INFO:
3507                 ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
3508                                                 resp_len);
3509                 break;
3510         case IB_SMP_ATTR_VL_ARB_TABLE:
3511                 ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
3512                                             resp_len);
3513                 break;
3514         case OPA_ATTRIB_ID_CONGESTION_INFO:
3515                 ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
3516                                                resp_len);
3517                 break;
3518         case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3519                 ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
3520                                                   port, resp_len);
3521                 break;
3522         case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
3523                 ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
3524                                                    port, resp_len);
3525                 break;
3526         case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3527                 ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
3528                                               resp_len);
3529                 break;
3530         case IB_SMP_ATTR_LED_INFO:
3531                 ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
3532                                               resp_len);
3533                 break;
3534         case IB_SMP_ATTR_SM_INFO:
3535                 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED)
3536                         return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
3537                 if (ibp->port_cap_flags & IB_PORT_SM)
3538                         return IB_MAD_RESULT_SUCCESS;
3539                 /* FALLTHROUGH */
3540         default:
3541                 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3542                 ret = reply((struct ib_mad_hdr *)smp);
3543                 break;
3544         }
3545         return ret;
3546 }
3547
3548 static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3549                             u8 *data, struct ib_device *ibdev, u8 port,
3550                             u32 *resp_len)
3551 {
3552         int ret;
3553         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3554
3555         switch (attr_id) {
3556         case IB_SMP_ATTR_PORT_INFO:
3557                 ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
3558                                               resp_len);
3559                 break;
3560         case IB_SMP_ATTR_PKEY_TABLE:
3561                 ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
3562                                                resp_len);
3563                 break;
3564         case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3565                 ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
3566                                               resp_len);
3567                 break;
3568         case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3569                 ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
3570                                               resp_len);
3571                 break;
3572         case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3573                 ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
3574                                                resp_len);
3575                 break;
3576         case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3577                 ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
3578                                                resp_len);
3579                 break;
3580         case OPA_ATTRIB_ID_PORT_STATE_INFO:
3581                 ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
3582                                          resp_len);
3583                 break;
3584         case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3585                 ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
3586                                          resp_len);
3587                 break;
3588         case IB_SMP_ATTR_VL_ARB_TABLE:
3589                 ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
3590                                             resp_len);
3591                 break;
3592         case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3593                 ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
3594                                                   port, resp_len);
3595                 break;
3596         case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3597                 ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
3598                                               resp_len);
3599                 break;
3600         case IB_SMP_ATTR_LED_INFO:
3601                 ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
3602                                               resp_len);
3603                 break;
3604         case IB_SMP_ATTR_SM_INFO:
3605                 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED)
3606                         return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
3607                 if (ibp->port_cap_flags & IB_PORT_SM)
3608                         return IB_MAD_RESULT_SUCCESS;
3609                 /* FALLTHROUGH */
3610         default:
3611                 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3612                 ret = reply((struct ib_mad_hdr *)smp);
3613                 break;
3614         }
3615         return ret;
3616 }
3617
3618 static inline void set_aggr_error(struct opa_aggregate *ag)
3619 {
3620         ag->err_reqlength |= cpu_to_be16(0x8000);
3621 }
3622
3623 static int subn_get_opa_aggregate(struct opa_smp *smp,
3624                                   struct ib_device *ibdev, u8 port,
3625                                   u32 *resp_len)
3626 {
3627         int i;
3628         u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3629         u8 *next_smp = opa_get_smp_data(smp);
3630
3631         if (num_attr < 1 || num_attr > 117) {
3632                 smp->status |= IB_SMP_INVALID_FIELD;
3633                 return reply((struct ib_mad_hdr *)smp);
3634         }
3635
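        /*
         * Each aggregate segment carries its own attr_id/attr_mod plus a
         * payload whose length (in 8-byte units) is in the low 7 bits of
         * err_reqlength.
         */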
3636         for (i = 0; i < num_attr; i++) {
3637                 struct opa_aggregate *agg;
3638                 size_t agg_data_len;
3639                 size_t agg_size;
3640                 u32 am;
3641
3642                 agg = (struct opa_aggregate *)next_smp;
3643                 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3644                 agg_size = sizeof(*agg) + agg_data_len;
3645                 am = be32_to_cpu(agg->attr_mod);
3646
3647                 *resp_len += agg_size;
3648
3649                 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3650                         smp->status |= IB_SMP_INVALID_FIELD;
3651                         return reply((struct ib_mad_hdr *)smp);
3652                 }
3653
3654                 /* zero the payload for this segment */
3655                 memset(next_smp + sizeof(*agg), 0, agg_data_len);
3656
3657                 (void) subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
3658                                         ibdev, port, NULL);
3659                 if (smp->status & ~IB_SMP_DIRECTION) {
3660                         set_aggr_error(agg);
3661                         return reply((struct ib_mad_hdr *)smp);
3662                 }
3663                 next_smp += agg_size;
3664
3665         }
3666
3667         return reply((struct ib_mad_hdr *)smp);
3668 }
3669
3670 static int subn_set_opa_aggregate(struct opa_smp *smp,
3671                                   struct ib_device *ibdev, u8 port,
3672                                   u32 *resp_len)
3673 {
3674         int i;
3675         u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3676         u8 *next_smp = opa_get_smp_data(smp);
3677
3678         if (num_attr < 1 || num_attr > 117) {
3679                 smp->status |= IB_SMP_INVALID_FIELD;
3680                 return reply((struct ib_mad_hdr *)smp);
3681         }
3682
3683         for (i = 0; i < num_attr; i++) {
3684                 struct opa_aggregate *agg;
3685                 size_t agg_data_len;
3686                 size_t agg_size;
3687                 u32 am;
3688
3689                 agg = (struct opa_aggregate *)next_smp;
3690                 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3691                 agg_size = sizeof(*agg) + agg_data_len;
3692                 am = be32_to_cpu(agg->attr_mod);
3693
3694                 *resp_len += agg_size;
3695
3696                 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3697                         smp->status |= IB_SMP_INVALID_FIELD;
3698                         return reply((struct ib_mad_hdr *)smp);
3699                 }
3700
3701                 (void) subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
3702                                         ibdev, port, NULL);
3703                 if (smp->status & ~IB_SMP_DIRECTION) {
3704                         set_aggr_error(agg);
3705                         return reply((struct ib_mad_hdr *)smp);
3706                 }
3707                 next_smp += agg_size;
3708
3709         }
3710
3711         return reply((struct ib_mad_hdr *)smp);
3712 }
3713
3714 /*
3715  * OPAv1 specifies that, on the transition to link up, these counters
3716  * are cleared:
3717  *   PortRcvErrors [*]
3718  *   LinkErrorRecovery
3719  *   LocalLinkIntegrityErrors
3720  *   ExcessiveBufferOverruns [*]
3721  *
3722  * [*] Error info associated with these counters is retained, but the
3723  * error info status is reset to 0.
3724  */
3725 void clear_linkup_counters(struct hfi1_devdata *dd)
3726 {
3727         /* PortRcvErrors */
3728         write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3729         dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3730         /* LinkErrorRecovery */
3731         write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3732         write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
3733         /* LocalLinkIntegrityErrors */
3734         write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
3735         write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
3736         /* ExcessiveBufferOverruns */
3737         write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3738         dd->rcv_ovfl_cnt = 0;
3739         dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3740 }
3741
3742 /*
3743  * is_local_mad() returns 1 if 'mad' was sent from, and is destined to,
3744  * the local node; 0 otherwise.
3745  */
3746 static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
3747                         const struct ib_wc *in_wc)
3748 {
3749         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3750         const struct opa_smp *smp = (const struct opa_smp *)mad;
3751
3752         if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
3753                 return (smp->hop_cnt == 0 &&
3754                         smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
3755                         smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
3756         }
3757
3758         return (in_wc->slid == ppd->lid);
3759 }
3760
3761 /*
3762  * opa_local_smp_check() should only be called on MADs for which
3763  * is_local_mad() returns true. It applies the SMP checks that are
3764  * specific to SMPs which are sent from, and destined to, this node.
3765  * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
3766  * otherwise.
3767  *
3768  * SMPs which arrive from other nodes are instead checked by
3769  * opa_smp_check().
3770  */
3771 static int opa_local_smp_check(struct hfi1_ibport *ibp,
3772                                const struct ib_wc *in_wc)
3773 {
3774         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3775         u16 slid = in_wc->slid;
3776         u16 pkey;
3777
3778         if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
3779                 return 1;
3780
3781         pkey = ppd->pkeys[in_wc->pkey_index];
3782         /*
3783          * We need to do the "node-local" checks specified in OPAv1,
3784          * rev 0.90, section 9.10.26, which are:
3785          *   - pkey is 0x7fff, or 0xffff
3786          *   - Source QPN == 0 || Destination QPN == 0
3787          *   - the MAD header's management class is either
3788          *     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
3789          *     IB_MGMT_CLASS_SUBN_LID_ROUTED
3790          *   - SLID != 0
3791          *
3792          * However, we know (and so don't need to check again) that,
3793          * for local SMPs, the MAD stack passes MADs with:
3794          *   - Source QPN of 0
3795          *   - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
3796          *   - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
3797          *     our own port's lid
3798          *
3799          */
3800         if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
3801                 return 0;
3802         ingress_pkey_table_fail(ppd, pkey, slid);
3803         return 1;
3804 }
3805
3806 static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
3807                             u8 port, const struct opa_mad *in_mad,
3808                             struct opa_mad *out_mad,
3809                             u32 *resp_len)
3810 {
3811         struct opa_smp *smp = (struct opa_smp *)out_mad;
3812         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3813         u8 *data;
3814         u32 am;
3815         __be16 attr_id;
3816         int ret;
3817
3818         *out_mad = *in_mad;
3819         data = opa_get_smp_data(smp);
3820
3821         am = be32_to_cpu(smp->attr_mod);
3822         attr_id = smp->attr_id;
3823         if (smp->class_version != OPA_SMI_CLASS_VERSION) {
3824                 smp->status |= IB_SMP_UNSUP_VERSION;
3825                 ret = reply((struct ib_mad_hdr *)smp);
3826                 goto bail;
3827         }
3828         ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
3829                          smp->route.dr.dr_slid, smp->route.dr.return_path,
3830                          smp->hop_cnt);
3831         if (ret) {
3832                 u32 port_num = be32_to_cpu(smp->attr_mod);
3833
3834                 /*
3835                  * If this is a get/set portinfo, we already check the
3836                  * M_Key if the MAD is for another port and the M_Key
3837                  * is OK on the receiving port. This check is needed
3838                  * to increment the error counters when the M_Key
3839                  * fails to match on *both* ports.
3840                  */
3841                 if (attr_id == IB_SMP_ATTR_PORT_INFO &&
3842                     (smp->method == IB_MGMT_METHOD_GET ||
3843                      smp->method == IB_MGMT_METHOD_SET) &&
3844                     port_num && port_num <= ibdev->phys_port_cnt &&
3845                     port != port_num)
3846                         (void) check_mkey(to_iport(ibdev, port_num),
3847                                           (struct ib_mad_hdr *)smp, 0,
3848                                           smp->mkey, smp->route.dr.dr_slid,
3849                                           smp->route.dr.return_path,
3850                                           smp->hop_cnt);
3851                 ret = IB_MAD_RESULT_FAILURE;
3852                 goto bail;
3853         }
3854
3855         *resp_len = opa_get_smp_header_size(smp);
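        /* start with the OPA SMP header size; handlers add their payload sizes */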
3856
3857         switch (smp->method) {
3858         case IB_MGMT_METHOD_GET:
3859                 switch (attr_id) {
3860                 default:
3861                         clear_opa_smp_data(smp);
3862                         ret = subn_get_opa_sma(attr_id, smp, am, data,
3863                                                ibdev, port, resp_len);
3864                         goto bail;
3865                 case OPA_ATTRIB_ID_AGGREGATE:
3866                         ret = subn_get_opa_aggregate(smp, ibdev, port,
3867                                                      resp_len);
3868                         goto bail;
3869                 }
3870         case IB_MGMT_METHOD_SET:
3871                 switch (attr_id) {
3872                 default:
3873                         ret = subn_set_opa_sma(attr_id, smp, am, data,
3874                                                ibdev, port, resp_len);
3875                         goto bail;
3876                 case OPA_ATTRIB_ID_AGGREGATE:
3877                         ret = subn_set_opa_aggregate(smp, ibdev, port,
3878                                                      resp_len);
3879                         goto bail;
3880                 }
3881         case IB_MGMT_METHOD_TRAP:
3882         case IB_MGMT_METHOD_REPORT:
3883         case IB_MGMT_METHOD_REPORT_RESP:
3884         case IB_MGMT_METHOD_GET_RESP:
3885                 /*
3886                  * The ib_mad module will call us to process responses
3887                  * before checking for other consumers.
3888                  * Just tell the caller to process it normally.
3889                  */
3890                 ret = IB_MAD_RESULT_SUCCESS;
3891                 goto bail;
3892         default:
3893                 smp->status |= IB_SMP_UNSUP_METHOD;
3894                 ret = reply((struct ib_mad_hdr *)smp);
3895         }
3896
3897 bail:
3898         return ret;
3899 }
3900
3901 static int process_subn(struct ib_device *ibdev, int mad_flags,
3902                         u8 port, const struct ib_mad *in_mad,
3903                         struct ib_mad *out_mad)
3904 {
3905         struct ib_smp *smp = (struct ib_smp *)out_mad;
3906         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3907         int ret;
3908
3909         *out_mad = *in_mad;
3910         if (smp->class_version != 1) {
3911                 smp->status |= IB_SMP_UNSUP_VERSION;
3912                 ret = reply((struct ib_mad_hdr *)smp);
3913                 goto bail;
3914         }
3915
3916         ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
3917                          smp->mkey, (__force __be32)smp->dr_slid,
3918                          smp->return_path, smp->hop_cnt);
3919         if (ret) {
3920                 u32 port_num = be32_to_cpu(smp->attr_mod);
3921
3922                 /*
3923                  * If this is a get/set portinfo, we already check the
3924                  * M_Key if the MAD is for another port and the M_Key
3925                  * is OK on the receiving port. This check is needed
3926                  * to increment the error counters when the M_Key
3927                  * fails to match on *both* ports.
3928                  */
3929                 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
3930                     (smp->method == IB_MGMT_METHOD_GET ||
3931                      smp->method == IB_MGMT_METHOD_SET) &&
3932                     port_num && port_num <= ibdev->phys_port_cnt &&
3933                     port != port_num)
3934                         (void) check_mkey(to_iport(ibdev, port_num),
3935                                           (struct ib_mad_hdr *)smp, 0,
3936                                           smp->mkey,
3937                                           (__force __be32)smp->dr_slid,
3938                                           smp->return_path, smp->hop_cnt);
3939                 ret = IB_MAD_RESULT_FAILURE;
3940                 goto bail;
3941         }
3942
3943         switch (smp->method) {
3944         case IB_MGMT_METHOD_GET:
3945                 switch (smp->attr_id) {
3946                 case IB_SMP_ATTR_NODE_INFO:
3947                         ret = subn_get_nodeinfo(smp, ibdev, port);
3948                         goto bail;
3949                 default:
3950                         smp->status |= IB_SMP_UNSUP_METH_ATTR;
3951                         ret = reply((struct ib_mad_hdr *)smp);
3952                         goto bail;
3953                 }
3954         }
3955
3956 bail:
3957         return ret;
3958 }
3959
static int process_perf_opa(struct ib_device *ibdev, u8 port,
                            const struct opa_mad *in_mad,
                            struct opa_mad *out_mad, u32 *resp_len)
{
        struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
        int ret;

        *out_mad = *in_mad;

        if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
                pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
                return reply((struct ib_mad_hdr *)pmp);
        }

        *resp_len = sizeof(pmp->mad_hdr);

        switch (pmp->mad_hdr.method) {
        case IB_MGMT_METHOD_GET:
                switch (pmp->mad_hdr.attr_id) {
                case IB_PMA_CLASS_PORT_INFO:
                        ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_PORT_STATUS:
                        ret = pma_get_opa_portstatus(pmp, ibdev, port,
                                                     resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
                        ret = pma_get_opa_datacounters(pmp, ibdev, port,
                                                       resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
                        ret = pma_get_opa_porterrors(pmp, ibdev, port,
                                                     resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_ERROR_INFO:
                        ret = pma_get_opa_errorinfo(pmp, ibdev, port,
                                                    resp_len);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_mad_hdr *)pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (pmp->mad_hdr.attr_id) {
                case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
                        ret = pma_set_opa_portstatus(pmp, ibdev, port,
                                                     resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_ERROR_INFO:
                        ret = pma_set_opa_errorinfo(pmp, ibdev, port,
                                                    resp_len);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_mad_hdr *)pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_GET_RESP:
                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        default:
                pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
                ret = reply((struct ib_mad_hdr *)pmp);
        }

bail:
        return ret;
}

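/**
 * hfi1_process_opa_mad - process an incoming OPA MAD
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 * @out_mad_size: the size of any outgoing MAD reply
 * @out_mad_pkey_index: the P_Key index for any outgoing MAD reply
 *
 * Routes the MAD by management class, selects the limited management
 * P_Key index for the response, and sizes the reply: replies are
 * rounded up to a multiple of 8 bytes, while MADs passed through
 * unchanged keep the incoming payload length.
 */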
static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
                                u8 port, const struct ib_wc *in_wc,
                                const struct ib_grh *in_grh,
                                const struct opa_mad *in_mad,
                                struct opa_mad *out_mad, size_t *out_mad_size,
                                u16 *out_mad_pkey_index)
{
        int ret;
        int pkey_idx;
        u32 resp_len = 0;
        struct hfi1_ibport *ibp = to_iport(ibdev, port);

        pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
        if (pkey_idx < 0) {
                pr_warn("failed to find limited mgmt pkey, defaulting to 0x%x\n",
                        hfi1_get_pkey(ibp, 1));
                pkey_idx = 1;
        }
        *out_mad_pkey_index = (u16)pkey_idx;

        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                if (is_local_mad(ibp, in_mad, in_wc)) {
                        ret = opa_local_smp_check(ibp, in_wc);
                        if (ret)
                                return IB_MAD_RESULT_FAILURE;
                }
                ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
                                       out_mad, &resp_len);
                goto bail;
        case IB_MGMT_CLASS_PERF_MGMT:
                ret = process_perf_opa(ibdev, port, in_mad, out_mad,
                                       &resp_len);
                goto bail;

        default:
                ret = IB_MAD_RESULT_SUCCESS;
        }

bail:
        if (ret & IB_MAD_RESULT_REPLY)
                *out_mad_size = round_up(resp_len, 8);
        else if (ret & IB_MAD_RESULT_SUCCESS)
                *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);

        return ret;
}

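/**
 * hfi1_process_ib_mad - process an incoming IB (non-OPA) MAD
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Only the subnet management classes are handled here; anything else
 * is returned with IB_MAD_RESULT_SUCCESS so the ib_mad module
 * processes it normally.
 */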
static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                               const struct ib_wc *in_wc,
                               const struct ib_grh *in_grh,
                               const struct ib_mad *in_mad,
                               struct ib_mad *out_mad)
{
        int ret;

        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
                goto bail;
        default:
                ret = IB_MAD_RESULT_SUCCESS;
        }

bail:
        return ret;
}

/**
 * hfi1_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @in_mad_size: the size of the incoming MAD
 * @out_mad: any outgoing MAD reply
 * @out_mad_size: the size of any outgoing MAD reply
 * @out_mad_pkey_index: the P_Key index for any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS (without IB_MAD_RESULT_REPLY set) if
 * this is a MAD that we are not interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
                     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
                     u16 *out_mad_pkey_index)
{
        switch (in_mad->base_version) {
        case OPA_MGMT_BASE_VERSION:
                if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
                        dev_err(ibdev->dma_device, "invalid in_mad_size\n");
                        return IB_MAD_RESULT_FAILURE;
                }
                return hfi1_process_opa_mad(ibdev, mad_flags, port,
                                            in_wc, in_grh,
                                            (struct opa_mad *)in_mad,
                                            (struct opa_mad *)out_mad,
                                            out_mad_size,
                                            out_mad_pkey_index);
        case IB_MGMT_BASE_VERSION:
                return hfi1_process_ib_mad(ibdev, mad_flags, port,
                                           in_wc, in_grh,
                                           (const struct ib_mad *)in_mad,
                                           (struct ib_mad *)out_mad);
        default:
                break;
        }

        return IB_MAD_RESULT_FAILURE;
}

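/*
 * Send completion callback for MADs posted by our agents: all that is
 * needed here is to return the send buffer to the MAD pool.
 */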
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        ib_free_send_mad(mad_send_wc->send_buf);
}

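/**
 * hfi1_create_agents - register an SMI MAD send agent for each port
 * @dev: the hfi1 device
 *
 * Returns 0 on success; on failure, any agents registered on earlier
 * ports are unregistered again and the error from
 * ib_register_mad_agent() is returned.
 */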
int hfi1_create_agents(struct hfi1_ibdev *dev)
{
        struct hfi1_devdata *dd = dd_from_dev(dev);
        struct ib_mad_agent *agent;
        struct hfi1_ibport *ibp;
        int p;
        int ret;

        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
                                              NULL, 0, send_handler,
                                              NULL, NULL, 0);
                if (IS_ERR(agent)) {
                        ret = PTR_ERR(agent);
                        goto err;
                }

                ibp->send_agent = agent;
        }

        return 0;

err:
        /* Unwind any agents that were registered before the failure. */
        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                if (ibp->send_agent) {
                        agent = ibp->send_agent;
                        ibp->send_agent = NULL;
                        ib_unregister_mad_agent(agent);
                }
        }

        return ret;
}

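/**
 * hfi1_free_agents - unregister the per-port MAD send agents
 * @dev: the hfi1 device
 *
 * Also destroys any address handle cached for the subnet manager.
 */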
void hfi1_free_agents(struct hfi1_ibdev *dev)
{
        struct hfi1_devdata *dd = dd_from_dev(dev);
        struct ib_mad_agent *agent;
        struct hfi1_ibport *ibp;
        int p;

        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                if (ibp->send_agent) {
                        agent = ibp->send_agent;
                        ibp->send_agent = NULL;
                        ib_unregister_mad_agent(agent);
                }
                if (ibp->sm_ah) {
                        ib_destroy_ah(&ibp->sm_ah->ibah);
                        ibp->sm_ah = NULL;
                }
        }
}