/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
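/*
 * OPA_NUM_PKEY_BLOCKS_PER_SMP: the number of partition-table blocks that
 * fit in the data payload of a single directed-route SMP, where each
 * block holds OPA_PARTITION_TABLE_BLK_SIZE 16-bit P_Keys.
 */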
#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
                        / (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))

#include "hfi.h"
#include "mad.h"
#include "trace.h"

/* the reset value from the FM is supposed to be 0xffff, handle both */
#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
#define OPA_LINK_WIDTH_RESET 0xffff

static int reply(struct ib_mad_hdr *smp)
{
        /*
         * The verbs framework will handle the directed/LID route
         * packet changes.
         */
        smp->method = IB_MGMT_METHOD_GET_RESP;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                smp->status |= IB_SMP_DIRECTION;
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static inline void clear_opa_smp_data(struct opa_smp *smp)
{
        void *data = opa_get_smp_data(smp);
        size_t size = opa_get_smp_data_size(smp);

        memset(data, 0, size);
}

static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_agent *agent;
        struct opa_smp *smp;
        int ret;
        unsigned long flags;
        unsigned long timeout;
        int pkey_idx;
        u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;

        agent = ibp->send_agent;
        if (!agent)
                return;

        /* o14-3.2.1 */
        if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE)
                return;

        /* o14-2 */
        if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
                return;

        pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
        if (pkey_idx < 0) {
                pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
                        __func__, hfi1_get_pkey(ibp, 1));
                pkey_idx = 1;
        }

        send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
                                      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                                      GFP_ATOMIC, IB_MGMT_BASE_VERSION);
        if (IS_ERR(send_buf))
                return;

        smp = send_buf->mad;
        smp->base_version = OPA_MGMT_BASE_VERSION;
        smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        smp->class_version = OPA_SMI_CLASS_VERSION;
        smp->method = IB_MGMT_METHOD_TRAP;
        ibp->tid++;
        smp->tid = cpu_to_be64(ibp->tid);
        smp->attr_id = IB_SMP_ATTR_NOTICE;
        /* o14-1: smp->mkey = 0; */
        memcpy(smp->route.lid.data, data, len);

        spin_lock_irqsave(&ibp->lock, flags);
        if (!ibp->sm_ah) {
                if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
                        struct ib_ah *ah;

                        ah = hfi1_create_qp0_ah(ibp, ibp->sm_lid);
                        if (IS_ERR(ah)) {
                                ret = PTR_ERR(ah);
                        } else {
                                send_buf->ah = ah;
                                ibp->sm_ah = to_iah(ah);
                                ret = 0;
                        }
                } else {
                        ret = -EINVAL;
                }
        } else {
                send_buf->ah = &ibp->sm_ah->ibah;
                ret = 0;
        }
        spin_unlock_irqrestore(&ibp->lock, flags);

        if (!ret)
                ret = ib_post_send_mad(send_buf, NULL);
        if (!ret) {
                /* 4.096 usec. */
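                /* e.g. subnet_timeout == 18 gives 4.096us * 2^18 ~= 1.07s */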
                timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
                ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
        } else {
                ib_free_send_mad(send_buf);
                ibp->trap_timeout = 0;
        }
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                    u32 qp1, u32 qp2, u16 lid1, u16 lid2)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;
        u32 _lid1 = lid1;
        u32 _lid2 = lid2;

        memset(&data, 0, sizeof(data));

        if (trap_num == OPA_TRAP_BAD_P_KEY)
                ibp->pkey_violations++;
        else
                ibp->qkey_violations++;
        ibp->n_pkt_drops++;

        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_257_258.lid1 = cpu_to_be32(_lid1);
        data.ntc_257_258.lid2 = cpu_to_be32(_lid2);
        data.ntc_257_258.key = cpu_to_be32(key);
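        /* the SL is carried in the upper bits of this byte */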
        data.ntc_257_258.sl = sl << 3;
        data.ntc_257_258.qp1 = cpu_to_be32(qp1);
        data.ntc_257_258.qp2 = cpu_to_be32(qp2);

        send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
                     __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;

        memset(&data, 0, sizeof(data));
        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = OPA_TRAP_BAD_M_KEY;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_256.lid = data.issuer_lid;
        data.ntc_256.method = mad->method;
        data.ntc_256.attr_id = mad->attr_id;
        data.ntc_256.attr_mod = mad->attr_mod;
        data.ntc_256.mkey = mkey;
        if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                data.ntc_256.dr_slid = dr_slid;
                data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
                if (hop_cnt > ARRAY_SIZE(data.ntc_256.dr_rtn_path)) {
                        data.ntc_256.dr_trunc_hop |=
                                IB_NOTICE_TRAP_DR_TRUNC;
                        hop_cnt = ARRAY_SIZE(data.ntc_256.dr_rtn_path);
                }
                data.ntc_256.dr_trunc_hop |= hop_cnt;
                memcpy(data.ntc_256.dr_rtn_path, return_path,
                       hop_cnt);
        }

        send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void hfi1_cap_mask_chg(struct hfi1_ibport *ibp)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;

        memset(&data, 0, sizeof(data));

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_144.lid = data.issuer_lid;
        data.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

        send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;

        memset(&data, 0, sizeof(data));

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = OPA_TRAP_CHANGE_SYSGUID;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
        data.ntc_145.lid = data.issuer_lid;

        send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{
        struct opa_mad_notice_attr data;
        u32 lid = ppd_from_ibp(ibp)->lid;

        memset(&data, 0, sizeof(data));

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
        data.issuer_lid = cpu_to_be32(lid);
        data.ntc_144.lid = data.issuer_lid;
        data.ntc_144.change_flags =
                cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);

        send_trap(ibp, &data, sizeof(data));
}

static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
                                   u8 *data, struct ib_device *ibdev,
                                   u8 port, u32 *resp_len)
{
        struct opa_node_description *nd;

        if (am) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        nd = (struct opa_node_description *)data;

        memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));

        if (resp_len)
                *resp_len += sizeof(*nd);

        return reply((struct ib_mad_hdr *)smp);
}

static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
                                   struct ib_device *ibdev, u8 port,
                                   u32 *resp_len)
{
        struct opa_node_info *ni;
        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
        unsigned pidx = port - 1; /* IB numbers ports from 1, hw from 0 */

        ni = (struct opa_node_info *)data;

        /* GUID 0 is illegal */
        if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
        ni->base_version = OPA_MGMT_BASE_VERSION;
        ni->class_version = OPA_SMI_CLASS_VERSION;
        ni->node_type = 1;     /* channel adapter */
        ni->num_ports = ibdev->phys_port_cnt;
        /* This is already in network order */
        ni->system_image_guid = ib_hfi1_sys_image_guid;
        /* Use first-port GUID as node GUID */
        ni->node_guid = cpu_to_be64(dd->pport->guid);
        ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
        ni->device_id = cpu_to_be16(dd->pcidev->device);
        ni->revision = cpu_to_be32(dd->minrev);
        ni->local_port_num = port;
        ni->vendor_id[0] = dd->oui1;
        ni->vendor_id[1] = dd->oui2;
        ni->vendor_id[2] = dd->oui3;

        if (resp_len)
                *resp_len += sizeof(*ni);

        return reply((struct ib_mad_hdr *)smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
        unsigned pidx = port - 1; /* IB numbers ports from 1, hw from 0 */

        /* GUID 0 is illegal */
        if (smp->attr_mod || pidx >= dd->num_pports ||
            dd->pport[pidx].guid == 0)
                smp->status |= IB_SMP_INVALID_FIELD;
        else
                nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);

        nip->base_version = OPA_MGMT_BASE_VERSION;
        nip->class_version = OPA_SMI_CLASS_VERSION;
        nip->node_type = 1;     /* channel adapter */
        nip->num_ports = ibdev->phys_port_cnt;
        /* This is already in network order */
        nip->sys_guid = ib_hfi1_sys_image_guid;
        /* Use first-port GUID as node GUID */
        nip->node_guid = cpu_to_be64(dd->pport->guid);
        nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
        nip->device_id = cpu_to_be16(dd->pcidev->device);
        nip->revision = cpu_to_be32(dd->minrev);
        nip->local_port_num = port;
        nip->vendor_id[0] = dd->oui1;
        nip->vendor_id[1] = dd->oui2;
        nip->vendor_id[2] = dd->oui3;

        return reply((struct ib_mad_hdr *)smp);
}

static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
{
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
}

static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
{
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
}

static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
{
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
}

static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
                      int mad_flags, __be64 mkey, __be32 dr_slid,
                      u8 return_path[], u8 hop_cnt)
{
        int valid_mkey = 0;
        int ret = 0;

        /* Is the mkey in the process of expiring? */
        if (ibp->mkey_lease_timeout &&
            time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
                /* Clear timeout and mkey protection field. */
                ibp->mkey_lease_timeout = 0;
                ibp->mkeyprot = 0;
        }

        if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
            ibp->mkey == mkey)
                valid_mkey = 1;

        /* Unset lease timeout on any valid Get/Set/TrapRepress */
        if (valid_mkey && ibp->mkey_lease_timeout &&
            (mad->method == IB_MGMT_METHOD_GET ||
             mad->method == IB_MGMT_METHOD_SET ||
             mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
                ibp->mkey_lease_timeout = 0;

        if (!valid_mkey) {
                switch (mad->method) {
                case IB_MGMT_METHOD_GET:
                        /* Bad mkey not a violation below level 2 */
                        if (ibp->mkeyprot < 2)
                                break;
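                        /* FALLTHROUGH */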
                case IB_MGMT_METHOD_SET:
                case IB_MGMT_METHOD_TRAP_REPRESS:
                        if (ibp->mkey_violations != 0xFFFF)
                                ++ibp->mkey_violations;
                        if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
                                ibp->mkey_lease_timeout = jiffies +
                                        ibp->mkey_lease_period * HZ;
                        /* Generate a trap notice. */
                        bad_mkey(ibp, mad, mkey, dr_slid, return_path,
                                 hop_cnt);
                        ret = 1;
                }
        }

        return ret;
}

/*
 * The SMA caches reads from LCB registers in case the LCB is unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {
        u32 off;
        u64 val;
};

static struct lcb_datum lcb_cache[] = {
        { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
};

static int write_lcb_cache(u32 off, u64 val)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
                if (lcb_cache[i].off == off) {
                        lcb_cache[i].val = val;
                        return 0;
                }
        }

        pr_warn("%s bad offset 0x%x\n", __func__, off);
        return -1;
}

static int read_lcb_cache(u32 off, u64 *val)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
                if (lcb_cache[i].off == off) {
                        *val = lcb_cache[i].val;
                        return 0;
                }
        }

        pr_warn("%s bad offset 0x%x\n", __func__, off);
        return -1;
}

void read_ltp_rtt(struct hfi1_devdata *dd)
{
        u64 reg;

        if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
                dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
        else
                write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
}

static u8 __opa_porttype(struct hfi1_pportdata *ppd)
{
        if (qsfp_mod_present(ppd)) {
                if (ppd->qsfp_info.cache_valid)
                        return OPA_PORT_TYPE_STANDARD;
                return OPA_PORT_TYPE_DISCONNECTED;
        }
        return OPA_PORT_TYPE_UNKNOWN;
}

static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
                                   struct ib_device *ibdev, u8 port,
                                   u32 *resp_len)
{
        int i;
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        struct hfi1_ibport *ibp;
        struct opa_port_info *pi = (struct opa_port_info *)data;
        u8 mtu;
        u8 credit_rate;
        u32 state;
        u32 num_ports = OPA_AM_NPORT(am);
        u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
        u32 buffer_units;
        u64 tmp = 0;

        if (num_ports != 1) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hw from 0 */
        ppd = dd->pport + (port - 1);
        ibp = &ppd->ibport_data;

        if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
            ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        pi->lid = cpu_to_be32(ppd->lid);

        /* Only return the mkey if the protection field allows it. */
        if (!(smp->method == IB_MGMT_METHOD_GET &&
              ibp->mkey != smp->mkey &&
              ibp->mkeyprot == 1))
                pi->mkey = ibp->mkey;

        pi->subnet_prefix = ibp->gid_prefix;
        pi->sm_lid = cpu_to_be32(ibp->sm_lid);
        pi->ib_cap_mask = cpu_to_be32(ibp->port_cap_flags);
        pi->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
        pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
        pi->sa_qp = cpu_to_be32(ppd->sa_qp);

        pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
        pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
        pi->link_width.active = cpu_to_be16(ppd->link_width_active);

        pi->link_width_downgrade.supported =
                        cpu_to_be16(ppd->link_width_downgrade_supported);
        pi->link_width_downgrade.enabled =
                        cpu_to_be16(ppd->link_width_downgrade_enabled);
        pi->link_width_downgrade.tx_active =
                        cpu_to_be16(ppd->link_width_downgrade_tx_active);
        pi->link_width_downgrade.rx_active =
                        cpu_to_be16(ppd->link_width_downgrade_rx_active);

        pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
        pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
        pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);

        state = driver_lstate(ppd);

        if (start_of_sm_config && (state == IB_PORT_INIT))
                ppd->is_sm_config_started = 1;

        pi->port_phys_conf = __opa_porttype(ppd) & 0xf;

#if PI_LED_ENABLE_SUP
        pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
        pi->port_states.ledenable_offlinereason |=
                ppd->is_sm_config_started << 5;
        pi->port_states.ledenable_offlinereason |=
                ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON;
#else
        pi->port_states.offline_reason = ppd->neighbor_normal << 4;
        pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
        pi->port_states.offline_reason |= ppd->offline_disabled_reason &
                                                OPA_PI_MASK_OFFLINE_REASON;
#endif /* PI_LED_ENABLE_SUP */

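        /* physical state in the high nibble, logical state in the low */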
        pi->port_states.portphysstate_portstate =
                (hfi1_ibphys_portstate(ppd) << 4) | state;

        pi->mkeyprotect_lmc = (ibp->mkeyprot << 6) | ppd->lmc;

        memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
        for (i = 0; i < ppd->vls_supported; i++) {
                mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
                if ((i % 2) == 0)
                        pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
                else
                        pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
        }
        /* don't forget VL 15 */
        mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
        pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
        pi->smsl = ibp->sm_sl & OPA_PI_MASK_SMSL;
        pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
        pi->partenforce_filterraw |=
                (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
        if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
                pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
        if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
                pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
        pi->mkey_violations = cpu_to_be16(ibp->mkey_violations);
        /* P_KeyViolations are counted by hardware. */
        pi->pkey_violations = cpu_to_be16(ibp->pkey_violations);
        pi->qkey_violations = cpu_to_be16(ibp->qkey_violations);

        pi->vl.cap = ppd->vls_supported;
        pi->vl.high_limit = cpu_to_be16(ibp->vl_high_limit);
        pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
        pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);

        pi->clientrereg_subnettimeout = ibp->subnet_timeout;

        pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
                                         OPA_PORT_LINK_MODE_OPA << 5 |
                                         OPA_PORT_LINK_MODE_OPA);

        pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);

        pi->port_mode = cpu_to_be16(
                                ppd->is_active_optimize_enabled ?
                                        OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);

        pi->port_packet_format.supported =
                cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
        pi->port_packet_format.enabled =
                cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
        /* flit_control.interleave is (OPA V1, version .76):
         * bits         use
         * ----         ---
         * 2            res
         * 2            DistanceSupported
         * 2            DistanceEnabled
         * 5            MaxNextLevelTxEnabled
         * 5            MaxNextLevelRxSupported
         *
         * HFI supports only "distance mode 1" (see OPA V1, version .76,
         * section 9.6.2), so set DistanceSupported, DistanceEnabled
         * to 0x1.
         */
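        /*
         * 0x1400 == 0b0001 0100 0000 0000: DistanceSupported = 0x1,
         * DistanceEnabled = 0x1, all other interleave fields zero.
         */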
        pi->flit_control.interleave = cpu_to_be16(0x1400);

        pi->link_down_reason = ppd->local_link_down_reason.sma;
        pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
        pi->port_error_action = cpu_to_be32(ppd->port_error_action);
        pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);

        /* 32.768 usec. response time (guessing) */
        pi->resptimevalue = 3;

        pi->local_port_num = port;

        /* buffer info for FM */
        pi->overall_buffer_space = cpu_to_be16(dd->link_credits);

        pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
        pi->neigh_port_num = ppd->neighbor_port_number;
        pi->port_neigh_mode =
                (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
                (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
                (ppd->neighbor_fm_security ?
                        OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);

        /* HFIs shall always return VL15 credits to their
         * neighbor in a timely manner, without any credit return pacing.
         */
        credit_rate = 0;
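        /*
         * buffer_units packs, per the OPA_PI_MASK_BUF_UNIT_* masks:
         * vau at bit 0, vcu at bit 3, the VL15 credit rate at bit 6,
         * and vl15_init at bit 11.
         */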
        buffer_units  = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
        buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
        buffer_units |= (credit_rate << 6) &
                                OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
        buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
        pi->buffer_units = cpu_to_be32(buffer_units);

        pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported);

        /* HFI supports a replay buffer 128 LTPs in size */
        pi->replay_depth.buffer = 0x80;
        /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
        read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);

        /* this counter is 16 bits wide, but the replay_depth.wire
         * variable is only 8 bits */
        if (tmp > 0xff)
                tmp = 0xff;
        pi->replay_depth.wire = tmp;

        if (resp_len)
                *resp_len += sizeof(struct opa_port_info);

        return reply((struct ib_mad_hdr *)smp);
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
        struct hfi1_pportdata *ppd = dd->pport + port - 1;

        memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));

        return 0;
}

static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
                                    struct ib_device *ibdev, u8 port,
                                    u32 *resp_len)
{
        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
        u32 n_blocks_req = OPA_AM_NBLK(am);
        u32 start_block = am & 0x7ff;
        __be16 *p;
        u16 *q;
        int i;
        u16 n_blocks_avail;
        unsigned npkeys = hfi1_get_npkeys(dd);
        size_t size;

        if (n_blocks_req == 0) {
                pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
                        port, start_block, n_blocks_req);
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

        size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);

        if (start_block + n_blocks_req > n_blocks_avail ||
            n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
                pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
                        start_block, n_blocks_req, n_blocks_avail,
                        OPA_NUM_PKEY_BLOCKS_PER_SMP);
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        p = (__be16 *)data;
        q = (u16 *)data;
        /* get the real pkeys if we are requesting the first block */
        if (start_block == 0) {
                get_pkeys(dd, port, q);
                for (i = 0; i < npkeys; i++)
                        p[i] = cpu_to_be16(q[i]);
                if (resp_len)
                        *resp_len += size;
        } else {
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        return reply((struct ib_mad_hdr *)smp);
}

enum {
        HFI_TRANSITION_DISALLOWED,
        HFI_TRANSITION_IGNORED,
        HFI_TRANSITION_ALLOWED,
        HFI_TRANSITION_UNDEFINED,
};

/*
 * Use shortened names to improve readability of
 * {logical,physical}_state_transitions
 */
enum {
        __D = HFI_TRANSITION_DISALLOWED,
        __I = HFI_TRANSITION_IGNORED,
        __A = HFI_TRANSITION_ALLOWED,
        __U = HFI_TRANSITION_UNDEFINED,
};

/*
 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
 * represented in physical_state_transitions.
 */
#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)

/*
 * Within physical_state_transitions, rows represent "old" states,
 * columns "new" states, and physical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 6-4).
 */
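/* For example, allowed[0][1] == __A: Polling (2) -> Disabled (3) is legal. */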
static const struct {
        u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
} physical_state_transitions = {
        {
                /* 2    3    4    5    6    7    8    9   10   11 */
        /* 2 */ { __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
        /* 3 */ { __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
        /* 4 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
        /* 5 */ { __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
        /* 6 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
        /* 7 */ { __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
        /* 8 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
        /* 9 */ { __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
        /*10 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
        /*11 */ { __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
        }
};

/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented in
 * logical_state_transitions.
 */

#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)

/*
 * Within logical_state_transitions, rows represent "old" states,
 * columns "new" states, and logical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 9-12).
 */
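/* For example, allowed[1][2] == __A: Init (2) -> Armed (3) is legal. */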
static const struct {
        u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
} logical_state_transitions = {
        {
                /* 1    2    3    4    5 */
        /* 1 */ { __I, __D, __D, __D, __U},
        /* 2 */ { __D, __I, __A, __D, __U},
        /* 3 */ { __D, __D, __I, __A, __U},
        /* 4 */ { __D, __D, __I, __I, __U},
        /* 5 */ { __U, __U, __U, __U, __U},
        }
};

static int logical_transition_allowed(int old, int new)
{
        if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
            new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
                pr_warn("invalid logical state(s) (old %d new %d)\n",
                        old, new);
                return HFI_TRANSITION_UNDEFINED;
        }

        if (new == IB_PORT_NOP)
                return HFI_TRANSITION_ALLOWED; /* always allowed */

        /* adjust states for indexing into logical_state_transitions */
        old -= IB_PORT_DOWN;
        new -= IB_PORT_DOWN;

        if (old < 0 || new < 0)
                return HFI_TRANSITION_UNDEFINED;
        return logical_state_transitions.allowed[old][new];
}

static int physical_transition_allowed(int old, int new)
{
        if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
            new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
                pr_warn("invalid physical state(s) (old %d new %d)\n",
                        old, new);
                return HFI_TRANSITION_UNDEFINED;
        }

        if (new == IB_PORTPHYSSTATE_NOP)
                return HFI_TRANSITION_ALLOWED; /* always allowed */

        /* adjust states for indexing into physical_state_transitions */
        old -= IB_PORTPHYSSTATE_POLLING;
        new -= IB_PORTPHYSSTATE_POLLING;

        if (old < 0 || new < 0)
                return HFI_TRANSITION_UNDEFINED;
        return physical_state_transitions.allowed[old][new];
}

static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
                                          u32 logical_new, u32 physical_new)
{
        u32 physical_old = driver_physical_state(ppd);
        u32 logical_old = driver_logical_state(ppd);
        int ret, logical_allowed, physical_allowed;

        logical_allowed = ret =
                logical_transition_allowed(logical_old, logical_new);

        if (ret == HFI_TRANSITION_DISALLOWED ||
            ret == HFI_TRANSITION_UNDEFINED) {
                pr_warn("invalid logical state transition %s -> %s\n",
                        opa_lstate_name(logical_old),
                        opa_lstate_name(logical_new));
                return ret;
        }

        physical_allowed = ret =
                physical_transition_allowed(physical_old, physical_new);

        if (ret == HFI_TRANSITION_DISALLOWED ||
            ret == HFI_TRANSITION_UNDEFINED) {
                pr_warn("invalid physical state transition %s -> %s\n",
                        opa_pstate_name(physical_old),
                        opa_pstate_name(physical_new));
                return ret;
        }

        if (logical_allowed == HFI_TRANSITION_IGNORED &&
            physical_allowed == HFI_TRANSITION_IGNORED)
                return HFI_TRANSITION_IGNORED;

        /*
         * Either physical_allowed or logical_allowed is
         * HFI_TRANSITION_ALLOWED.
         */
        return HFI_TRANSITION_ALLOWED;
}

static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
                           u32 logical_state, u32 phys_state,
                           int suppress_idle_sma)
{
        struct hfi1_devdata *dd = ppd->dd;
        u32 link_state;
        int ret;

        ret = port_states_transition_allowed(ppd, logical_state, phys_state);
        if (ret == HFI_TRANSITION_DISALLOWED ||
            ret == HFI_TRANSITION_UNDEFINED) {
                /* error message emitted above */
                smp->status |= IB_SMP_INVALID_FIELD;
                return 0;
        }

        if (ret == HFI_TRANSITION_IGNORED)
                return 0;

        if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
            !(logical_state == IB_PORT_DOWN ||
              logical_state == IB_PORT_NOP)) {
                pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
                        logical_state, phys_state);
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        /*
         * Logical state changes are summarized in OPAv1g1 spec.,
         * Table 9-12; physical state changes are summarized in
         * OPAv1g1 spec., Table 6.4.
         */
        switch (logical_state) {
        case IB_PORT_NOP:
                if (phys_state == IB_PORTPHYSSTATE_NOP)
                        break;
                /* FALLTHROUGH */
        case IB_PORT_DOWN:
                if (phys_state == IB_PORTPHYSSTATE_NOP) {
                        link_state = HLS_DN_DOWNDEF;
                } else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
                        link_state = HLS_DN_POLL;
                        set_link_down_reason(ppd,
                             OPA_LINKDOWN_REASON_FM_BOUNCE, 0,
                             OPA_LINKDOWN_REASON_FM_BOUNCE);
                } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
                        link_state = HLS_DN_DISABLE;
                } else {
                        pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
                                phys_state);
                        smp->status |= IB_SMP_INVALID_FIELD;
                        break;
                }

                set_link_state(ppd, link_state);
                if (link_state == HLS_DN_DISABLE &&
                    (ppd->offline_disabled_reason >
                     OPA_LINKDOWN_REASON_SMA_DISABLED ||
                     ppd->offline_disabled_reason ==
                     OPA_LINKDOWN_REASON_NONE))
                        ppd->offline_disabled_reason =
                                OPA_LINKDOWN_REASON_SMA_DISABLED;
                /*
                 * Don't send a reply if the response would be sent
                 * through the disabled port.
                 */
                if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
                        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
                break;
        case IB_PORT_ARMED:
                ret = set_link_state(ppd, HLS_UP_ARMED);
                if ((ret == 0) && (suppress_idle_sma == 0))
                        send_idle_sma(dd, SMA_IDLE_ARM);
                break;
        case IB_PORT_ACTIVE:
                if (ppd->neighbor_normal) {
                        ret = set_link_state(ppd, HLS_UP_ACTIVE);
                        if (ret == 0)
                                send_idle_sma(dd, SMA_IDLE_ACTIVE);
                } else {
                        pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
                        smp->status |= IB_SMP_INVALID_FIELD;
                }
                break;
        default:
                pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
                        logical_state);
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        return 0;
}

/**
 * __subn_set_opa_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 */
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
                                   struct ib_device *ibdev, u8 port,
                                   u32 *resp_len)
{
        struct opa_port_info *pi = (struct opa_port_info *)data;
        struct ib_event event;
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        struct hfi1_ibport *ibp;
        u8 clientrereg;
        unsigned long flags;
        u32 smlid, opa_lid; /* tmp vars to hold LID values */
        u16 lid;
        u8 ls_old, ls_new, ps_new;
        u8 vls;
        u8 msl;
        u8 crc_enabled;
        u16 lse, lwe, mtu;
        u32 num_ports = OPA_AM_NPORT(am);
        u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
        int ret, i, invalid = 0, call_set_mtu = 0;
        int call_link_downgrade_policy = 0;

        if (num_ports != 1) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }

        opa_lid = be32_to_cpu(pi->lid);
        if (opa_lid & 0xFFFF0000) {
                pr_warn("OPA_PortInfo lid out of range: %X\n", opa_lid);
                smp->status |= IB_SMP_INVALID_FIELD;
                goto get_only;
        }

        lid = (u16)(opa_lid & 0x0000FFFF);

        smlid = be32_to_cpu(pi->sm_lid);
        if (smlid & 0xFFFF0000) {
                pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
                smp->status |= IB_SMP_INVALID_FIELD;
                goto get_only;
        }
        smlid &= 0x0000FFFF;

        clientrereg = (pi->clientrereg_subnettimeout &
                        OPA_PI_MASK_CLIENT_REREGISTER);

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hw from 0 */
        ppd = dd->pport + (port - 1);
        ibp = &ppd->ibport_data;
        event.device = ibdev;
        event.element.port_num = port;

        ls_old = driver_lstate(ppd);

        ibp->mkey = pi->mkey;
        ibp->gid_prefix = pi->subnet_prefix;
        ibp->mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);

        /* Must be a valid unicast LID address. */
        if ((lid == 0 && ls_old > IB_PORT_INIT) ||
            lid >= HFI1_MULTICAST_LID_BASE) {
                smp->status |= IB_SMP_INVALID_FIELD;
                pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
                        lid);
        } else if (ppd->lid != lid ||
                   ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
                if (ppd->lid != lid)
                        hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
                if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
                        hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
                hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
                event.event = IB_EVENT_LID_CHANGE;
                ib_dispatch_event(&event);
        }

        msl = pi->smsl & OPA_PI_MASK_SMSL;
        if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
                ppd->linkinit_reason =
                        (pi->partenforce_filterraw &
                         OPA_PI_MASK_LINKINIT_REASON);
        /* enable/disable SW pkey checking as per FM control */
        if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_IN)
                ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
        else
                ppd->part_enforce &= ~HFI1_PART_ENFORCE_IN;

        if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_OUT)
                ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
        else
                ppd->part_enforce &= ~HFI1_PART_ENFORCE_OUT;

        /* Must be a valid unicast LID address. */
        if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
            smlid >= HFI1_MULTICAST_LID_BASE) {
                smp->status |= IB_SMP_INVALID_FIELD;
                pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
        } else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
                pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
                spin_lock_irqsave(&ibp->lock, flags);
                if (ibp->sm_ah) {
                        if (smlid != ibp->sm_lid)
                                ibp->sm_ah->attr.dlid = smlid;
                        if (msl != ibp->sm_sl)
                                ibp->sm_ah->attr.sl = msl;
                }
                spin_unlock_irqrestore(&ibp->lock, flags);
                if (smlid != ibp->sm_lid)
                        ibp->sm_lid = smlid;
                if (msl != ibp->sm_sl)
                        ibp->sm_sl = msl;
                event.event = IB_EVENT_SM_CHANGE;
                ib_dispatch_event(&event);
        }

        if (pi->link_down_reason == 0) {
                ppd->local_link_down_reason.sma = 0;
                ppd->local_link_down_reason.latest = 0;
        }

        if (pi->neigh_link_down_reason == 0) {
                ppd->neigh_link_down_reason.sma = 0;
                ppd->neigh_link_down_reason.latest = 0;
        }

        ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
        ppd->sa_qp = be32_to_cpu(pi->sa_qp);

        ppd->port_error_action = be32_to_cpu(pi->port_error_action);
        lwe = be16_to_cpu(pi->link_width.enabled);
        if (lwe) {
                if (lwe == OPA_LINK_WIDTH_RESET ||
                    lwe == OPA_LINK_WIDTH_RESET_OLD)
                        set_link_width_enabled(ppd, ppd->link_width_supported);
                else if ((lwe & ~ppd->link_width_supported) == 0)
                        set_link_width_enabled(ppd, lwe);
                else
                        smp->status |= IB_SMP_INVALID_FIELD;
        }
        lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
        /* LWD.E is always applied - 0 means "disabled" */
        if (lwe == OPA_LINK_WIDTH_RESET ||
            lwe == OPA_LINK_WIDTH_RESET_OLD) {
                set_link_width_downgrade_enabled(ppd,
                                ppd->link_width_downgrade_supported);
        } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
                /* only set and apply if something changed */
                if (lwe != ppd->link_width_downgrade_enabled) {
                        set_link_width_downgrade_enabled(ppd, lwe);
                        call_link_downgrade_policy = 1;
                }
        } else {
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        lse = be16_to_cpu(pi->link_speed.enabled);
        if (lse) {
                if (lse & be16_to_cpu(pi->link_speed.supported))
                        set_link_speed_enabled(ppd, lse);
                else
                        smp->status |= IB_SMP_INVALID_FIELD;
        }

        ibp->mkeyprot = (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
        ibp->vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
                                    ibp->vl_high_limit);

        if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
            ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
                smp->status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)smp);
        }
        for (i = 0; i < ppd->vls_supported; i++) {
                if ((i % 2) == 0)
                        mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >> 4)
                                          & 0xF);
                else
                        mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] & 0xF);
                if (mtu == 0xffff) {
                        pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
                                mtu,
                                (pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
                        smp->status |= IB_SMP_INVALID_FIELD;
                        mtu = hfi1_max_mtu; /* use a valid MTU */
                }
                if (dd->vld[i].mtu != mtu) {
                        dd_dev_info(dd,
                                "MTU change on vl %d from %d to %d\n",
                                i, dd->vld[i].mtu, mtu);
                        dd->vld[i].mtu = mtu;
                        call_set_mtu++;
                }
        }
        /* As per OPAV1 spec: VL15 must support and be configured
         * for operation with a 2048 or larger MTU.
         */
        mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
        if (mtu < 2048 || mtu == 0xffff)
                mtu = 2048;
        if (dd->vld[15].mtu != mtu) {
                dd_dev_info(dd,
                        "MTU change on vl 15 from %d to %d\n",
                        dd->vld[15].mtu, mtu);
                dd->vld[15].mtu = mtu;
                call_set_mtu++;
        }
        if (call_set_mtu)
                set_mtu(ppd);

        /* Set operational VLs */
        vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
        if (vls) {
                if (vls > ppd->vls_supported) {
                        pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
                                pi->operational_vls);
                        smp->status |= IB_SMP_INVALID_FIELD;
                } else {
                        if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
                                                vls) == -EINVAL)
                                smp->status |= IB_SMP_INVALID_FIELD;
                }
        }

        if (pi->mkey_violations == 0)
                ibp->mkey_violations = 0;

        if (pi->pkey_violations == 0)
                ibp->pkey_violations = 0;

        if (pi->qkey_violations == 0)
                ibp->qkey_violations = 0;

        ibp->subnet_timeout =
                pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;

        crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
        crc_enabled >>= 4;
        crc_enabled &= 0xf;

        if (crc_enabled != 0)
                ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);

        ppd->is_active_optimize_enabled =
                        !!(be16_to_cpu(pi->port_mode)
                                        & OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);

        ls_new = pi->port_states.portphysstate_portstate &
                        OPA_PI_MASK_PORT_STATE;
        ps_new = (pi->port_states.portphysstate_portstate &
                        OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;

        if (ls_old == IB_PORT_INIT) {
                if (start_of_sm_config) {
                        if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
                                ppd->is_sm_config_started = 1;
                } else if (ls_new == IB_PORT_ARMED) {
                        if (ppd->is_sm_config_started == 0)
                                invalid = 1;
                }
        }

        /* Handle CLIENT_REREGISTER event b/c SM asked us for it */
        if (clientrereg) {
                event.event = IB_EVENT_CLIENT_REREGISTER;
                ib_dispatch_event(&event);
        }

        /*
         * Do the port state change now that the other link parameters
         * have been set.
         * Changing the port physical state only makes sense if the link
         * is down or is being set to down.
         */

        ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
        if (ret)
                return ret;

        ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);

        /* restore re-reg bit per o14-12.2.1 */
        pi->clientrereg_subnettimeout |= clientrereg;

        /*
         * Apply the new link downgrade policy.  This may result in a link
         * bounce.  Do this after everything else so things are settled.
         * Possible problem: if setting the port state above fails, then
         * the policy change is not applied.
         */
        if (call_link_downgrade_policy)
                apply_link_downgrade_policy(ppd, 0);

        return ret;

get_only:
        return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
        struct hfi1_pportdata *ppd;
        int i;
        int changed = 0;
        int update_includes_mgmt_partition = 0;

        /*
         * IB port one/two always maps to context zero/one,
         * always a kernel context, no locking needed.
         * If we get here with ppd setup, no need to check
         * that rcd is valid.
         */
        ppd = dd->pport + (port - 1);
        /*
         * If the update does not include the management pkey, don't do it.
         */
        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (pkeys[i] == LIM_MGMT_P_KEY) {
                        update_includes_mgmt_partition = 1;
                        break;
                }
        }

        if (!update_includes_mgmt_partition)
                return 1;

        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                u16 key = pkeys[i];
                u16 okey = ppd->pkeys[i];

                if (key == okey)
                        continue;
                /*
                 * The SM gives us the complete PKey table. We have
                 * to ensure that we put the PKeys in the matching
                 * slots.
                 */
                ppd->pkeys[i] = key;
                changed = 1;
        }

        if (changed) {
                struct ib_event event;

                (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);

                event.event = IB_EVENT_PKEY_CHANGE;
                event.device = &dd->verbs_dev.ibdev;
                event.element.port_num = port;
                ib_dispatch_event(&event);
        }
        return 0;
}

1398 static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
1399                                     struct ib_device *ibdev, u8 port,
1400                                     u32 *resp_len)
1401 {
1402         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1403         u32 n_blocks_sent = OPA_AM_NBLK(am);
1404         u32 start_block = am & 0x7ff;
1405         u16 *p = (u16 *) data;
1406         __be16 *q = (__be16 *)data;
1407         int i;
1408         u16 n_blocks_avail;
1409         unsigned npkeys = hfi1_get_npkeys(dd);
1410
1411         if (n_blocks_sent == 0) {
1412                 pr_warn("OPA Set PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
1413                         port, start_block, n_blocks_sent);
1414                 smp->status |= IB_SMP_INVALID_FIELD;
1415                 return reply((struct ib_mad_hdr *)smp);
1416         }
1417
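        /*
         * Round the pkey count up to whole blocks; when npkeys is an
         * exact multiple of the block size this computes one block more
         * than is strictly available.
         */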
1418         n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
1419
1420         if (start_block + n_blocks_sent > n_blocks_avail ||
1421             n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
1422                 pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%zx\n",
1423                         start_block, n_blocks_sent, n_blocks_avail,
1424                         OPA_NUM_PKEY_BLOCKS_PER_SMP);
1425                 smp->status |= IB_SMP_INVALID_FIELD;
1426                 return reply((struct ib_mad_hdr *)smp);
1427         }
1428
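        /* the SMP payload is big-endian; convert to host order in place */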
1429         for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
1430                 p[i] = be16_to_cpu(q[i]);
1431
1432         if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
1433                 smp->status |= IB_SMP_INVALID_FIELD;
1434                 return reply((struct ib_mad_hdr *)smp);
1435         }
1436
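        /* as with the other Set handlers, reply with the updated attribute */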
1437         return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len);
1438 }
1439
1440 static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
1441 {
1442         u64 *val = data;
1443
1444         *val++ = read_csr(dd, SEND_SC2VLT0);
1445         *val++ = read_csr(dd, SEND_SC2VLT1);
1446         *val++ = read_csr(dd, SEND_SC2VLT2);
1447         *val++ = read_csr(dd, SEND_SC2VLT3);
1448         return 0;
1449 }
1450
1451 #define ILLEGAL_VL 12
1452 /*
1453  * filter_sc2vlt remaps any SC-to-VL15 entry to ILLEGAL_VL (except
1454  * for SC15, which must map to VL15). Without this remapping it is
1455  * possible for the VL15 counters to increment when we try to send
1456  * on an SC which is mapped to an invalid VL.
1457  */
1458 static void filter_sc2vlt(void *data)
1459 {
1460         int i;
1461         u8 *pd = data;
1462
1463         for (i = 0; i < OPA_MAX_SCS; i++) {
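                /* SC15 is fixed at VL15 by the mapping rules; skip it */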
1464                 if (i == 15)
1465                         continue;
1466                 if ((pd[i] & 0x1f) == 0xf)
1467                         pd[i] = ILLEGAL_VL;
1468         }
1469 }
1470
1471 static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
1472 {
1473         u64 *val = data;
1474
1475         filter_sc2vlt(data);
1476
1477         write_csr(dd, SEND_SC2VLT0, *val++);
1478         write_csr(dd, SEND_SC2VLT1, *val++);
1479         write_csr(dd, SEND_SC2VLT2, *val++);
1480         write_csr(dd, SEND_SC2VLT3, *val++);
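        /* update the software shadow copy under the sc2vl seqlock */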
1481         write_seqlock_irq(&dd->sc2vl_lock);
1482         memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
1483         write_sequnlock_irq(&dd->sc2vl_lock);
1484         return 0;
1485 }
1486
1487 static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
1488                                    struct ib_device *ibdev, u8 port,
1489                                    u32 *resp_len)
1490 {
1491         struct hfi1_ibport *ibp = to_iport(ibdev, port);
1492         u8 *p = data;
1493         size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
1494         unsigned i;
1495
1496         if (am) {
1497                 smp->status |= IB_SMP_INVALID_FIELD;
1498                 return reply((struct ib_mad_hdr *)smp);
1499         }
1500
1501         for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
1502                 *p++ = ibp->sl_to_sc[i];
1503
1504         if (resp_len)
1505                 *resp_len += size;
1506
1507         return reply((struct ib_mad_hdr *)smp);
1508 }
1509
1510 static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
1511                                    struct ib_device *ibdev, u8 port,
1512                                    u32 *resp_len)
1513 {
1514         struct hfi1_ibport *ibp = to_iport(ibdev, port);
1515         u8 *p = data;
1516         int i;
1517
1518         if (am) {
1519                 smp->status |= IB_SMP_INVALID_FIELD;
1520                 return reply((struct ib_mad_hdr *)smp);
1521         }
1522
1523         for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
1524                 ibp->sl_to_sc[i] = *p++;
1525
1526         return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
1527 }
1528
1529 static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
1530                                    struct ib_device *ibdev, u8 port,
1531                                    u32 *resp_len)
1532 {
1533         struct hfi1_ibport *ibp = to_iport(ibdev, port);
1534         u8 *p = data;
1535         size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
1536         unsigned i;
1537
1538         if (am) {
1539                 smp->status |= IB_SMP_INVALID_FIELD;
1540                 return reply((struct ib_mad_hdr *)smp);
1541         }
1542
1543         for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
1544                 *p++ = ibp->sc_to_sl[i];
1545
1546         if (resp_len)
1547                 *resp_len += size;
1548
1549         return reply((struct ib_mad_hdr *)smp);
1550 }
1551
1552 static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
1553                                    struct ib_device *ibdev, u8 port,
1554                                    u32 *resp_len)
1555 {
1556         struct hfi1_ibport *ibp = to_iport(ibdev, port);
1557         u8 *p = data;
1558         int i;
1559
1560         if (am) {
1561                 smp->status |= IB_SMP_INVALID_FIELD;
1562                 return reply((struct ib_mad_hdr *)smp);
1563         }
1564
1565         for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
1566                 ibp->sc_to_sl[i] = *p++;
1567
1568         return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len);
1569 }
1570
1571 static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
1572                                     struct ib_device *ibdev, u8 port,
1573                                     u32 *resp_len)
1574 {
1575         u32 n_blocks = OPA_AM_NBLK(am);
1576         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1577         void *vp = (void *) data;
1578         size_t size = 4 * sizeof(u64);
1579
1580         if (n_blocks != 1) {
1581                 smp->status |= IB_SMP_INVALID_FIELD;
1582                 return reply((struct ib_mad_hdr *)smp);
1583         }
1584
1585         get_sc2vlt_tables(dd, vp);
1586
1587         if (resp_len)
1588                 *resp_len += size;
1589
1590         return reply((struct ib_mad_hdr *)smp);
1591 }
1592
1593 static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
1594                                     struct ib_device *ibdev, u8 port,
1595                                     u32 *resp_len)
1596 {
1597         u32 n_blocks = OPA_AM_NBLK(am);
1598         int async_update = OPA_AM_ASYNC(am);
1599         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1600         void *vp = (void *) data;
1601         struct hfi1_pportdata *ppd;
1602         int lstate;
1603
1604         if (n_blocks != 1 || async_update) {
1605                 smp->status |= IB_SMP_INVALID_FIELD;
1606                 return reply((struct ib_mad_hdr *)smp);
1607         }
1608
1609         /* IB numbers ports from 1, hw from 0 */
1610         ppd = dd->pport + (port - 1);
1611         lstate = driver_lstate(ppd);
1612         /* it's known that async_update is 0 by this point, but include
1613          * the explicit check for clarity */
1614         if (!async_update &&
1615             (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
1616                 smp->status |= IB_SMP_INVALID_FIELD;
1617                 return reply((struct ib_mad_hdr *)smp);
1618         }
1619
1620         set_sc2vlt_tables(dd, vp);
1621
1622         return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len);
1623 }
1624
1625 static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
1626                                      struct ib_device *ibdev, u8 port,
1627                                      u32 *resp_len)
1628 {
1629         u32 n_blocks = OPA_AM_NPORT(am);
1630         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1631         struct hfi1_pportdata *ppd;
1632         void *vp = (void *) data;
1633         int size;
1634
1635         if (n_blocks != 1) {
1636                 smp->status |= IB_SMP_INVALID_FIELD;
1637                 return reply((struct ib_mad_hdr *)smp);
1638         }
1639
1640         ppd = dd->pport + (port - 1);
1641
1642         size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
1643
1644         if (resp_len)
1645                 *resp_len += size;
1646
1647         return reply((struct ib_mad_hdr *)smp);
1648 }
1649
1650 static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
1651                                      struct ib_device *ibdev, u8 port,
1652                                      u32 *resp_len)
1653 {
1654         u32 n_blocks = OPA_AM_NPORT(am);
1655         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1656         struct hfi1_pportdata *ppd;
1657         void *vp = (void *) data;
1658         int lstate;
1659
1660         if (n_blocks != 1) {
1661                 smp->status |= IB_SMP_INVALID_FIELD;
1662                 return reply((struct ib_mad_hdr *)smp);
1663         }
1664
1665         /* IB numbers ports from 1, hw from 0 */
1666         ppd = dd->pport + (port - 1);
1667         lstate = driver_lstate(ppd);
1668         if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
1669                 smp->status |= IB_SMP_INVALID_FIELD;
1670                 return reply((struct ib_mad_hdr *)smp);
1671         }
1672
1675         fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
1676
1677         return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
1678                                          resp_len);
1679 }
1680
1681 static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
1682                               struct ib_device *ibdev, u8 port,
1683                               u32 *resp_len)
1684 {
1685         u32 nports = OPA_AM_NPORT(am);
1686         u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1687         u32 lstate;
1688         struct hfi1_ibport *ibp;
1689         struct hfi1_pportdata *ppd;
1690         struct opa_port_state_info *psi = (struct opa_port_state_info *) data;
1691
1692         if (nports != 1) {
1693                 smp->status |= IB_SMP_INVALID_FIELD;
1694                 return reply((struct ib_mad_hdr *)smp);
1695         }
1696
1697         ibp = to_iport(ibdev, port);
1698         ppd = ppd_from_ibp(ibp);
1699
1700         lstate = driver_lstate(ppd);
1701
1702         if (start_of_sm_config && (lstate == IB_PORT_INIT))
1703                 ppd->is_sm_config_started = 1;
1704
1705 #if PI_LED_ENABLE_SUP
1706         psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
1707         psi->port_states.ledenable_offlinereason |=
1708                 ppd->is_sm_config_started << 5;
1709         psi->port_states.ledenable_offlinereason |=
1710                 ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON;
1711 #else
1712         psi->port_states.offline_reason = ppd->neighbor_normal << 4;
1713         psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
1714         psi->port_states.offline_reason |= ppd->offline_disabled_reason &
1715                                 OPA_PI_MASK_OFFLINE_REASON;
1716 #endif /* PI_LED_ENABLE_SUP */
1717
1718         psi->port_states.portphysstate_portstate =
1719                 (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
1720         psi->link_width_downgrade_tx_active =
1721                 cpu_to_be16(ppd->link_width_downgrade_tx_active);
1722         psi->link_width_downgrade_rx_active =
1723                 cpu_to_be16(ppd->link_width_downgrade_rx_active);
1724         if (resp_len)
1725                 *resp_len += sizeof(struct opa_port_state_info);
1726
1727         return reply((struct ib_mad_hdr *)smp);
1728 }
1729
1730 static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
1731                               struct ib_device *ibdev, u8 port,
1732                               u32 *resp_len)
1733 {
1734         u32 nports = OPA_AM_NPORT(am);
1735         u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1736         u32 ls_old;
1737         u8 ls_new, ps_new;
1738         struct hfi1_ibport *ibp;
1739         struct hfi1_pportdata *ppd;
1740         struct opa_port_state_info *psi = (struct opa_port_state_info *) data;
1741         int ret, invalid = 0;
1742
1743         if (nports != 1) {
1744                 smp->status |= IB_SMP_INVALID_FIELD;
1745                 return reply((struct ib_mad_hdr *)smp);
1746         }
1747
1748         ibp = to_iport(ibdev, port);
1749         ppd = ppd_from_ibp(ibp);
1750
1751         ls_old = driver_lstate(ppd);
1752
1753         ls_new = port_states_to_logical_state(&psi->port_states);
1754         ps_new = port_states_to_phys_state(&psi->port_states);
1755
1756         if (ls_old == IB_PORT_INIT) {
1757                 if (start_of_sm_config) {
1758                         if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
1759                                 ppd->is_sm_config_started = 1;
1760                 } else if (ls_new == IB_PORT_ARMED) {
1761                         if (ppd->is_sm_config_started == 0)
1762                                 invalid = 1;
1763                 }
1764         }
1765
1766         ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
1767         if (ret)
1768                 return ret;
1769
1770         if (invalid)
1771                 smp->status |= IB_SMP_INVALID_FIELD;
1772
1773         return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len);
1774 }
1775
1776 static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
1777                                      struct ib_device *ibdev, u8 port,
1778                                      u32 *resp_len)
1779 {
1780         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1781         u32 addr = OPA_AM_CI_ADDR(am);
1782         u32 len = OPA_AM_CI_LEN(am) + 1;
1783         int ret;
1784
1785 #define __CI_PAGE_SIZE (1 << 7) /* 128 bytes */
1786 #define __CI_PAGE_MASK (~(__CI_PAGE_SIZE - 1))
1787 #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
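        /*
         * Example: addr = 120, len = 16 gives __CI_PAGE_NUM(120) == 0 but
         * __CI_PAGE_NUM(135) == 128, so the request crosses a 128-byte
         * page and is rejected below.
         */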
1788
1789         /* check that addr is within spec, and
1790          * addr and (addr + len - 1) are on the same "page" */
1791         if (addr >= 4096 ||
1792                 (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
1793                 smp->status |= IB_SMP_INVALID_FIELD;
1794                 return reply((struct ib_mad_hdr *)smp);
1795         }
1796
1797         ret = get_cable_info(dd, port, addr, len, data);
1798
1799         if (ret == -ENODEV) {
1800                 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1801                 return reply((struct ib_mad_hdr *)smp);
1802         }
1803
1804         /* The address range for the CableInfo SMA query is wider than the
1805          * memory available on the QSFP cable. We want to return a valid
1806          * response, albeit zeroed out, for address ranges beyond available
1807          * memory but that are within the CableInfo query spec
1808          */
1809         if (ret < 0 && ret != -ERANGE) {
1810                 smp->status |= IB_SMP_INVALID_FIELD;
1811                 return reply((struct ib_mad_hdr *)smp);
1812         }
1813
1814         if (resp_len)
1815                 *resp_len += len;
1816
1817         return reply((struct ib_mad_hdr *)smp);
1818 }
1819
1820 static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
1821                               struct ib_device *ibdev, u8 port, u32 *resp_len)
1822 {
1823         u32 num_ports = OPA_AM_NPORT(am);
1824         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1825         struct hfi1_pportdata *ppd;
1826         struct buffer_control *p = (struct buffer_control *) data;
1827         int size;
1828
1829         if (num_ports != 1) {
1830                 smp->status |= IB_SMP_INVALID_FIELD;
1831                 return reply((struct ib_mad_hdr *)smp);
1832         }
1833
1834         ppd = dd->pport + (port - 1);
1835         size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
1836         trace_bct_get(dd, p);
1837         if (resp_len)
1838                 *resp_len += size;
1839
1840         return reply((struct ib_mad_hdr *)smp);
1841 }
1842
1843 static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
1844                               struct ib_device *ibdev, u8 port, u32 *resp_len)
1845 {
1846         u32 num_ports = OPA_AM_NPORT(am);
1847         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1848         struct hfi1_pportdata *ppd;
1849         struct buffer_control *p = (struct buffer_control *) data;
1850
1851         if (num_ports != 1) {
1852                 smp->status |= IB_SMP_INVALID_FIELD;
1853                 return reply((struct ib_mad_hdr *)smp);
1854         }
1855         ppd = dd->pport + (port - 1);
1856         trace_bct_set(dd, p);
1857         if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
1858                 smp->status |= IB_SMP_INVALID_FIELD;
1859                 return reply((struct ib_mad_hdr *)smp);
1860         }
1861
1862         return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len);
1863 }
1864
1865 static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
1866                                  struct ib_device *ibdev, u8 port,
1867                                  u32 *resp_len)
1868 {
1869         struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1870         u32 num_ports = OPA_AM_NPORT(am);
1871         u8 section = (am & 0x00ff0000) >> 16;
1872         u8 *p = data;
1873         int size = 0;
1874
1875         if (num_ports != 1) {
1876                 smp->status |= IB_SMP_INVALID_FIELD;
1877                 return reply((struct ib_mad_hdr *)smp);
1878         }
1879
1880         switch (section) {
1881         case OPA_VLARB_LOW_ELEMENTS:
1882                 size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
1883                 break;
1884         case OPA_VLARB_HIGH_ELEMENTS:
1885                 size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
1886                 break;
1887         case OPA_VLARB_PREEMPT_ELEMENTS:
1888                 size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
1889                 break;
1890         case OPA_VLARB_PREEMPT_MATRIX:
1891                 size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
1892                 break;
1893         default:
1894                 pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
1895                         be32_to_cpu(smp->attr_mod));
1896                 smp->status |= IB_SMP_INVALID_FIELD;
1897                 break;
1898         }
1899
1900         if (size > 0 && resp_len)
1901                 *resp_len += size;
1902
1903         return reply((struct ib_mad_hdr *)smp);
1904 }
1905
1906 static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
1907                                  struct ib_device *ibdev, u8 port,
1908                                  u32 *resp_len)
1909 {
1910         struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1911         u32 num_ports = OPA_AM_NPORT(am);
1912         u8 section = (am & 0x00ff0000) >> 16;
1913         u8 *p = data;
1914
1915         if (num_ports != 1) {
1916                 smp->status |= IB_SMP_INVALID_FIELD;
1917                 return reply((struct ib_mad_hdr *)smp);
1918         }
1919
1920         switch (section) {
1921         case OPA_VLARB_LOW_ELEMENTS:
1922                 (void) fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
1923                 break;
1924         case OPA_VLARB_HIGH_ELEMENTS:
1925                 (void) fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
1926                 break;
1927         /* neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
1928          * can be changed from the default values */
1929         case OPA_VLARB_PREEMPT_ELEMENTS:
1930                 /* FALLTHROUGH */
1931         case OPA_VLARB_PREEMPT_MATRIX:
1932                 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1933                 break;
1934         default:
1935                 pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
1936                         be32_to_cpu(smp->attr_mod));
1937                 smp->status |= IB_SMP_INVALID_FIELD;
1938                 break;
1939         }
1940
1941         return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len);
1942 }
1943
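/*
 * An OPA MAD is 2048 bytes: the 24-byte common MAD header plus 2024
 * bytes of payload.
 */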
1944 struct opa_pma_mad {
1945         struct ib_mad_hdr mad_hdr;
1946         u8 data[2024];
1947 } __packed;
1948
1949 struct opa_class_port_info {
1950         u8 base_version;
1951         u8 class_version;
1952         __be16 cap_mask;
1953         __be32 cap_mask2_resp_time;
1954
1955         u8 redirect_gid[16];
1956         __be32 redirect_tc_fl;
1957         __be32 redirect_lid;
1958         __be32 redirect_sl_qp;
1959         __be32 redirect_qkey;
1960
1961         u8 trap_gid[16];
1962         __be32 trap_tc_fl;
1963         __be32 trap_lid;
1964         __be32 trap_hl_qp;
1965         __be32 trap_qkey;
1966
1967         __be16 trap_pkey;
1968         __be16 redirect_pkey;
1969
1970         u8 trap_sl_rsvd;
1971         u8 reserved[3];
1972 } __packed;
1973
1974 struct opa_port_status_req {
1975         __u8 port_num;
1976         __u8 reserved[3];
1977         __be32 vl_select_mask;
1978 };
1979
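/* data VLs 0-7 (bits 0-7) plus VL15 (bit 15) */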
1980 #define VL_MASK_ALL             0x000080ff
1981
1982 struct opa_port_status_rsp {
1983         __u8 port_num;
1984         __u8 reserved[3];
1985         __be32  vl_select_mask;
1986
1987         /* Data counters */
1988         __be64 port_xmit_data;
1989         __be64 port_rcv_data;
1990         __be64 port_xmit_pkts;
1991         __be64 port_rcv_pkts;
1992         __be64 port_multicast_xmit_pkts;
1993         __be64 port_multicast_rcv_pkts;
1994         __be64 port_xmit_wait;
1995         __be64 sw_port_congestion;
1996         __be64 port_rcv_fecn;
1997         __be64 port_rcv_becn;
1998         __be64 port_xmit_time_cong;
1999         __be64 port_xmit_wasted_bw;
2000         __be64 port_xmit_wait_data;
2001         __be64 port_rcv_bubble;
2002         __be64 port_mark_fecn;
2003         /* Error counters */
2004         __be64 port_rcv_constraint_errors;
2005         __be64 port_rcv_switch_relay_errors;
2006         __be64 port_xmit_discards;
2007         __be64 port_xmit_constraint_errors;
2008         __be64 port_rcv_remote_physical_errors;
2009         __be64 local_link_integrity_errors;
2010         __be64 port_rcv_errors;
2011         __be64 excessive_buffer_overruns;
2012         __be64 fm_config_errors;
2013         __be32 link_error_recovery;
2014         __be32 link_downed;
2015         u8 uncorrectable_errors;
2016
2017         u8 link_quality_indicator; /* 5res, 3bit */
2018         u8 res2[6];
2019         struct _vls_pctrs {
2020                 /* per-VL Data counters */
2021                 __be64 port_vl_xmit_data;
2022                 __be64 port_vl_rcv_data;
2023                 __be64 port_vl_xmit_pkts;
2024                 __be64 port_vl_rcv_pkts;
2025                 __be64 port_vl_xmit_wait;
2026                 __be64 sw_port_vl_congestion;
2027                 __be64 port_vl_rcv_fecn;
2028                 __be64 port_vl_rcv_becn;
2029                 __be64 port_xmit_time_cong;
2030                 __be64 port_vl_xmit_wasted_bw;
2031                 __be64 port_vl_xmit_wait_data;
2032                 __be64 port_vl_rcv_bubble;
2033                 __be64 port_vl_mark_fecn;
2034                 __be64 port_vl_xmit_discards;
2035         } vls[0]; /* real array size defined by # bits set in vl_select_mask */
2036 };
2037
2038 enum counter_selects {
2039         CS_PORT_XMIT_DATA                       = (1U << 31),
2040         CS_PORT_RCV_DATA                        = (1 << 30),
2041         CS_PORT_XMIT_PKTS                       = (1 << 29),
2042         CS_PORT_RCV_PKTS                        = (1 << 28),
2043         CS_PORT_MCAST_XMIT_PKTS                 = (1 << 27),
2044         CS_PORT_MCAST_RCV_PKTS                  = (1 << 26),
2045         CS_PORT_XMIT_WAIT                       = (1 << 25),
2046         CS_SW_PORT_CONGESTION                   = (1 << 24),
2047         CS_PORT_RCV_FECN                        = (1 << 23),
2048         CS_PORT_RCV_BECN                        = (1 << 22),
2049         CS_PORT_XMIT_TIME_CONG                  = (1 << 21),
2050         CS_PORT_XMIT_WASTED_BW                  = (1 << 20),
2051         CS_PORT_XMIT_WAIT_DATA                  = (1 << 19),
2052         CS_PORT_RCV_BUBBLE                      = (1 << 18),
2053         CS_PORT_MARK_FECN                       = (1 << 17),
2054         CS_PORT_RCV_CONSTRAINT_ERRORS           = (1 << 16),
2055         CS_PORT_RCV_SWITCH_RELAY_ERRORS         = (1 << 15),
2056         CS_PORT_XMIT_DISCARDS                   = (1 << 14),
2057         CS_PORT_XMIT_CONSTRAINT_ERRORS          = (1 << 13),
2058         CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS      = (1 << 12),
2059         CS_LOCAL_LINK_INTEGRITY_ERRORS          = (1 << 11),
2060         CS_PORT_RCV_ERRORS                      = (1 << 10),
2061         CS_EXCESSIVE_BUFFER_OVERRUNS            = (1 << 9),
2062         CS_FM_CONFIG_ERRORS                     = (1 << 8),
2063         CS_LINK_ERROR_RECOVERY                  = (1 << 7),
2064         CS_LINK_DOWNED                          = (1 << 6),
2065         CS_UNCORRECTABLE_ERRORS                 = (1 << 5),
2066 };
2067
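/*
 * Example (illustrative): a ClearPortStatus request that clears only the
 * LinkDowned and UncorrectableErrors counters would carry
 * counter_select_mask == cpu_to_be32(CS_LINK_DOWNED |
 * CS_UNCORRECTABLE_ERRORS).
 */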
2068 struct opa_clear_port_status {
2069         __be64 port_select_mask[4];
2070         __be32 counter_select_mask;
2071 };
2072
2073 struct opa_aggregate {
2074         __be16 attr_id;
2075         __be16 err_reqlength;   /* 1 bit, 8 res, 7 bit */
2076         __be32 attr_mod;
2077         u8 data[0];
2078 };
2079
2080 #define MSK_LLI 0x000000f0
2081 #define MSK_LLI_SFT 4
2082 #define MSK_LER 0x0000000f
2083 #define MSK_LER_SFT 0
2084 #define ADD_LLI 8
2085 #define ADD_LER 2
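/*
 * The request's resolution word carries 4-bit fields for the
 * LocalLinkIntegrity and LinkErrorRecovery counters; a non-zero field n
 * right-shifts the corresponding counter by n + ADD_LLI or n + ADD_LER
 * bits before it is folded into the error summary.
 */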
2086
2087 /* Request contains first three fields, response contains those plus the rest */
2088 struct opa_port_data_counters_msg {
2089         __be64 port_select_mask[4];
2090         __be32 vl_select_mask;
2091         __be32 resolution;
2092
2093         /* Response fields follow */
2094         struct _port_dctrs {
2095                 u8 port_number;
2096                 u8 reserved2[3];
2097                 __be32 link_quality_indicator; /* 29res, 3bit */
2098
2099                 /* Data counters */
2100                 __be64 port_xmit_data;
2101                 __be64 port_rcv_data;
2102                 __be64 port_xmit_pkts;
2103                 __be64 port_rcv_pkts;
2104                 __be64 port_multicast_xmit_pkts;
2105                 __be64 port_multicast_rcv_pkts;
2106                 __be64 port_xmit_wait;
2107                 __be64 sw_port_congestion;
2108                 __be64 port_rcv_fecn;
2109                 __be64 port_rcv_becn;
2110                 __be64 port_xmit_time_cong;
2111                 __be64 port_xmit_wasted_bw;
2112                 __be64 port_xmit_wait_data;
2113                 __be64 port_rcv_bubble;
2114                 __be64 port_mark_fecn;
2115
2116                 __be64 port_error_counter_summary;
2117                 /* Sum of error counts/port */
2118
2119                 struct _vls_dctrs {
2120                         /* per-VL Data counters */
2121                         __be64 port_vl_xmit_data;
2122                         __be64 port_vl_rcv_data;
2123                         __be64 port_vl_xmit_pkts;
2124                         __be64 port_vl_rcv_pkts;
2125                         __be64 port_vl_xmit_wait;
2126                         __be64 sw_port_vl_congestion;
2127                         __be64 port_vl_rcv_fecn;
2128                         __be64 port_vl_rcv_becn;
2129                         __be64 port_xmit_time_cong;
2130                         __be64 port_vl_xmit_wasted_bw;
2131                         __be64 port_vl_xmit_wait_data;
2132                         __be64 port_vl_rcv_bubble;
2133                         __be64 port_vl_mark_fecn;
2134                 } vls[0];
2135                 /* array size defined by #bits set in vl_select_mask*/
2136         } port[1]; /* array size defined by  #ports in attribute modifier */
2137         } port[1]; /* array size defined by #ports in attribute modifier */
2138
2139 struct opa_port_error_counters64_msg {
2140         /* Request contains first two fields, response contains
2141          * the whole structure */
2142         __be64 port_select_mask[4];
2143         __be32 vl_select_mask;
2144
2145         /* Response-only fields follow */
2146         __be32 reserved1;
2147         struct _port_ectrs {
2148                 u8 port_number;
2149                 u8 reserved2[7];
2150                 __be64 port_rcv_constraint_errors;
2151                 __be64 port_rcv_switch_relay_errors;
2152                 __be64 port_xmit_discards;
2153                 __be64 port_xmit_constraint_errors;
2154                 __be64 port_rcv_remote_physical_errors;
2155                 __be64 local_link_integrity_errors;
2156                 __be64 port_rcv_errors;
2157                 __be64 excessive_buffer_overruns;
2158                 __be64 fm_config_errors;
2159                 __be32 link_error_recovery;
2160                 __be32 link_downed;
2161                 u8 uncorrectable_errors;
2162                 u8 reserved3[7];
2163                 struct _vls_ectrs {
2164                         __be64 port_vl_xmit_discards;
2165                 } vls[0];
2166                 /* array size defined by #bits set in vl_select_mask */
2167         } port[1]; /* array size defined by #ports in attribute modifier */
2168 };
2169
2170 struct opa_port_error_info_msg {
2171         __be64 port_select_mask[4];
2172         __be32 error_info_select_mask;
2173         __be32 reserved1;
2174         struct _port_ei {
2175
2176                 u8 port_number;
2177                 u8 reserved2[7];
2178
2179                 /* PortRcvErrorInfo */
2180                 struct {
2181                         u8 status_and_code;
2182                         union {
2183                                 u8 raw[17];
2184                                 struct {
2185                                         /* EI1to12 format */
2186                                         u8 packet_flit1[8];
2187                                         u8 packet_flit2[8];
2188                                         u8 remaining_flit_bits12;
2189                                 } ei1to12;
2190                                 struct {
2191                                         u8 packet_bytes[8];
2192                                         u8 remaining_flit_bits;
2193                                 } ei13;
2194                         } ei;
2195                         u8 reserved3[6];
2196                 } __packed port_rcv_ei;
2197
2198                 /* ExcessiveBufferOverrunInfo */
2199                 struct {
2200                         u8 status_and_sc;
2201                         u8 reserved4[7];
2202                 } __packed excessive_buffer_overrun_ei;
2203
2204                 /* PortXmitConstraintErrorInfo */
2205                 struct {
2206                         u8 status;
2207                         u8 reserved5;
2208                         __be16 pkey;
2209                         __be32 slid;
2210                 } __packed port_xmit_constraint_ei;
2211
2212                 /* PortRcvConstraintErrorInfo */
2213                 struct {
2214                         u8 status;
2215                         u8 reserved6;
2216                         __be16 pkey;
2217                         __be32 slid;
2218                 } __packed port_rcv_constraint_ei;
2219
2220                 /* PortRcvSwitchRelayErrorInfo */
2221                 struct {
2222                         u8 status_and_code;
2223                         u8 reserved7[3];
2224                         __u32 error_info;
2225                 } __packed port_rcv_switch_relay_ei;
2226
2227                 /* UncorrectableErrorInfo */
2228                 struct {
2229                         u8 status_and_code;
2230                         u8 reserved8;
2231                 } __packed uncorrectable_ei;
2232
2233                 /* FMConfigErrorInfo */
2234                 struct {
2235                         u8 status_and_code;
2236                         u8 error_info;
2237                 } __packed fm_config_ei;
2238                 __u32 reserved9;
2239         } port[1]; /* actual array size defined by #ports in attr modifier */
2240 };
2241
2242 /* opa_port_error_info_msg error_info_select_mask bit definitions */
2243 enum error_info_selects {
2244         ES_PORT_RCV_ERROR_INFO                  = (1U << 31),
2245         ES_EXCESSIVE_BUFFER_OVERRUN_INFO        = (1 << 30),
2246         ES_PORT_XMIT_CONSTRAINT_ERROR_INFO      = (1 << 29),
2247         ES_PORT_RCV_CONSTRAINT_ERROR_INFO       = (1 << 28),
2248         ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO     = (1 << 27),
2249         ES_UNCORRECTABLE_ERROR_INFO             = (1 << 26),
2250         ES_FM_CONFIG_ERROR_INFO                 = (1 << 25)
2251 };
2252
2253 static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
2254                                 struct ib_device *ibdev, u32 *resp_len)
2255 {
2256         struct opa_class_port_info *p =
2257                 (struct opa_class_port_info *)pmp->data;
2258
2259         memset(pmp->data, 0, sizeof(pmp->data));
2260
2261         if (pmp->mad_hdr.attr_mod != 0)
2262                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2263
2264         p->base_version = OPA_MGMT_BASE_VERSION;
2265         p->class_version = OPA_SMI_CLASS_VERSION;
2266         /*
2267          * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
2268          */
2269         p->cap_mask2_resp_time = cpu_to_be32(18);
2270
2271         if (resp_len)
2272                 *resp_len += sizeof(*p);
2273
2274         return reply((struct ib_mad_hdr *)pmp);
2275 }
2276
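/*
 * A0 hardware workaround: on pre-B-step silicon (!is_bx()) report the
 * largest per-VL XmitWait value as the port-level XmitWait counter.
 */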
2277 static void a0_portstatus(struct hfi1_pportdata *ppd,
2278                           struct opa_port_status_rsp *rsp, u32 vl_select_mask)
2279 {
2280         if (!is_bx(ppd->dd)) {
2281                 unsigned long vl;
2282                 u64 max_vl_xmit_wait = 0, tmp;
2283                 u32 vl_all_mask = VL_MASK_ALL;
2284
2285                 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2286                                  8 * sizeof(vl_all_mask)) {
2287                         tmp = read_port_cntr(ppd, C_TX_WAIT_VL,
2288                                              idx_from_vl(vl));
2289                         if (tmp > max_vl_xmit_wait)
2290                                 max_vl_xmit_wait = tmp;
2291                 }
2292                 rsp->port_xmit_wait = cpu_to_be64(max_vl_xmit_wait);
2293         }
2294 }
2295
2297 static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
2298                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2299 {
2300         struct opa_port_status_req *req =
2301                 (struct opa_port_status_req *)pmp->data;
2302         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2303         struct opa_port_status_rsp *rsp;
2304         u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2305         unsigned long vl;
2306         size_t response_data_size;
2307         u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2308         u8 port_num = req->port_num;
2309         u8 num_vls = hweight32(vl_select_mask);
2310         struct _vls_pctrs *vlinfo;
2311         struct hfi1_ibport *ibp = to_iport(ibdev, port);
2312         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2313         int vfi;
2314         u64 tmp, tmp2;
2315
2316         response_data_size = sizeof(struct opa_port_status_rsp) +
2317                                 num_vls * sizeof(struct _vls_pctrs);
2318         if (response_data_size > sizeof(pmp->data)) {
2319                 pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
2320                 return reply((struct ib_mad_hdr *)pmp);
2321         }
2322
2323         if (nports != 1 || (port_num && port_num != port) ||
2324             num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
2325                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2326                 return reply((struct ib_mad_hdr *)pmp);
2327         }
2328
2329         memset(pmp->data, 0, sizeof(pmp->data));
2330
2331         rsp = (struct opa_port_status_rsp *)pmp->data;
2332         if (port_num)
2333                 rsp->port_num = port_num;
2334         else
2335                 rsp->port_num = port;
2336
2337         rsp->port_rcv_constraint_errors =
2338                 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2339                                            CNTR_INVALID_VL));
2340
2341         hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
2342
2343         rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
2344         rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2345                                           CNTR_INVALID_VL));
2346         rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2347                                          CNTR_INVALID_VL));
2348         rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2349                                           CNTR_INVALID_VL));
2350         rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2351                                          CNTR_INVALID_VL));
2352         rsp->port_multicast_xmit_pkts =
2353                 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2354                                         CNTR_INVALID_VL));
2355         rsp->port_multicast_rcv_pkts =
2356                 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2357                                           CNTR_INVALID_VL));
2358         rsp->port_xmit_wait =
2359                 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2360         rsp->port_rcv_fecn =
2361                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2362         rsp->port_rcv_becn =
2363                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2364         rsp->port_xmit_discards =
2365                 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2366                                            CNTR_INVALID_VL));
2367         rsp->port_xmit_constraint_errors =
2368                 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2369                                            CNTR_INVALID_VL));
2370         rsp->port_rcv_remote_physical_errors =
2371                 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2372                                           CNTR_INVALID_VL));
2373         tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2374         tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2375         if (tmp2 < tmp) {
2376                 /* overflow/wrapped */
2377                 rsp->local_link_integrity_errors = cpu_to_be64(~0);
2378         } else {
2379                 rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
2380         }
2381         tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2382         tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2383                                         CNTR_INVALID_VL);
2384         if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2385                 /* overflow/wrapped */
2386                 rsp->link_error_recovery = cpu_to_be32(~0);
2387         } else {
2388                 rsp->link_error_recovery = cpu_to_be32(tmp2);
2389         }
2390         rsp->port_rcv_errors =
2391                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2392         rsp->excessive_buffer_overruns =
2393                 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2394         rsp->fm_config_errors =
2395                 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2396                                           CNTR_INVALID_VL));
2397         rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2398                                           CNTR_INVALID_VL));
2399
2400         /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
2401         tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2402         rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2403
2404         vlinfo = &(rsp->vls[0]);
2405         vfi = 0;
2406         /* The vl_select_mask has been checked above, and we know
2407          * that it contains only entries which represent valid VLs.
2408          * So in the for_each_set_bit() loop below, we don't need
2409          * any additional checks for vl.
2410          */
2411         for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2412                          8 * sizeof(vl_select_mask)) {
2413                 memset(vlinfo, 0, sizeof(*vlinfo));
2414
2415                 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
2416                 rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
2417
2418                 rsp->vls[vfi].port_vl_rcv_pkts =
2419                         cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2420                                         idx_from_vl(vl)));
2421
2422                 rsp->vls[vfi].port_vl_xmit_data =
2423                         cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2424                                         idx_from_vl(vl)));
2425
2426                 rsp->vls[vfi].port_vl_xmit_pkts =
2427                         cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2428                                         idx_from_vl(vl)));
2429
2430                 rsp->vls[vfi].port_vl_xmit_wait =
2431                         cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
2432                                         idx_from_vl(vl)));
2433
2434                 rsp->vls[vfi].port_vl_rcv_fecn =
2435                         cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2436                                         idx_from_vl(vl)));
2437
2438                 rsp->vls[vfi].port_vl_rcv_becn =
2439                         cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2440                                         idx_from_vl(vl)));
2441
2442                 vlinfo++;
2443                 vfi++;
2444         }
2445
2446         a0_portstatus(ppd, rsp, vl_select_mask);
2447
2448         if (resp_len)
2449                 *resp_len += response_data_size;
2450
2451         return reply((struct ib_mad_hdr *)pmp);
2452 }
2453
2454 static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
2455                                      u8 res_lli, u8 res_ler)
2456 {
2457         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2458         struct hfi1_ibport *ibp = to_iport(ibdev, port);
2459         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2460         u64 error_counter_summary = 0, tmp;
2461
2462         error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2463                                                 CNTR_INVALID_VL);
2464         /* port_rcv_switch_relay_errors is 0 for HFIs */
2465         error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
2466                                                 CNTR_INVALID_VL);
2467         error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2468                                                 CNTR_INVALID_VL);
2469         error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2470                                                 CNTR_INVALID_VL);
2471         /* local link integrity must be right-shifted by the lli resolution */
2472         tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2473         tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2474         error_counter_summary += (tmp >> res_lli);
2475         /* link error recovery must be right-shifted by the ler resolution */
2476         tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2477         tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
2478         error_counter_summary += (tmp >> res_ler);
2479         error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
2480                                                 CNTR_INVALID_VL);
2481         error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
2482         error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2483                                                 CNTR_INVALID_VL);
2484         /* ppd->link_downed is a 32-bit value */
2485         error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
2486                                                 CNTR_INVALID_VL);
2487         tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2488         /* this is an 8-bit quantity */
2489         error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
2490
2491         return error_counter_summary;
2492 }
2493
2494 static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp,
2495                             u32 vl_select_mask)
2496 {
2497         if (!is_bx(dd)) {
2498                 unsigned long vl;
2499                 int vfi = 0;
2500                 u64 sum_vl_xmit_wait = 0;
2501
2502                 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2503                                 8 * sizeof(vl_select_mask)) {
2504                         u64 tmp = sum_vl_xmit_wait +
2505                                 be64_to_cpu(rsp->vls[vfi++].port_vl_xmit_wait);
2506                         if (tmp < sum_vl_xmit_wait) {
2507                                 /* we wrapped */
2508                                 sum_vl_xmit_wait = (u64) ~0;
2509                                 break;
2510                         }
2511                         sum_vl_xmit_wait = tmp;
2512                 }
2513                 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
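                /*
                 * Clamp the port-level XmitWait so it never exceeds the
                 * (possibly saturated) per-VL sum.
                 */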
2514                         rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2515         }
2516 }
2517
2518 static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2519                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2520 {
2521         struct opa_port_data_counters_msg *req =
2522                 (struct opa_port_data_counters_msg *)pmp->data;
2523         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2524         struct hfi1_ibport *ibp = to_iport(ibdev, port);
2525         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2526         struct _port_dctrs *rsp;
2527         struct _vls_dctrs *vlinfo;
2528         size_t response_data_size;
2529         u32 num_ports;
2530         u8 num_pslm;
2531         u8 lq, num_vls;
2532         u8 res_lli, res_ler;
2533         u64 port_mask;
2534         unsigned long port_num;
2535         unsigned long vl;
2536         u32 vl_select_mask;
2537         int vfi;
2538
2539         num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2540         num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2541         num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2542         vl_select_mask = be32_to_cpu(req->vl_select_mask);
2543         res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
2544         res_lli = res_lli ? res_lli + ADD_LLI : 0;
2545         res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
2546         res_ler = res_ler ? res_ler + ADD_LER : 0;
2547
2548         if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
2549                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2550                 return reply((struct ib_mad_hdr *)pmp);
2551         }
2552
2553         /* Sanity check */
2554         response_data_size = sizeof(struct opa_port_data_counters_msg) +
2555                                 num_vls * sizeof(struct _vls_dctrs);
2556
2557         if (response_data_size > sizeof(pmp->data)) {
2558                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2559                 return reply((struct ib_mad_hdr *)pmp);
2560         }
2561
2562         /*
2563          * The bit set in the mask needs to be consistent with the
2564          * port the request came in on.
2565          */
2566         port_mask = be64_to_cpu(req->port_select_mask[3]);
2567         port_num = find_first_bit((unsigned long *)&port_mask,
2568                                   sizeof(port_mask) * 8);
2569
2570         if ((u8)port_num != port) {
2571                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2572                 return reply((struct ib_mad_hdr *)pmp);
2573         }
2574
2575         rsp = (struct _port_dctrs *)&(req->port[0]);
2576         memset(rsp, 0, sizeof(*rsp));
2577
2578         rsp->port_number = port;
2579         /*
2580          * Note that link_quality_indicator is a 32 bit quantity in
2581          * 'datacounters' queries (as opposed to 'portinfo' queries,
2582          * where it's a byte).
2583          */
2584         hfi1_read_link_quality(dd, &lq);
2585         rsp->link_quality_indicator = cpu_to_be32((u32)lq);
2586
2587         /* rsp->sw_port_congestion is 0 for HFIs */
2588         /* rsp->port_xmit_time_cong is 0 for HFIs */
2589         /* rsp->port_xmit_wasted_bw ??? */
2590         /* rsp->port_xmit_wait_data ??? */
2591         /* rsp->port_mark_fecn is 0 for HFIs */
2592
2593         rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2594                                                 CNTR_INVALID_VL));
2595         rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2596                                                 CNTR_INVALID_VL));
2597         rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2598                                                 CNTR_INVALID_VL));
2599         rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2600                                                 CNTR_INVALID_VL));
2601         rsp->port_multicast_xmit_pkts =
2602                 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2603                                                 CNTR_INVALID_VL));
2604         rsp->port_multicast_rcv_pkts =
2605                 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2606                                                 CNTR_INVALID_VL));
2607         rsp->port_xmit_wait =
2608                 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2609         rsp->port_rcv_fecn =
2610                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2611         rsp->port_rcv_becn =
2612                 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2613
2614         rsp->port_error_counter_summary =
2615                 cpu_to_be64(get_error_counter_summary(ibdev, port,
2616                                                       res_lli, res_ler));
2617
2618         vlinfo = &(rsp->vls[0]);
2619         vfi = 0;
2620         /* The vl_select_mask has been checked above, and we know
2621          * that it contains only entries which represent valid VLs.
2622          * So in the for_each_set_bit() loop below, we don't need
2623          * any additional checks for vl.
2624          */
2625         for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2626                          8 * sizeof(vl_select_mask)) {
2627                 memset(vlinfo, 0, sizeof(*vlinfo));
2628
2629                 rsp->vls[vfi].port_vl_xmit_data =
2630                         cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2631                                                         idx_from_vl(vl)));
2632
2633                 rsp->vls[vfi].port_vl_rcv_data =
2634                         cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
2635                                                         idx_from_vl(vl)));
2636
2637                 rsp->vls[vfi].port_vl_xmit_pkts =
2638                         cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2639                                                         idx_from_vl(vl)));
2640
2641                 rsp->vls[vfi].port_vl_rcv_pkts =
2642                         cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2643                                                         idx_from_vl(vl)));
2644
2645                 rsp->vls[vfi].port_vl_xmit_wait =
2646                         cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
2647                                                         idx_from_vl(vl)));
2648
2649                 rsp->vls[vfi].port_vl_rcv_fecn =
2650                         cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2651                                                         idx_from_vl(vl)));
2652                 rsp->vls[vfi].port_vl_rcv_becn =
2653                         cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2654                                                         idx_from_vl(vl)));
2655
2656                 /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
2657                 /* rsp->port_vl_xmit_wasted_bw ??? */
2658                 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
2659                  * does this differ from rsp->vls[vfi].port_vl_xmit_wait */
2660                 /* rsp->vls[vfi].port_vl_mark_fecn =
2661                         cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
2662                                 + offset));
2663                 */
2664                 vlinfo++;
2665                 vfi++;
2666         }
2667
2668         a0_datacounters(dd, rsp, vl_select_mask);
2669
2670         if (resp_len)
2671                 *resp_len += response_data_size;
2672
2673         return reply((struct ib_mad_hdr *)pmp);
2674 }
2675
2676 static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
2677                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2678 {
2679         size_t response_data_size;
2680         struct _port_ectrs *rsp;
2681         unsigned long port_num;
2682         struct opa_port_error_counters64_msg *req;
2683         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2684         u32 num_ports;
2685         u8 num_pslm;
2686         u8 num_vls;
2687         struct hfi1_ibport *ibp;
2688         struct hfi1_pportdata *ppd;
2689         struct _vls_ectrs *vlinfo;
2690         unsigned long vl;
2691         u64 port_mask, tmp, tmp2;
2692         u32 vl_select_mask;
2693         int vfi;
2694
2695         req = (struct opa_port_error_counters64_msg *)pmp->data;
2696
2697         num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2698
2699         num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2700         num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2701
2702         if (num_ports != 1 || num_ports != num_pslm) {
2703                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2704                 return reply((struct ib_mad_hdr *)pmp);
2705         }
2706
2707         response_data_size = sizeof(struct opa_port_error_counters64_msg) +
2708                                 num_vls * sizeof(struct _vls_ectrs);
2709
2710         if (response_data_size > sizeof(pmp->data)) {
2711                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2712                 return reply((struct ib_mad_hdr *)pmp);
2713         }
2714         /*
2715          * The bit set in the mask needs to be consistent with the
2716          * port the request came in on.
2717          */
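        /* note that find_first_bit() takes its size argument in bits */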
2718         port_mask = be64_to_cpu(req->port_select_mask[3]);
2719         port_num = find_first_bit((unsigned long *)&port_mask,
2720                                         sizeof(port_mask) * 8);
2721
2722         if ((u8)port_num != port) {
2723                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2724                 return reply((struct ib_mad_hdr *)pmp);
2725         }
2726
2727         rsp = (struct _port_ectrs *)&(req->port[0]);
2728
2729         ibp = to_iport(ibdev, port_num);
2730         ppd = ppd_from_ibp(ibp);
2731
2732         memset(rsp, 0, sizeof(*rsp));
2733         rsp->port_number = (u8)port_num;
2734
2735         rsp->port_rcv_constraint_errors =
2736                 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2737                                            CNTR_INVALID_VL));
2738         /* port_rcv_switch_relay_errors is 0 for HFIs */
2739         rsp->port_xmit_discards =
2740                 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2741                                                 CNTR_INVALID_VL));
2742         rsp->port_rcv_remote_physical_errors =
2743                 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2744                                                 CNTR_INVALID_VL));
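        /*
         * LocalLinkIntegrityErrors is reported as the sum of the RX and
         * TX replay counters, and LinkErrorRecovery as the sum of the
         * sequence-CRC and reinit-from-peer counters below; both sums
         * saturate at all-ones (the latter clamped to its 32-bit wire
         * field) rather than wrapping.
         */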
2745         tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2746         tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2747         if (tmp2 < tmp) {
2748                 /* overflow/wrapped */
2749                 rsp->local_link_integrity_errors = cpu_to_be64(~0);
2750         } else {
2751                 rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
2752         }
2753         tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2754         tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2755                                         CNTR_INVALID_VL);
2756         if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2757                 /* overflow/wrapped */
2758                 rsp->link_error_recovery = cpu_to_be32(~0);
2759         } else {
2760                 rsp->link_error_recovery = cpu_to_be32(tmp2);
2761         }
2762         rsp->port_xmit_constraint_errors =
2763                 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2764                                            CNTR_INVALID_VL));
2765         rsp->excessive_buffer_overruns =
2766                 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2767         rsp->fm_config_errors =
2768                 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2769                                                 CNTR_INVALID_VL));
2770         rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2771                                                 CNTR_INVALID_VL));
2772         tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2773         rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2774
2775         vlinfo = (struct _vls_ectrs *)&(rsp->vls[0]);
2776         vfi = 0;
2777         vl_select_mask = be32_to_cpu(req->vl_select_mask);
2778         for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2779                          8 * sizeof(req->vl_select_mask)) {
2780                 memset(vlinfo, 0, sizeof(*vlinfo));
2781                 /* vlinfo->vls[vfi].port_vl_xmit_discards ??? */
2782                 vlinfo += 1;
2783                 vfi++;
2784         }
2785
2786         if (resp_len)
2787                 *resp_len += response_data_size;
2788
2789         return reply((struct ib_mad_hdr *)pmp);
2790 }
2791
2792 static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
2793                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2794 {
2795         size_t response_data_size;
2796         struct _port_ei *rsp;
2797         struct opa_port_error_info_msg *req;
2798         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2799         u64 port_mask;
2800         u32 num_ports;
2801         unsigned long port_num;
2802         u8 num_pslm;
2803         u64 reg;
2804
2805         req = (struct opa_port_error_info_msg *)pmp->data;
2806         rsp = (struct _port_ei *)&(req->port[0]);
2807
2808         num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
2809         num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2810
2811         memset(rsp, 0, sizeof(*rsp));
2812
2813         if (num_ports != 1 || num_ports != num_pslm) {
2814                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2815                 return reply((struct ib_mad_hdr *)pmp);
2816         }
2817
2818         /* Sanity check */
2819         response_data_size = sizeof(struct opa_port_error_info_msg);
2820
2821         if (response_data_size > sizeof(pmp->data)) {
2822                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2823                 return reply((struct ib_mad_hdr *)pmp);
2824         }
2825
2826         /*
2827          * The bit set in the mask needs to be consistent with the port
2828          * the request came in on.
2829          */
2830         port_mask = be64_to_cpu(req->port_select_mask[3]);
2831         port_num = find_first_bit((unsigned long *)&port_mask,
2832                                   sizeof(port_mask) * 8);
2833
2834         if ((u8)port_num != port) {
2835                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2836                 return reply((struct ib_mad_hdr *)pmp);
2837         }
2838
2839         /* PortRcvErrorInfo */
2840         rsp->port_rcv_ei.status_and_code =
2841                 dd->err_info_rcvport.status_and_code;
2842         memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
2843                 &dd->err_info_rcvport.packet_flit1, sizeof(u64));
2844         memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
2845                 &dd->err_info_rcvport.packet_flit2, sizeof(u64));
2846
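        /*
         * status_and_sc is assembled below as: bit 7 = status flag,
         * with the SC of the first offending packet (taken from the
         * low bits of RCV_ERR_INFO) shifted into the field beneath it.
         */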
2847         /* ExcessiveBufferOverrunInfo */
2848         reg = read_csr(dd, RCV_ERR_INFO);
2849         if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
2850                 /* if the RcvExcessBufferOverrun bit is set, save SC of
2851                  * first pkt that encountered an excess buffer overrun */
2852                 u8 tmp = (u8)reg;
2853
2854                 tmp &=  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
2855                 tmp <<= 2;
2856                 rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
2857                 /* set the status bit */
2858                 rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
2859         }
2860
2861         rsp->port_xmit_constraint_ei.status =
2862                 dd->err_info_xmit_constraint.status;
2863         rsp->port_xmit_constraint_ei.pkey =
2864                 cpu_to_be16(dd->err_info_xmit_constraint.pkey);
2865         rsp->port_xmit_constraint_ei.slid =
2866                 cpu_to_be32(dd->err_info_xmit_constraint.slid);
2867
2868         rsp->port_rcv_constraint_ei.status =
2869                 dd->err_info_rcv_constraint.status;
2870         rsp->port_rcv_constraint_ei.pkey =
2871                 cpu_to_be16(dd->err_info_rcv_constraint.pkey);
2872         rsp->port_rcv_constraint_ei.slid =
2873                 cpu_to_be32(dd->err_info_rcv_constraint.slid);
2874
2875         /* UncorrectableErrorInfo */
2876         rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
2877
2878         /* FMConfigErrorInfo */
2879         rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
2880
2881         if (resp_len)
2882                 *resp_len += response_data_size;
2883
2884         return reply((struct ib_mad_hdr *)pmp);
2885 }
2886
2887 static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
2888                         struct ib_device *ibdev, u8 port, u32 *resp_len)
2889 {
2890         struct opa_clear_port_status *req =
2891                 (struct opa_clear_port_status *)pmp->data;
2892         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2893         struct hfi1_ibport *ibp = to_iport(ibdev, port);
2894         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2895         u32 nports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
2896         u64 portn = be64_to_cpu(req->port_select_mask[3]);
2897         u32 counter_select = be32_to_cpu(req->counter_select_mask);
2898         u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
2899         unsigned long vl;
2900
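        /*
         * The request must select exactly one port, and it must be the
         * port this MAD arrived on; counter_select then picks which of
         * the counters below get cleared.
         */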
2901         if ((nports != 1) || (portn != 1 << port)) {
2902                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2903                 return reply((struct ib_mad_hdr *)pmp);
2904         }
2905         /*
2906          * only counters returned by pma_get_opa_portstatus() are
2907          * handled, so when pma_get_opa_portstatus() gets a fix,
2908          * the corresponding change should be made here as well.
2909          */
2910
2911         if (counter_select & CS_PORT_XMIT_DATA)
2912                 write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
2913
2914         if (counter_select & CS_PORT_RCV_DATA)
2915                 write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
2916
2917         if (counter_select & CS_PORT_XMIT_PKTS)
2918                 write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
2919
2920         if (counter_select & CS_PORT_RCV_PKTS)
2921                 write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
2922
2923         if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
2924                 write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
2925
2926         if (counter_select & CS_PORT_MCAST_RCV_PKTS)
2927                 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
2928
2929         if (counter_select & CS_PORT_XMIT_WAIT)
2930                 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
2931
2932         /* ignore cs_sw_portCongestion for HFIs */
2933
2934         if (counter_select & CS_PORT_RCV_FECN)
2935                 write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
2936
2937         if (counter_select & CS_PORT_RCV_BECN)
2938                 write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
2939
2940         /* ignore cs_port_xmit_time_cong for HFIs */
2941         /* ignore cs_port_xmit_wasted_bw for now */
2942         /* ignore cs_port_xmit_wait_data for now */
2943         if (counter_select & CS_PORT_RCV_BUBBLE)
2944                 write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
2945
2946         /* Only applicable for switches:
2947          * if (counter_select & CS_PORT_MARK_FECN)
2948          *      write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0); */
2949
2950         if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
2951                 write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
2952
2953         /* ignore cs_port_rcv_switch_relay_errors for HFIs */
2954         if (counter_select & CS_PORT_XMIT_DISCARDS)
2955                 write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
2956
2957         if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
2958                 write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
2959
2960         if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
2961                 write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
2962
2963         if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) {
2964                 write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
2965                 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
2966         }
2967
2968         if (counter_select & CS_LINK_ERROR_RECOVERY) {
2969                 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
2970                 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2971                                                 CNTR_INVALID_VL, 0);
2972         }
2973
2974         if (counter_select & CS_PORT_RCV_ERRORS)
2975                 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
2976
2977         if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
2978                 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
2979                 dd->rcv_ovfl_cnt = 0;
2980         }
2981
2982         if (counter_select & CS_FM_CONFIG_ERRORS)
2983                 write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
2984
2985         if (counter_select & CS_LINK_DOWNED)
2986                 write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
2987
2988         if (counter_select & CS_UNCORRECTABLE_ERRORS)
2989                 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
2990
2991         for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2992                          8 * sizeof(vl_select_mask)) {
2993
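                /* per-VL counters are indexed via idx_from_vl(), not the
                 * raw VL number */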
2994                 if (counter_select & CS_PORT_XMIT_DATA)
2995                         write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
2996
2997                 if (counter_select & CS_PORT_RCV_DATA)
2998                         write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
2999
3000                 if (counter_select & CS_PORT_XMIT_PKTS)
3001                         write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
3002
3003                 if (counter_select & CS_PORT_RCV_PKTS)
3004                         write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
3005
3006                 if (counter_select & CS_PORT_XMIT_WAIT)
3007                         write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
3008
3009                 /* sw_port_vl_congestion is 0 for HFIs */
3010                 if (counter_select & CS_PORT_RCV_FECN)
3011                         write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
3012
3013                 if (counter_select & CS_PORT_RCV_BECN)
3014                         write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
3015
3016                 /* port_vl_xmit_time_cong is 0 for HFIs */
3017                 /* port_vl_xmit_wasted_bw ??? */
3018                 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
3019                 if (counter_select & CS_PORT_RCV_BUBBLE)
3020                         write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
3021
3022                 /* if (counter_select & CS_PORT_MARK_FECN)
3023                  *      write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
3024                  */
3025                 /* port_vl_xmit_discards ??? */
3026         }
3027
3028         if (resp_len)
3029                 *resp_len += sizeof(*req);
3030
3031         return reply((struct ib_mad_hdr *)pmp);
3032 }
3033
3034 static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
3035                         struct ib_device *ibdev, u8 port, u32 *resp_len)
3036 {
3037         struct _port_ei *rsp;
3038         struct opa_port_error_info_msg *req;
3039         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3040         u64 port_mask;
3041         u32 num_ports;
3042         unsigned long port_num;
3043         u8 num_pslm;
3044         u32 error_info_select;
3045
3046         req = (struct opa_port_error_info_msg *)pmp->data;
3047         rsp = (struct _port_ei *)&(req->port[0]);
3048
3049         num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3050         num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3051
3052         memset(rsp, 0, sizeof(*rsp));
3053
3054         if (num_ports != 1 || num_ports != num_pslm) {
3055                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3056                 return reply((struct ib_mad_hdr *)pmp);
3057         }
3058
3059         /*
3060          * The bit set in the mask needs to be consistent with the port
3061          * the request came in on.
3062          */
3063         port_mask = be64_to_cpu(req->port_select_mask[3]);
3064         port_num = find_first_bit((unsigned long *)&port_mask,
3065                                   sizeof(port_mask) * 8);
3066
3067         if ((u8)port_num != port) {
3068                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3069                 return reply((struct ib_mad_hdr *)pmp);
3070         }
3071
3072         error_info_select = be32_to_cpu(req->error_info_select_mask);
3073
3074         /* PortRcvErrorInfo */
3075         if (error_info_select & ES_PORT_RCV_ERROR_INFO)
3076                 /* turn off status bit */
3077                 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3078
3079         /* ExcessiveBufferOverrunInfo */
3080         if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
3081                 /* the status bit lives in the h/w - bit 5 of RCV_ERR_INFO;
3082                  * the write below clears it (write-one-to-clear) */
3083                 write_csr(dd, RCV_ERR_INFO,
3084                           RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
3085
3086         if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
3087                 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3088
3089         if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
3090                 dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
3091
3092         /* UncorrectableErrorInfo */
3093         if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
3094                 /* turn off status bit */
3095                 dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
3096
3097         /* FMConfigErrorInfo */
3098         if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
3099                 /* turn off status bit */
3100                 dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
3101
3102         if (resp_len)
3103                 *resp_len += sizeof(*req);
3104
3105         return reply((struct ib_mad_hdr *)pmp);
3106 }
3107
3108 struct opa_congestion_info_attr {
3109         __be16 congestion_info;
3110         u8 control_table_cap;   /* Multiple of 64 entry unit CCTs */
3111         u8 congestion_log_length;
3112 } __packed;
3113
3114 static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
3115                                     struct ib_device *ibdev, u8 port,
3116                                     u32 *resp_len)
3117 {
3118         struct opa_congestion_info_attr *p =
3119                 (struct opa_congestion_info_attr *)data;
3120         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3121         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3122
3123         p->congestion_info = 0;
3124         p->control_table_cap = ppd->cc_max_table_entries;
3125         p->congestion_log_length = OPA_CONG_LOG_ELEMS;
3126
3127         if (resp_len)
3128                 *resp_len += sizeof(*p);
3129
3130         return reply((struct ib_mad_hdr *)smp);
3131 }
3132
3133 static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
3134                                              u8 *data,
3135                                              struct ib_device *ibdev,
3136                                              u8 port, u32 *resp_len)
3137 {
3138         int i;
3139         struct opa_congestion_setting_attr *p =
3140                 (struct opa_congestion_setting_attr *) data;
3141         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3142         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3143         struct opa_congestion_setting_entry_shadow *entries;
3144         struct cc_state *cc_state;
3145
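        /*
         * The congestion-control state is RCU protected; a NULL
         * cc_state (no table installed yet) is answered with an
         * empty reply.
         */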
3146         rcu_read_lock();
3147
3148         cc_state = get_cc_state(ppd);
3149
3150         if (cc_state == NULL) {
3151                 rcu_read_unlock();
3152                 return reply((struct ib_mad_hdr *)smp);
3153         }
3154
3155         entries = cc_state->cong_setting.entries;
3156         p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
3157         p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
3158         for (i = 0; i < OPA_MAX_SLS; i++) {
3159                 p->entries[i].ccti_increase = entries[i].ccti_increase;
3160                 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
3161                 p->entries[i].trigger_threshold =
3162                         entries[i].trigger_threshold;
3163                 p->entries[i].ccti_min = entries[i].ccti_min;
3164         }
3165
3166         rcu_read_unlock();
3167
3168         if (resp_len)
3169                 *resp_len += sizeof(*p);
3170
3171         return reply((struct ib_mad_hdr *)smp);
3172 }
3173
3174 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3175                                        struct ib_device *ibdev, u8 port,
3176                                        u32 *resp_len)
3177 {
3178         struct opa_congestion_setting_attr *p =
3179                 (struct opa_congestion_setting_attr *) data;
3180         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3181         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3182         struct opa_congestion_setting_entry_shadow *entries;
3183         int i;
3184
3185         ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3186
3187         entries = ppd->congestion_entries;
3188         for (i = 0; i < OPA_MAX_SLS; i++) {
3189                 entries[i].ccti_increase = p->entries[i].ccti_increase;
3190                 entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
3191                 entries[i].trigger_threshold =
3192                         p->entries[i].trigger_threshold;
3193                 entries[i].ccti_min = p->entries[i].ccti_min;
3194         }
3195
3196         return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
3197                                            resp_len);
3198 }
3199
3200 static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
3201                                         u8 *data, struct ib_device *ibdev,
3202                                         u8 port, u32 *resp_len)
3203 {
3204         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3205         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3206         struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
3207         s64 ts;
3208         int i;
3209
3210         if (am != 0) {
3211                 smp->status |= IB_SMP_INVALID_FIELD;
3212                 return reply((struct ib_mad_hdr *)smp);
3213         }
3214
3215         spin_lock_irq(&ppd->cc_log_lock);
3216
3217         cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
3218         cong_log->congestion_flags = 0;
3219         cong_log->threshold_event_counter =
3220                 cpu_to_be16(ppd->threshold_event_counter);
3221         memcpy(cong_log->threshold_cong_event_map,
3222                ppd->threshold_cong_event_map,
3223                sizeof(cong_log->threshold_cong_event_map));
3224         /* keep timestamp in units of 1.024 usec */
3225         ts = ktime_to_ns(ktime_get()) / 1024;
3226         cong_log->current_time_stamp = cpu_to_be32(ts);
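        /*
         * Walk the event ring starting at cc_mad_idx, which presumably
         * points at the oldest entry, so events come back in the order
         * they were logged; stale entries are skipped (left zeroed)
         * per the age check below.
         */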
3227         for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
3228                 struct opa_hfi1_cong_log_event_internal *cce =
3229                         &ppd->cc_events[ppd->cc_mad_idx++];
3230                 if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
3231                         ppd->cc_mad_idx = 0;
3232                 /*
3233                  * Entries which are older than twice the time
3234                  * required to wrap the counter are supposed to
3235                  * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
3236                  */
3237                 if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
3238                         continue;
3239                 memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
3240                 memcpy(cong_log->events[i].remote_qp_number_cn_entry,
3241                         &cce->rqpn, 3);
3242                 cong_log->events[i].sl_svc_type_cn_entry =
3243                         ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
3244                 cong_log->events[i].remote_lid_cn_entry =
3245                         cpu_to_be32(cce->rlid);
3246                 cong_log->events[i].timestamp_cn_entry =
3247                         cpu_to_be32(cce->timestamp);
3248         }
3249
3250         /*
3251          * Reset threshold_cong_event_map, and threshold_event_counter
3252          * to 0 when log is read.
3253          */
3254         memset(ppd->threshold_cong_event_map, 0x0,
3255                sizeof(ppd->threshold_cong_event_map));
3256         ppd->threshold_event_counter = 0;
3257
3258         spin_unlock_irq(&ppd->cc_log_lock);
3259
3260         if (resp_len)
3261                 *resp_len += sizeof(struct opa_hfi1_cong_log);
3262
3263         return reply((struct ib_mad_hdr *)smp);
3264 }
3265
3266 static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3267                                    struct ib_device *ibdev, u8 port,
3268                                    u32 *resp_len)
3269 {
3270         struct ib_cc_table_attr *cc_table_attr =
3271                 (struct ib_cc_table_attr *) data;
3272         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3273         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3274         u32 start_block = OPA_AM_START_BLK(am);
3275         u32 n_blocks = OPA_AM_NBLK(am);
3276         struct ib_cc_table_entry_shadow *entries;
3277         int i, j;
3278         u32 sentry, eentry;
3279         struct cc_state *cc_state;
3280
3281         /* sanity check n_blocks, start_block */
3282         if (n_blocks == 0 ||
3283             start_block + n_blocks > ppd->cc_max_table_entries) {
3284                 smp->status |= IB_SMP_INVALID_FIELD;
3285                 return reply((struct ib_mad_hdr *)smp);
3286         }
3287
3288         rcu_read_lock();
3289
3290         cc_state = get_cc_state(ppd);
3291
3292         if (cc_state == NULL) {
3293                 rcu_read_unlock();
3294                 return reply((struct ib_mad_hdr *)smp);
3295         }
3296
3297         sentry = start_block * IB_CCT_ENTRIES;
3298         eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
3299
3300         cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
3301
3302         entries = cc_state->cct.entries;
3303
3304         /* return n_blocks, though the last block may not be full */
3305         for (j = 0, i = sentry; i < eentry; j++, i++)
3306                 cc_table_attr->ccti_entries[j].entry =
3307                         cpu_to_be16(entries[i].entry);
3308
3309         rcu_read_unlock();
3310
3311         if (resp_len)
3312                 *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
3313
3314         return reply((struct ib_mad_hdr *)smp);
3315 }
3316
3317 void cc_state_reclaim(struct rcu_head *rcu)
3318 {
3319         struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
3320
3321         kfree(cc_state);
3322 }
3323
3324 static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3325                                    struct ib_device *ibdev, u8 port,
3326                                    u32 *resp_len)
3327 {
3328         struct ib_cc_table_attr *p = (struct ib_cc_table_attr *) data;
3329         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3330         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3331         u32 start_block = OPA_AM_START_BLK(am);
3332         u32 n_blocks = OPA_AM_NBLK(am);
3333         struct ib_cc_table_entry_shadow *entries;
3334         int i, j;
3335         u32 sentry, eentry;
3336         u16 ccti_limit;
3337         struct cc_state *old_cc_state, *new_cc_state;
3338
3339         /* sanity check n_blocks, start_block */
3340         if (n_blocks == 0 ||
3341             start_block + n_blocks > ppd->cc_max_table_entries) {
3342                 smp->status |= IB_SMP_INVALID_FIELD;
3343                 return reply((struct ib_mad_hdr *)smp);
3344         }
3345
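        /*
         * sentry/eentry bound the shadow-table entries covered by this
         * request: whole blocks of IB_CCT_ENTRIES except the last,
         * which runs only up to ccti_limit (the index of the last
         * valid entry).
         */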
3346         sentry = start_block * IB_CCT_ENTRIES;
3347         eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
3348                  (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
3349
3350         /* sanity check ccti_limit */
3351         ccti_limit = be16_to_cpu(p->ccti_limit);
3352         if (ccti_limit + 1 > eentry) {
3353                 smp->status |= IB_SMP_INVALID_FIELD;
3354                 return reply((struct ib_mad_hdr *)smp);
3355         }
3356
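        /*
         * Update follows the usual RCU copy/publish pattern: build a
         * complete replacement cc_state, publish it with
         * rcu_assign_pointer(), and free the old state after a grace
         * period via call_rcu().
         */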
3357         new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3358         if (new_cc_state == NULL)
3359                 goto getit;
3360
3361         spin_lock(&ppd->cc_state_lock);
3362
3363         old_cc_state = get_cc_state(ppd);
3364
3365         if (old_cc_state == NULL) {
3366                 spin_unlock(&ppd->cc_state_lock);
3367                 kfree(new_cc_state);
3368                 return reply((struct ib_mad_hdr *)smp);
3369         }
3370
3371         *new_cc_state = *old_cc_state;
3372
3373         new_cc_state->cct.ccti_limit = ccti_limit;
3374
3375         entries = ppd->ccti_entries;
3376         ppd->total_cct_entry = ccti_limit + 1;
3377
3378         for (j = 0, i = sentry; i < eentry; j++, i++)
3379                 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
3380
3381         memcpy(new_cc_state->cct.entries, entries,
3382                eentry * sizeof(struct ib_cc_table_entry));
3383
3384         new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3385         new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3386         memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3387                OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3388
3389         rcu_assign_pointer(ppd->cc_state, new_cc_state);
3390
3391         spin_unlock(&ppd->cc_state_lock);
3392
3393         call_rcu(&old_cc_state->rcu, cc_state_reclaim);
3394
3395 getit:
3396         return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
3397 }
3398
3399 struct opa_led_info {
3400         __be32 rsvd_led_mask;
3401         __be32 rsvd;
3402 };
3403
3404 #define OPA_LED_SHIFT   31
3405 #define OPA_LED_MASK    (1 << OPA_LED_SHIFT)
3406
3407 static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3408                                    struct ib_device *ibdev, u8 port,
3409                                    u32 *resp_len)
3410 {
3411         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3412         struct opa_led_info *p = (struct opa_led_info *) data;
3413         u32 nport = OPA_AM_NPORT(am);
3414         u64 reg;
3415
3416         if (nport != 1) {
3417                 smp->status |= IB_SMP_INVALID_FIELD;
3418                 return reply((struct ib_mad_hdr *)smp);
3419         }
3420
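        /*
         * Report the LED as forced on when s/w control of the LED is
         * enabled and the blink rate reads 0xf, presumably the value
         * setextled() programs for a solid "on".
         */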
3421         reg = read_csr(dd, DCC_CFG_LED_CNTRL);
3422         if ((reg & DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK) &&
3423             ((reg & DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK) == 0xf))
3424                 p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK);
3425
3426         if (resp_len)
3427                 *resp_len += sizeof(struct opa_led_info);
3428
3429         return reply((struct ib_mad_hdr *)smp);
3430 }
3431
3432 static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3433                                    struct ib_device *ibdev, u8 port,
3434                                    u32 *resp_len)
3435 {
3436         struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3437         struct opa_led_info *p = (struct opa_led_info *) data;
3438         u32 nport = OPA_AM_NPORT(am);
3439         int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
3440
3441         if (nport != 1) {
3442                 smp->status |= IB_SMP_INVALID_FIELD;
3443                 return reply((struct ib_mad_hdr *)smp);
3444         }
3445
3446         setextled(dd, on);
3447
3448         return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
3449 }
3450
3451 static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3452                             u8 *data, struct ib_device *ibdev, u8 port,
3453                             u32 *resp_len)
3454 {
3455         int ret;
3456         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3457
3458         switch (attr_id) {
3459         case IB_SMP_ATTR_NODE_DESC:
3460                 ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
3461                                               resp_len);
3462                 break;
3463         case IB_SMP_ATTR_NODE_INFO:
3464                 ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
3465                                               resp_len);
3466                 break;
3467         case IB_SMP_ATTR_PORT_INFO:
3468                 ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
3469                                               resp_len);
3470                 break;
3471         case IB_SMP_ATTR_PKEY_TABLE:
3472                 ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
3473                                                resp_len);
3474                 break;
3475         case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3476                 ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
3477                                               resp_len);
3478                 break;
3479         case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3480                 ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
3481                                               resp_len);
3482                 break;
3483         case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3484                 ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
3485                                                resp_len);
3486                 break;
3487         case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3488                 ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
3489                                                resp_len);
3490                 break;
3491         case OPA_ATTRIB_ID_PORT_STATE_INFO:
3492                 ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
3493                                          resp_len);
3494                 break;
3495         case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3496                 ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
3497                                          resp_len);
3498                 break;
3499         case OPA_ATTRIB_ID_CABLE_INFO:
3500                 ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
3501                                                 resp_len);
3502                 break;
3503         case IB_SMP_ATTR_VL_ARB_TABLE:
3504                 ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
3505                                             resp_len);
3506                 break;
3507         case OPA_ATTRIB_ID_CONGESTION_INFO:
3508                 ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
3509                                                resp_len);
3510                 break;
3511         case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3512                 ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
3513                                                   port, resp_len);
3514                 break;
3515         case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
3516                 ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
3517                                                    port, resp_len);
3518                 break;
3519         case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3520                 ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
3521                                               resp_len);
3522                 break;
3523         case IB_SMP_ATTR_LED_INFO:
3524                 ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
3525                                               resp_len);
3526                 break;
3527         case IB_SMP_ATTR_SM_INFO:
3528                 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED)
3529                         return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
3530                 if (ibp->port_cap_flags & IB_PORT_SM)
3531                         return IB_MAD_RESULT_SUCCESS;
3532                 /* FALLTHROUGH */
3533         default:
3534                 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3535                 ret = reply((struct ib_mad_hdr *)smp);
3536                 break;
3537         }
3538         return ret;
3539 }
3540
3541 static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3542                             u8 *data, struct ib_device *ibdev, u8 port,
3543                             u32 *resp_len)
3544 {
3545         int ret;
3546         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3547
3548         switch (attr_id) {
3549         case IB_SMP_ATTR_PORT_INFO:
3550                 ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
3551                                               resp_len);
3552                 break;
3553         case IB_SMP_ATTR_PKEY_TABLE:
3554                 ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
3555                                                resp_len);
3556                 break;
3557         case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3558                 ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
3559                                               resp_len);
3560                 break;
3561         case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3562                 ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
3563                                               resp_len);
3564                 break;
3565         case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3566                 ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
3567                                                resp_len);
3568                 break;
3569         case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3570                 ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
3571                                                resp_len);
3572                 break;
3573         case OPA_ATTRIB_ID_PORT_STATE_INFO:
3574                 ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
3575                                          resp_len);
3576                 break;
3577         case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3578                 ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
3579                                          resp_len);
3580                 break;
3581         case IB_SMP_ATTR_VL_ARB_TABLE:
3582                 ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
3583                                             resp_len);
3584                 break;
3585         case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3586                 ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
3587                                                   port, resp_len);
3588                 break;
3589         case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3590                 ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
3591                                               resp_len);
3592                 break;
3593         case IB_SMP_ATTR_LED_INFO:
3594                 ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
3595                                               resp_len);
3596                 break;
3597         case IB_SMP_ATTR_SM_INFO:
3598                 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED)
3599                         return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
3600                 if (ibp->port_cap_flags & IB_PORT_SM)
3601                         return IB_MAD_RESULT_SUCCESS;
3602                 /* FALLTHROUGH */
3603         default:
3604                 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3605                 ret = reply((struct ib_mad_hdr *)smp);
3606                 break;
3607         }
3608         return ret;
3609 }
3610
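/*
 * The low 7 bits of an aggregate segment's err_reqlength give the
 * payload length in units of 8 bytes; the top bit is the per-segment
 * error flag, set here when processing of the segment fails.
 */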
3611 static inline void set_aggr_error(struct opa_aggregate *ag)
3612 {
3613         ag->err_reqlength |= cpu_to_be16(0x8000);
3614 }
3615
3616 static int subn_get_opa_aggregate(struct opa_smp *smp,
3617                                   struct ib_device *ibdev, u8 port,
3618                                   u32 *resp_len)
3619 {
3620         int i;
3621         u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3622         u8 *next_smp = opa_get_smp_data(smp);
3623
3624         if (num_attr < 1 || num_attr > 117) {
3625                 smp->status |= IB_SMP_INVALID_FIELD;
3626                 return reply((struct ib_mad_hdr *)smp);
3627         }
3628
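        /*
         * Walk the concatenated segments, checking that each header
         * plus its payload still fits inside the SMP before dispatching
         * it to the ordinary Get handler with its payload zeroed.
         */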
3629         for (i = 0; i < num_attr; i++) {
3630                 struct opa_aggregate *agg;
3631                 size_t agg_data_len;
3632                 size_t agg_size;
3633                 u32 am;
3634
3635                 agg = (struct opa_aggregate *)next_smp;
3636                 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3637                 agg_size = sizeof(*agg) + agg_data_len;
3638                 am = be32_to_cpu(agg->attr_mod);
3639
3640                 *resp_len += agg_size;
3641
3642                 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3643                         smp->status |= IB_SMP_INVALID_FIELD;
3644                         return reply((struct ib_mad_hdr *)smp);
3645                 }
3646
3647                 /* zero the payload for this segment */
3648                 memset(next_smp + sizeof(*agg), 0, agg_data_len);
3649
3650                 (void) subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
3651                                         ibdev, port, NULL);
3652                 if (smp->status & ~IB_SMP_DIRECTION) {
3653                         set_aggr_error(agg);
3654                         return reply((struct ib_mad_hdr *)smp);
3655                 }
3656                 next_smp += agg_size;
3658         }
3659
3660         return reply((struct ib_mad_hdr *)smp);
3661 }
3662
3663 static int subn_set_opa_aggregate(struct opa_smp *smp,
3664                                   struct ib_device *ibdev, u8 port,
3665                                   u32 *resp_len)
3666 {
3667         int i;
3668         u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3669         u8 *next_smp = opa_get_smp_data(smp);
3670
3671         if (num_attr < 1 || num_attr > 117) {
3672                 smp->status |= IB_SMP_INVALID_FIELD;
3673                 return reply((struct ib_mad_hdr *)smp);
3674         }
3675
3676         for (i = 0; i < num_attr; i++) {
3677                 struct opa_aggregate *agg;
3678                 size_t agg_data_len;
3679                 size_t agg_size;
3680                 u32 am;
3681
3682                 agg = (struct opa_aggregate *)next_smp;
3683                 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3684                 agg_size = sizeof(*agg) + agg_data_len;
3685                 am = be32_to_cpu(agg->attr_mod);
3686
3687                 *resp_len += agg_size;
3688
3689                 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3690                         smp->status |= IB_SMP_INVALID_FIELD;
3691                         return reply((struct ib_mad_hdr *)smp);
3692                 }
3693
3694                 (void) subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
3695                                         ibdev, port, NULL);
3696                 if (smp->status & ~IB_SMP_DIRECTION) {
3697                         set_aggr_error(agg);
3698                         return reply((struct ib_mad_hdr *)smp);
3699                 }
3700                 next_smp += agg_size;
3702         }
3703
3704         return reply((struct ib_mad_hdr *)smp);
3705 }
3706
3707 /*
3708  * OPAv1 specifies that, on the transition to link up, these counters
3709  * are cleared:
3710  *   PortRcvErrors [*]
3711  *   LinkErrorRecovery
3712  *   LocalLinkIntegrityErrors
3713  *   ExcessiveBufferOverruns [*]
3714  *
3715  * [*] Error info associated with these counters is retained, but the
3716  * error info status is reset to 0.
3717  */
3718 void clear_linkup_counters(struct hfi1_devdata *dd)
3719 {
3720         /* PortRcvErrors */
3721         write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3722         dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3723         /* LinkErrorRecovery */
3724         write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3725         write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
3726         /* LocalLinkIntegrityErrors */
3727         write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
3728         write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
3729         /* ExcessiveBufferOverruns */
3730         write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3731         dd->rcv_ovfl_cnt = 0;
3732         dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3733 }
3734
3735 /*
3736  * is_local_mad() returns 1 if 'mad' is sent from, and destined to the
3737  * local node, 0 otherwise.
3738  */
3739 static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
3740                         const struct ib_wc *in_wc)
3741 {
3742         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3743         const struct opa_smp *smp = (const struct opa_smp *)mad;
3744
3745         if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
3746                 return (smp->hop_cnt == 0 &&
3747                         smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
3748                         smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
3749         }
3750
3751         return (in_wc->slid == ppd->lid);
3752 }
3753
3754 /*
3755  * opa_local_smp_check() should only be called on MADs for which
3756  * is_local_mad() returns true. It applies the SMP checks that are
3757  * specific to SMPs which are sent from, and destined to this node.
3758  * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
3759  * otherwise.
3760  *
3761  * SMPs which arrive from other nodes are instead checked by
3762  * opa_smp_check().
3763  */
3764 static int opa_local_smp_check(struct hfi1_ibport *ibp,
3765                                const struct ib_wc *in_wc)
3766 {
3767         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3768         u16 slid = in_wc->slid;
3769         u16 pkey;
3770
3771         if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
3772                 return 1;
3773
3774         pkey = ppd->pkeys[in_wc->pkey_index];
3775         /*
3776          * We need to do the "node-local" checks specified in OPAv1,
3777          * rev 0.90, section 9.10.26, which are:
3778          *   - pkey is 0x7fff, or 0xffff
3779          *   - Source QPN == 0 || Destination QPN == 0
3780          *   - the MAD header's management class is either
3781          *     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
3782          *     IB_MGMT_CLASS_SUBN_LID_ROUTED
3783          *   - SLID != 0
3784          *
3785          * However, we know (and so don't need to check again) that,
3786          * for local SMPs, the MAD stack passes MADs with:
3787          *   - Source QPN of 0
3788          *   - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
3789          *   - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
3790          *     our own port's lid
3791          *
3792          */
3793         if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
3794                 return 0;
3795         ingress_pkey_table_fail(ppd, pkey, slid);
3796         return 1;
3797 }
3798
3799 static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
3800                             u8 port, const struct opa_mad *in_mad,
3801                             struct opa_mad *out_mad,
3802                             u32 *resp_len)
3803 {
3804         struct opa_smp *smp = (struct opa_smp *)out_mad;
3805         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3806         u8 *data;
3807         u32 am;
3808         __be16 attr_id;
3809         int ret;
3810
3811         *out_mad = *in_mad;
3812         data = opa_get_smp_data(smp);
3813
3814         am = be32_to_cpu(smp->attr_mod);
3815         attr_id = smp->attr_id;
3816         if (smp->class_version != OPA_SMI_CLASS_VERSION) {
3817                 smp->status |= IB_SMP_UNSUP_VERSION;
3818                 ret = reply((struct ib_mad_hdr *)smp);
3819                 goto bail;
3820         }
3821         ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
3822                          smp->route.dr.dr_slid, smp->route.dr.return_path,
3823                          smp->hop_cnt);
3824         if (ret) {
3825                 u32 port_num = be32_to_cpu(smp->attr_mod);
3826
3827                 /*
3828                  * If this is a get/set portinfo, we already check the
3829                  * M_Key if the MAD is for another port and the M_Key
3830                  * is OK on the receiving port. This check is needed
3831                  * to increment the error counters when the M_Key
3832                  * fails to match on *both* ports.
3833                  */
3834                 if (attr_id == IB_SMP_ATTR_PORT_INFO &&
3835                     (smp->method == IB_MGMT_METHOD_GET ||
3836                      smp->method == IB_MGMT_METHOD_SET) &&
3837                     port_num && port_num <= ibdev->phys_port_cnt &&
3838                     port != port_num)
3839                         (void) check_mkey(to_iport(ibdev, port_num),
3840                                           (struct ib_mad_hdr *)smp, 0,
3841                                           smp->mkey, smp->route.dr.dr_slid,
3842                                           smp->route.dr.return_path,
3843                                           smp->hop_cnt);
3844                 ret = IB_MAD_RESULT_FAILURE;
3845                 goto bail;
3846         }
3847
3848         *resp_len = opa_get_smp_header_size(smp);
3849
3850         switch (smp->method) {
3851         case IB_MGMT_METHOD_GET:
3852                 switch (attr_id) {
3853                 default:
3854                         clear_opa_smp_data(smp);
3855                         ret = subn_get_opa_sma(attr_id, smp, am, data,
3856                                                ibdev, port, resp_len);
3857                         goto bail;
3858                 case OPA_ATTRIB_ID_AGGREGATE:
3859                         ret = subn_get_opa_aggregate(smp, ibdev, port,
3860                                                      resp_len);
3861                         goto bail;
3862                 }
3863         case IB_MGMT_METHOD_SET:
3864                 switch (attr_id) {
3865                 default:
3866                         ret = subn_set_opa_sma(attr_id, smp, am, data,
3867                                                ibdev, port, resp_len);
3868                         goto bail;
3869                 case OPA_ATTRIB_ID_AGGREGATE:
3870                         ret = subn_set_opa_aggregate(smp, ibdev, port,
3871                                                      resp_len);
3872                         goto bail;
3873                 }
3874         case IB_MGMT_METHOD_TRAP:
3875         case IB_MGMT_METHOD_REPORT:
3876         case IB_MGMT_METHOD_REPORT_RESP:
3877         case IB_MGMT_METHOD_GET_RESP:
3878                 /*
3879                  * The ib_mad module will call us to process responses
3880                  * before checking for other consumers.
3881                  * Just tell the caller to process it normally.
3882                  */
3883                 ret = IB_MAD_RESULT_SUCCESS;
3884                 goto bail;
3885         default:
3886                 smp->status |= IB_SMP_UNSUP_METHOD;
3887                 ret = reply((struct ib_mad_hdr *)smp);
3888         }
3889
3890 bail:
3891         return ret;
3892 }
3893
3894 static int process_subn(struct ib_device *ibdev, int mad_flags,
3895                         u8 port, const struct ib_mad *in_mad,
3896                         struct ib_mad *out_mad)
3897 {
3898         struct ib_smp *smp = (struct ib_smp *)out_mad;
3899         struct hfi1_ibport *ibp = to_iport(ibdev, port);
3900         int ret;
3901
3902         *out_mad = *in_mad;
3903         if (smp->class_version != 1) {
3904                 smp->status |= IB_SMP_UNSUP_VERSION;
3905                 ret = reply((struct ib_mad_hdr *)smp);
3906                 goto bail;
3907         }
3908
3909         ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
3910                          smp->mkey, (__force __be32)smp->dr_slid,
3911                          smp->return_path, smp->hop_cnt);
3912         if (ret) {
3913                 u32 port_num = be32_to_cpu(smp->attr_mod);
3914
3915                 /*
3916                  * If this is a get/set portinfo, we already check the
3917                  * M_Key if the MAD is for another port and the M_Key
3918                  * is OK on the receiving port. This check is needed
3919                  * to increment the error counters when the M_Key
3920                  * fails to match on *both* ports.
3921                  */
3922                 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
3923                     (smp->method == IB_MGMT_METHOD_GET ||
3924                      smp->method == IB_MGMT_METHOD_SET) &&
3925                     port_num && port_num <= ibdev->phys_port_cnt &&
3926                     port != port_num)
3927                         (void) check_mkey(to_iport(ibdev, port_num),
3928                                           (struct ib_mad_hdr *)smp, 0,
3929                                           smp->mkey,
3930                                           (__force __be32)smp->dr_slid,
3931                                           smp->return_path, smp->hop_cnt);
3932                 ret = IB_MAD_RESULT_FAILURE;
3933                 goto bail;
3934         }
3935
3936         switch (smp->method) {
3937         case IB_MGMT_METHOD_GET:
3938                 switch (smp->attr_id) {
3939                 case IB_SMP_ATTR_NODE_INFO:
3940                         ret = subn_get_nodeinfo(smp, ibdev, port);
3941                         goto bail;
3942                 default:
3943                         smp->status |= IB_SMP_UNSUP_METH_ATTR;
3944                         ret = reply((struct ib_mad_hdr *)smp);
3945                         goto bail;
3946                 }
        default:
                /* mirror process_subn_opa(): treat any other method as
                 * unsupported rather than returning an uninitialized ret */
                smp->status |= IB_SMP_UNSUP_METHOD;
                ret = reply((struct ib_mad_hdr *)smp);
        }
3948
3949 bail:
3950         return ret;
3951 }
3952
static int process_perf_opa(struct ib_device *ibdev, u8 port,
                            const struct opa_mad *in_mad,
                            struct opa_mad *out_mad, u32 *resp_len)
{
        struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
        int ret;

        *out_mad = *in_mad;

        if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
                pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
                return reply((struct ib_mad_hdr *)pmp);
        }

        *resp_len = sizeof(pmp->mad_hdr);

        switch (pmp->mad_hdr.method) {
        case IB_MGMT_METHOD_GET:
                switch (pmp->mad_hdr.attr_id) {
                case IB_PMA_CLASS_PORT_INFO:
                        ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_PORT_STATUS:
                        ret = pma_get_opa_portstatus(pmp, ibdev, port,
                                                     resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
                        ret = pma_get_opa_datacounters(pmp, ibdev, port,
                                                       resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
                        ret = pma_get_opa_porterrors(pmp, ibdev, port,
                                                     resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_ERROR_INFO:
                        ret = pma_get_opa_errorinfo(pmp, ibdev, port,
                                                    resp_len);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_mad_hdr *)pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (pmp->mad_hdr.attr_id) {
                case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
                        ret = pma_set_opa_portstatus(pmp, ibdev, port,
                                                     resp_len);
                        goto bail;
                case OPA_PM_ATTRIB_ID_ERROR_INFO:
                        ret = pma_set_opa_errorinfo(pmp, ibdev, port,
                                                    resp_len);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_mad_hdr *)pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_GET_RESP:
                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        default:
                pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
                ret = reply((struct ib_mad_hdr *)pmp);
        }

bail:
        return ret;
}

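/*
 * hfi1_process_opa_mad - handle an OPA (jumbo) MAD: select the P_Key
 * index for any reply, route the subnet and performance management
 * classes to their handlers, and size the outgoing reply.
 */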
static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
                                u8 port, const struct ib_wc *in_wc,
                                const struct ib_grh *in_grh,
                                const struct opa_mad *in_mad,
                                struct opa_mad *out_mad, size_t *out_mad_size,
                                u16 *out_mad_pkey_index)
{
        int ret;
        int pkey_idx;
        u32 resp_len = 0;
        struct hfi1_ibport *ibp = to_iport(ibdev, port);

        /* Replies are sent with the limited management P_Key. */
        pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
        if (pkey_idx < 0) {
                pr_warn("failed to find limited mgmt pkey, defaulting to 0x%x\n",
                        hfi1_get_pkey(ibp, 1));
                pkey_idx = 1;
        }
        *out_mad_pkey_index = (u16)pkey_idx;

        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                if (is_local_mad(ibp, in_mad, in_wc)) {
                        ret = opa_local_smp_check(ibp, in_wc);
                        if (ret)
                                return IB_MAD_RESULT_FAILURE;
                }
                ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
                                       out_mad, &resp_len);
                goto bail;
        case IB_MGMT_CLASS_PERF_MGMT:
                ret = process_perf_opa(ibdev, port, in_mad, out_mad,
                                       &resp_len);
                goto bail;
        default:
                ret = IB_MAD_RESULT_SUCCESS;
        }

bail:
        /*
         * Replies carry the accumulated response length, rounded up to a
         * multiple of 8 bytes; any other successfully consumed MAD is
         * passed back at its original size, minus the GRH.
         */
        if (ret & IB_MAD_RESULT_REPLY)
                *out_mad_size = round_up(resp_len, 8);
        else if (ret & IB_MAD_RESULT_SUCCESS)
                *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);

        return ret;
}

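/*
 * hfi1_process_ib_mad - handle a plain IB MAD.  Only the subnet
 * management classes are processed here; everything else is handed
 * back to the ib_mad layer untouched.
 */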
static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                               const struct ib_wc *in_wc,
                               const struct ib_grh *in_grh,
                               const struct ib_mad *in_mad,
                               struct ib_mad *out_mad)
{
        int ret;

        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
                goto bail;
        default:
                ret = IB_MAD_RESULT_SUCCESS;
        }

bail:
        return ret;
}

/**
 * hfi1_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @in_mad_size: size of the incoming MAD buffer
 * @out_mad: any outgoing MAD reply
 * @out_mad_size: size of the outgoing MAD reply buffer
 * @out_mad_pkey_index: P_Key index with which to send any reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
                     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
                     u16 *out_mad_pkey_index)
{
        switch (in_mad->base_version) {
        case OPA_MGMT_BASE_VERSION:
                if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
                        dev_err(ibdev->dma_device, "invalid in_mad_size\n");
                        return IB_MAD_RESULT_FAILURE;
                }
                return hfi1_process_opa_mad(ibdev, mad_flags, port,
                                            in_wc, in_grh,
                                            (struct opa_mad *)in_mad,
                                            (struct opa_mad *)out_mad,
                                            out_mad_size,
                                            out_mad_pkey_index);
        case IB_MGMT_BASE_VERSION:
                return hfi1_process_ib_mad(ibdev, mad_flags, port,
                                           in_wc, in_grh,
                                           (const struct ib_mad *)in_mad,
                                           (struct ib_mad *)out_mad);
        default:
                break;
        }

        return IB_MAD_RESULT_FAILURE;
}

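/*
 * MAD send completion handler: all that is needed is to release the
 * send buffer.
 */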
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        ib_free_send_mad(mad_send_wc->send_buf);
}

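/*
 * hfi1_create_agents - register a send-only MAD agent (no receive
 * handler) on the SMI QP of each physical port.  If any registration
 * fails, the agents registered so far are torn down again.
 */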
int hfi1_create_agents(struct hfi1_ibdev *dev)
{
        struct hfi1_devdata *dd = dd_from_dev(dev);
        struct ib_mad_agent *agent;
        struct hfi1_ibport *ibp;
        int p;
        int ret;

        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
                                              NULL, 0, send_handler,
                                              NULL, NULL, 0);
                if (IS_ERR(agent)) {
                        ret = PTR_ERR(agent);
                        goto err;
                }

                ibp->send_agent = agent;
        }

        return 0;

err:
        /* Unwind any agents registered before the failure. */
        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                if (ibp->send_agent) {
                        agent = ibp->send_agent;
                        ibp->send_agent = NULL;
                        ib_unregister_mad_agent(agent);
                }
        }

        return ret;
}

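/*
 * hfi1_free_agents - unregister each port's MAD send agent and destroy
 * any cached SM address handle.
 */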
void hfi1_free_agents(struct hfi1_ibdev *dev)
{
        struct hfi1_devdata *dd = dd_from_dev(dev);
        struct ib_mad_agent *agent;
        struct hfi1_ibport *ibp;
        int p;

        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                if (ibp->send_agent) {
                        agent = ibp->send_agent;
                        ibp->send_agent = NULL;
                        ib_unregister_mad_agent(agent);
                }
                if (ibp->sm_ah) {
                        ib_destroy_ah(&ibp->sm_ah->ibah);
                        ibp->sm_ah = NULL;
                }
        }
}