/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

static int reply(struct ib_smp *smp)
{
        /*
         * The verbs framework will handle the directed/LID route
         * packet changes.
         */
        smp->method = IB_MGMT_METHOD_GET_RESP;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                smp->status |= IB_SMP_DIRECTION;
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int reply_failure(struct ib_smp *smp)
{
        /*
         * The verbs framework will handle the directed/LID route
         * packet changes.
         */
        smp->method = IB_MGMT_METHOD_GET_RESP;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                smp->status |= IB_SMP_DIRECTION;
        return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
}

static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_agent *agent;
        struct ib_smp *smp;
        int ret;
        unsigned long flags;
        unsigned long timeout;

        agent = ibp->rvp.send_agent;
        if (!agent)
                return;

        /* o14-3.2.1 */
        if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
                return;

        /* o14-2 */
        if (ibp->rvp.trap_timeout &&
            time_before(jiffies, ibp->rvp.trap_timeout))
                return;

        send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
                                      IB_MGMT_MAD_DATA, GFP_ATOMIC,
                                      IB_MGMT_BASE_VERSION);
        if (IS_ERR(send_buf))
                return;

        smp = send_buf->mad;
        smp->base_version = IB_MGMT_BASE_VERSION;
        smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        smp->class_version = 1;
        smp->method = IB_MGMT_METHOD_TRAP;
        ibp->rvp.tid++;
        smp->tid = cpu_to_be64(ibp->rvp.tid);
        smp->attr_id = IB_SMP_ATTR_NOTICE;
        /* o14-1: smp->mkey = 0; */
        memcpy(smp->data, data, len);

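        /*
         * Address the trap to the SM: reuse the cached SM address handle
         * if there is one, otherwise create an AH for the current SM LID.
         */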
        spin_lock_irqsave(&ibp->rvp.lock, flags);
        if (!ibp->rvp.sm_ah) {
                if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
                        struct ib_ah *ah;

                        ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
                        if (IS_ERR(ah))
                                ret = PTR_ERR(ah);
                        else {
                                send_buf->ah = ah;
                                ibp->rvp.sm_ah = ibah_to_rvtah(ah);
                                ret = 0;
                        }
                } else
                        ret = -EINVAL;
        } else {
                send_buf->ah = &ibp->rvp.sm_ah->ibah;
                ret = 0;
        }
        spin_unlock_irqrestore(&ibp->rvp.lock, flags);

        if (!ret)
                ret = ib_post_send_mad(send_buf, NULL);
        if (!ret) {
                /* 4.096 usec. * 2^subnet_timeout, converted to usecs */
                timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
                ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
        } else {
                ib_free_send_mad(send_buf);
                ibp->rvp.trap_timeout = 0;
        }
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
        struct ib_mad_notice_attr data;

        if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
                ibp->rvp.pkey_violations++;
        else
                ibp->rvp.qkey_violations++;
        ibp->rvp.n_pkt_drops++;

        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_257_258.lid1 = lid1;
        data.details.ntc_257_258.lid2 = lid2;
        data.details.ntc_257_258.key = cpu_to_be32(key);
        data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
        data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

        qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
        struct ib_mad_notice_attr data;

        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_256.lid = data.issuer_lid;
        data.details.ntc_256.method = smp->method;
        data.details.ntc_256.attr_id = smp->attr_id;
        data.details.ntc_256.attr_mod = smp->attr_mod;
        data.details.ntc_256.mkey = smp->mkey;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                u8 hop_cnt;

                data.details.ntc_256.dr_slid = smp->dr_slid;
                data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
                hop_cnt = smp->hop_cnt;
                if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
                        data.details.ntc_256.dr_trunc_hop |=
                                IB_NOTICE_TRAP_DR_TRUNC;
                        hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
                }
                data.details.ntc_256.dr_trunc_hop |= hop_cnt;
                memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
                       hop_cnt);
        }

        qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
        struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = dd_from_dev(ibdev);
        struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.new_cap_mask =
                                        cpu_to_be32(ibp->rvp.port_cap_flags);
        qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_145.lid = data.issuer_lid;
        data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

        qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.local_changes = 1;
        data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

        qib_send_trap(ibp, &data, sizeof(data));
}

static int subn_get_nodedescription(struct ib_smp *smp,
                                    struct ib_device *ibdev)
{
        if (smp->attr_mod)
                smp->status |= IB_SMP_INVALID_FIELD;

        memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

        return reply(smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 vendor, majrev, minrev;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* GUID 0 is illegal */
        if (smp->attr_mod || pidx >= dd->num_pports ||
            dd->pport[pidx].guid == 0)
                smp->status |= IB_SMP_INVALID_FIELD;
        else
                nip->port_guid = dd->pport[pidx].guid;

        nip->base_version = 1;
        nip->class_version = 1;
        nip->node_type = 1;     /* channel adapter */
        nip->num_ports = ibdev->phys_port_cnt;
        /* This is already in network order */
        nip->sys_guid = ib_qib_sys_image_guid;
        nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
        nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
        nip->device_id = cpu_to_be16(dd->deviceid);
        majrev = dd->majrev;
        minrev = dd->minrev;
        nip->revision = cpu_to_be32((majrev << 16) | minrev);
        nip->local_port_num = port;
        vendor = dd->vendorid;
        nip->vendor_id[0] = QIB_SRC_OUI_1;
        nip->vendor_id[1] = QIB_SRC_OUI_2;
        nip->vendor_id[2] = QIB_SRC_OUI_3;

        return reply(smp);
}

static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
        __be64 *p = (__be64 *) smp->data;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* 32 blocks of 8 64-bit GUIDs per block */

        memset(smp->data, 0, sizeof(smp->data));

        if (startgx == 0 && pidx < dd->num_pports) {
                struct qib_pportdata *ppd = dd->pport + pidx;
                struct qib_ibport *ibp = &ppd->ibport_data;
                __be64 g = ppd->guid;
                unsigned i;

                /* GUID 0 is illegal */
                if (g == 0)
                        smp->status |= IB_SMP_INVALID_FIELD;
                else {
                        /* The first is a copy of the read-only HW GUID. */
                        p[0] = g;
                        for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
                                p[i] = ibp->guids[i - 1];
                }
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
                                         (u32)n);
        return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
                                         (u32)n);
        return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
                IB_LINKINITCMD_SLEEP;
}

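/*
 * Check an incoming SMP's M_Key against the port's M_Key, honoring the
 * lease timeout and the IB_MAD_IGNORE_MKEY flag.  Returns 1 if the MAD
 * must be dropped (a Bad M_Key trap has been queued via qib_bad_mkey()
 * where required), 0 if processing may continue.
 */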
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
        int valid_mkey = 0;
        int ret = 0;

        /* Is the mkey in the process of expiring? */
        if (ibp->rvp.mkey_lease_timeout &&
            time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
                /* Clear timeout and mkey protection field. */
                ibp->rvp.mkey_lease_timeout = 0;
                ibp->rvp.mkeyprot = 0;
        }

        if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
            ibp->rvp.mkey == smp->mkey)
                valid_mkey = 1;

        /* Unset lease timeout on any valid Get/Set/TrapRepress */
        if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
            (smp->method == IB_MGMT_METHOD_GET ||
             smp->method == IB_MGMT_METHOD_SET ||
             smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
                ibp->rvp.mkey_lease_timeout = 0;

        if (!valid_mkey) {
                switch (smp->method) {
                case IB_MGMT_METHOD_GET:
                        /* Bad mkey not a violation below level 2 */
                        if (ibp->rvp.mkeyprot < 2)
                                break;
                        /* fall through */
                case IB_MGMT_METHOD_SET:
                case IB_MGMT_METHOD_TRAP_REPRESS:
                        if (ibp->rvp.mkey_violations != 0xFFFF)
                                ++ibp->rvp.mkey_violations;
                        if (!ibp->rvp.mkey_lease_timeout &&
                            ibp->rvp.mkey_lease_period)
                                ibp->rvp.mkey_lease_timeout = jiffies +
                                        ibp->rvp.mkey_lease_period * HZ;
                        /* Generate a trap notice. */
                        qib_bad_mkey(ibp, smp);
                        ret = 1;
                }
        }

        return ret;
}

static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd;
        struct qib_pportdata *ppd;
        struct qib_ibport *ibp;
        struct ib_port_info *pip = (struct ib_port_info *)smp->data;
        u8 mtu;
        int ret;
        u32 state;
        u32 port_num = be32_to_cpu(smp->attr_mod);

        if (port_num == 0)
                port_num = port;
        else {
                if (port_num > ibdev->phys_port_cnt) {
                        smp->status |= IB_SMP_INVALID_FIELD;
                        ret = reply(smp);
                        goto bail;
                }
                if (port_num != port) {
                        ibp = to_iport(ibdev, port_num);
                        ret = check_mkey(ibp, smp, 0);
                        if (ret) {
                                ret = IB_MAD_RESULT_FAILURE;
                                goto bail;
                        }
                }
        }

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hdw from 0 */
        ppd = dd->pport + (port_num - 1);
        ibp = &ppd->ibport_data;

        /* Clear all fields.  Only set the non-zero fields. */
        memset(smp->data, 0, sizeof(smp->data));

        /* Only return the mkey if the protection field allows it. */
        if (!(smp->method == IB_MGMT_METHOD_GET &&
              ibp->rvp.mkey != smp->mkey &&
              ibp->rvp.mkeyprot == 1))
                pip->mkey = ibp->rvp.mkey;
        pip->gid_prefix = ibp->rvp.gid_prefix;
        pip->lid = cpu_to_be16(ppd->lid);
        pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
        pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
        /* pip->diag_code; */
        pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
        pip->local_port_num = port;
        pip->link_width_enabled = ppd->link_width_enabled;
        pip->link_width_supported = ppd->link_width_supported;
        pip->link_width_active = ppd->link_width_active;
        state = dd->f_iblink_state(ppd->lastibcstat);
        pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

        pip->portphysstate_linkdown =
                (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
                (get_linkdowndefaultstate(ppd) ? 1 : 2);
        pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
        pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
                ppd->link_speed_enabled;
        switch (ppd->ibmtu) {
        default: /* something is wrong; fall through */
        case 4096:
                mtu = IB_MTU_4096;
                break;
        case 2048:
                mtu = IB_MTU_2048;
                break;
        case 1024:
                mtu = IB_MTU_1024;
                break;
        case 512:
                mtu = IB_MTU_512;
                break;
        case 256:
                mtu = IB_MTU_256;
                break;
        }
        pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
        pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
        pip->vl_high_limit = ibp->rvp.vl_high_limit;
        pip->vl_arb_high_cap =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
        pip->vl_arb_low_cap =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
        /* InitTypeReply = 0 */
        pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
        /* HCAs ignore VLStallCount and HOQLife */
        /* pip->vlstallcnt_hoqlife; */
        pip->operationalvl_pei_peo_fpi_fpo =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
        pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
        /* P_KeyViolations are counted by hardware. */
        pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
        pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
        /* Only the hardware GUID is supported for now */
        pip->guid_cap = QIB_GUIDS_PER_PORT;
        pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
        /* 32.768 usec. response time (guessing) */
        pip->resv_resptimevalue = 3;
        pip->localphyerrors_overrunerrors =
                (get_phyerrthreshold(ppd) << 4) |
                get_overrunthreshold(ppd);
        /* pip->max_credit_hint; */
        if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
                u32 v;

                v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
                pip->link_roundtrip_latency[0] = v >> 16;
                pip->link_roundtrip_latency[1] = v >> 8;
                pip->link_roundtrip_latency[2] = v;
        }

        ret = reply(smp);

bail:
        return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
        struct qib_pportdata *ppd = dd->pport + port - 1;
        /*
         * always a kernel context, no locking needed.
         * If we get here with ppd setup, no need to check
         * that rcd is valid.
         */
        struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

        memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

        return 0;
}

static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
                              u8 port)
{
        u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
        u16 *p = (u16 *) smp->data;
        __be16 *q = (__be16 *) smp->data;
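        /*
         * p and q alias smp->data: get_pkeys() fills the buffer with
         * host-order values which are then swapped to wire order in place.
         */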

        /* 64 blocks of 32 16-bit P_Key entries */

        memset(smp->data, 0, sizeof(smp->data));
        if (startpx == 0) {
                struct qib_devdata *dd = dd_from_ibdev(ibdev);
                unsigned i, n = qib_get_npkeys(dd);

                get_pkeys(dd, port, p);

                for (i = 0; i < n; i++)
                        q[i] = cpu_to_be16(p[i]);
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
        __be64 *p = (__be64 *) smp->data;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* 32 blocks of 8 64-bit GUIDs per block */

        if (startgx == 0 && pidx < dd->num_pports) {
                struct qib_pportdata *ppd = dd->pport + pidx;
                struct qib_ibport *ibp = &ppd->ibport_data;
                unsigned i;

                /* The first entry is read-only. */
                for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
                        ibp->guids[i - 1] = p[i];
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        /* The only GUID we support is the first read-only entry. */
        return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct ib_port_info *pip = (struct ib_port_info *)smp->data;
        struct ib_event event;
        struct qib_devdata *dd;
        struct qib_pportdata *ppd;
        struct qib_ibport *ibp;
        u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
        unsigned long flags;
        u16 lid, smlid;
        u8 lwe;
        u8 lse;
        u8 state;
        u8 vls;
        u8 msl;
        u16 lstate;
        int ret, ore, mtu;
        u32 port_num = be32_to_cpu(smp->attr_mod);

        if (port_num == 0)
                port_num = port;
        else {
                if (port_num > ibdev->phys_port_cnt)
                        goto err;
                /* Port attributes can only be set on the receiving port */
                if (port_num != port)
                        goto get_only;
        }

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hdw from 0 */
        ppd = dd->pport + (port_num - 1);
        ibp = &ppd->ibport_data;
        event.device = ibdev;
        event.element.port_num = port;

        ibp->rvp.mkey = pip->mkey;
        ibp->rvp.gid_prefix = pip->gid_prefix;
        ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

        lid = be16_to_cpu(pip->lid);
        /* Must be a valid unicast LID address. */
        if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                smp->status |= IB_SMP_INVALID_FIELD;
        else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
                if (ppd->lid != lid)
                        qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
                if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
                        qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
                qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
                event.event = IB_EVENT_LID_CHANGE;
                ib_dispatch_event(&event);
        }

        smlid = be16_to_cpu(pip->sm_lid);
        msl = pip->neighbormtu_mastersmsl & 0xF;
        /* Must be a valid unicast LID address. */
        if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                smp->status |= IB_SMP_INVALID_FIELD;
        else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
                spin_lock_irqsave(&ibp->rvp.lock, flags);
                if (ibp->rvp.sm_ah) {
                        if (smlid != ibp->rvp.sm_lid)
                                ibp->rvp.sm_ah->attr.dlid = smlid;
                        if (msl != ibp->rvp.sm_sl)
                                ibp->rvp.sm_ah->attr.sl = msl;
                }
                spin_unlock_irqrestore(&ibp->rvp.lock, flags);
                if (smlid != ibp->rvp.sm_lid)
                        ibp->rvp.sm_lid = smlid;
                if (msl != ibp->rvp.sm_sl)
                        ibp->rvp.sm_sl = msl;
                event.event = IB_EVENT_SM_CHANGE;
                ib_dispatch_event(&event);
        }

        /* Allow 1x or 4x to be set (see 14.2.6.6). */
        lwe = pip->link_width_enabled;
        if (lwe) {
                if (lwe == 0xFF)
                        set_link_width_enabled(ppd, ppd->link_width_supported);
                else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
                        smp->status |= IB_SMP_INVALID_FIELD;
                else if (lwe != ppd->link_width_enabled)
                        set_link_width_enabled(ppd, lwe);
        }

        lse = pip->linkspeedactive_enabled & 0xF;
        if (lse) {
                /*
                 * The IB 1.2 spec. only allows link speed values
                 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
                 * speeds.
                 */
                if (lse == 15)
                        set_link_speed_enabled(ppd,
                                               ppd->link_speed_supported);
                else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
                        smp->status |= IB_SMP_INVALID_FIELD;
                else if (lse != ppd->link_speed_enabled)
                        set_link_speed_enabled(ppd, lse);
        }

        /* Set link down default state. */
        switch (pip->portphysstate_linkdown & 0xF) {
        case 0: /* NOP */
                break;
        case 1: /* SLEEP */
                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
                                        IB_LINKINITCMD_SLEEP);
                break;
        case 2: /* POLL */
                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
                                        IB_LINKINITCMD_POLL);
                break;
        default:
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
        ibp->rvp.vl_high_limit = pip->vl_high_limit;
        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
                                    ibp->rvp.vl_high_limit);

        mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
        if (mtu == -1)
                smp->status |= IB_SMP_INVALID_FIELD;
        else
                qib_set_mtu(ppd, mtu);

        /* Set operational VLs */
        vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
        if (vls) {
                if (vls > ppd->vls_supported)
                        smp->status |= IB_SMP_INVALID_FIELD;
                else
                        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
        }

        if (pip->mkey_violations == 0)
                ibp->rvp.mkey_violations = 0;

        if (pip->pkey_violations == 0)
                ibp->rvp.pkey_violations = 0;

        if (pip->qkey_violations == 0)
                ibp->rvp.qkey_violations = 0;

        ore = pip->localphyerrors_overrunerrors;
        if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
                smp->status |= IB_SMP_INVALID_FIELD;

        if (set_overrunthreshold(ppd, (ore & 0xF)))
                smp->status |= IB_SMP_INVALID_FIELD;

        ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

        /*
         * Do the port state change now that the other link parameters
         * have been set.
         * Changing the port physical state only makes sense if the link
         * is down or is being set to down.
         */
        state = pip->linkspeed_portstate & 0xF;
        lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
        if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
                smp->status |= IB_SMP_INVALID_FIELD;

        /*
         * Only state changes of DOWN, ARM, and ACTIVE are valid
         * and must be in the correct state to take effect (see 7.2.6).
         */
        switch (state) {
        case IB_PORT_NOP:
                if (lstate == 0)
                        break;
                /* FALLTHROUGH */
        case IB_PORT_DOWN:
                if (lstate == 0)
                        lstate = QIB_IB_LINKDOWN_ONLY;
                else if (lstate == 1)
                        lstate = QIB_IB_LINKDOWN_SLEEP;
                else if (lstate == 2)
                        lstate = QIB_IB_LINKDOWN;
                else if (lstate == 3)
                        lstate = QIB_IB_LINKDOWN_DISABLE;
                else {
                        smp->status |= IB_SMP_INVALID_FIELD;
                        break;
                }
                spin_lock_irqsave(&ppd->lflags_lock, flags);
                ppd->lflags &= ~QIBL_LINKV;
                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                qib_set_linkstate(ppd, lstate);
                /*
                 * Don't send a reply if the response would be sent
                 * through the disabled port.
                 */
                if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
                        ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
                        goto done;
                }
                qib_wait_linkstate(ppd, QIBL_LINKV, 10);
                break;
        case IB_PORT_ARMED:
                qib_set_linkstate(ppd, QIB_IB_LINKARM);
                break;
        case IB_PORT_ACTIVE:
                qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
                break;
        default:
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        if (clientrereg) {
                event.event = IB_EVENT_CLIENT_REREGISTER;
                ib_dispatch_event(&event);
        }

        ret = subn_get_portinfo(smp, ibdev, port);

        /* restore re-reg bit per o14-12.2.1 */
        pip->clientrereg_resv_subnetto |= clientrereg;

        goto done;

err:
        smp->status |= IB_SMP_INVALID_FIELD;
get_only:
        ret = subn_get_portinfo(smp, ibdev, port);
done:
        return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
        int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (ppd->pkeys[i] != key)
                        continue;
                if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
                        ppd->pkeys[i] = 0;
                        ret = 1;
                        goto bail;
                }
                break;
        }

        ret = 0;

bail:
        return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
        int i;
        u16 lkey = key & 0x7FFF;
        int any = 0;
        int ret;

        if (lkey == 0x7FFF) {
                ret = 0;
                goto bail;
        }

        /* Look for an empty slot or a matching PKEY. */
        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i]) {
                        any++;
                        continue;
                }
                /* If it matches exactly, try to increment the ref count */
                if (ppd->pkeys[i] == key) {
                        if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
                                ret = 0;
                                goto bail;
                        }
                        /* Lost the race. Look for an empty slot below. */
                        atomic_dec(&ppd->pkeyrefs[i]);
                        any++;
                }
                /*
                 * It makes no sense to have both the limited and unlimited
                 * PKEY set at the same time since the unlimited one will
                 * disable the limited one.
                 */
                if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (!any) {
                ret = -EBUSY;
                goto bail;
        }
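        /*
         * An empty slot was seen in the scan above: claim the first one
         * that is still free; atomic_inc_return() arbitrates racing
         * writers.
         */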
        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i] &&
                    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
                        /* for qibstats, etc. */
                        ppd->pkeys[i] = key;
                        ret = 1;
                        goto bail;
                }
        }
        ret = -EBUSY;

bail:
        return ret;
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
        struct qib_pportdata *ppd;
        struct qib_ctxtdata *rcd;
        int i;
        int changed = 0;

        /*
         * IB port one/two always maps to context zero/one,
         * always a kernel context, no locking needed
         * If we get here with ppd setup, no need to check
         * that rcd is valid.
         */
        ppd = dd->pport + (port - 1);
        rcd = dd->rcd[ppd->hw_pidx];

        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
                u16 key = pkeys[i];
                u16 okey = rcd->pkeys[i];

                if (key == okey)
                        continue;
                /*
                 * The value of this PKEY table entry is changing.
                 * Remove the old entry in the hardware's array of PKEYs.
                 */
                if (okey & 0x7FFF)
                        changed |= rm_pkey(ppd, okey);
                if (key & 0x7FFF) {
                        int ret = add_pkey(ppd, key);

                        if (ret < 0)
                                key = 0;
                        else
                                changed |= ret;
                }
                rcd->pkeys[i] = key;
        }
        if (changed) {
                struct ib_event event;

                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

                event.event = IB_EVENT_PKEY_CHANGE;
                event.device = &dd->verbs_dev.rdi.ibdev;
                event.element.port_num = port;
                ib_dispatch_event(&event);
        }
        return 0;
}

static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
                              u8 port)
{
        u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
        __be16 *p = (__be16 *) smp->data;
        u16 *q = (u16 *) smp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        unsigned i, n = qib_get_npkeys(dd);

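        /*
         * p and q alias smp->data: swap the wire-order table to host order
         * in place before handing it to set_pkeys().
         */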
        for (i = 0; i < n; i++)
                q[i] = be16_to_cpu(p[i]);

        if (startpx != 0 || set_pkeys(dd, port, q) != 0)
                smp->status |= IB_SMP_INVALID_FIELD;

        return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_ibport *ibp = to_iport(ibdev, port);
        u8 *p = (u8 *) smp->data;
        unsigned i;

        memset(smp->data, 0, sizeof(smp->data));

        if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
                smp->status |= IB_SMP_UNSUP_METHOD;
        else
                for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
                        *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

        return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_ibport *ibp = to_iport(ibdev, port);
        u8 *p = (u8 *) smp->data;
        unsigned i;

        if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
                smp->status |= IB_SMP_UNSUP_METHOD;
                return reply(smp);
        }

        for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
                ibp->sl_to_vl[i] = *p >> 4;
                ibp->sl_to_vl[i + 1] = *p & 0xF;
        }
        qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
                            _QIB_EVENT_SL2VL_CHANGE_BIT);

        return subn_get_sl_to_vl(smp, ibdev, port);
}

static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
                           u8 port)
{
        unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
        struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

        memset(smp->data, 0, sizeof(smp->data));

        if (ppd->vls_supported == IB_VL_VL0)
                smp->status |= IB_SMP_UNSUP_METHOD;
        else if (which == IB_VLARB_LOWPRI_0_31)
                (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
                                                   smp->data);
        else if (which == IB_VLARB_HIGHPRI_0_31)
                (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
                                                   smp->data);
        else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
                           u8 port)
{
        unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
        struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

        if (ppd->vls_supported == IB_VL_VL0)
                smp->status |= IB_SMP_UNSUP_METHOD;
        else if (which == IB_VLARB_LOWPRI_0_31)
                (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
                                                   smp->data);
        else if (which == IB_VLARB_HIGHPRI_0_31)
                (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
                                                   smp->data);
        else
                smp->status |= IB_SMP_INVALID_FIELD;

        return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        /*
         * For now, we only send the trap once so no need to process this.
         * o13-6, o13-7,
         * o14-3.a4 The SMA shall not send any message in response to a valid
         * SubnTrapRepress() message.
         */
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

static int pma_get_classportinfo(struct ib_pma_mad *pmp,
                                 struct ib_device *ibdev)
{
        struct ib_class_port_info *p =
                (struct ib_class_port_info *)pmp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);

        memset(pmp->data, 0, sizeof(pmp->data));

        if (pmp->mad_hdr.attr_mod != 0)
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

        /* Note that AllPortSelect is not valid */
        p->base_version = 1;
        p->class_version = 1;
        p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
        /*
         * Set the most significant bit of CM2 to indicate support for
         * congestion statistics
         */
        p->reserved[0] = dd->psxmitwait_supported << 7;
        /*
         * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
         */
        p->resp_time_value = 18;

        return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplescontrol *p =
                (struct ib_pma_portsamplescontrol *)pmp->data;
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned long flags;
        u8 port_select = p->port_select;

        memset(pmp->data, 0, sizeof(pmp->data));

        p->port_select = port_select;
        if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                goto bail;
        }
        spin_lock_irqsave(&ibp->rvp.lock, flags);
        p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
        p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
        p->counter_width = 4;   /* 32 bit counters */
        p->counter_mask0_9 = COUNTER_MASK0_9;
        p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
        p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
        p->tag = cpu_to_be16(ibp->rvp.pma_tag);
        p->counter_select[0] = ibp->rvp.pma_counter_select[0];
        p->counter_select[1] = ibp->rvp.pma_counter_select[1];
        p->counter_select[2] = ibp->rvp.pma_counter_select[2];
        p->counter_select[3] = ibp->rvp.pma_counter_select[3];
        p->counter_select[4] = ibp->rvp.pma_counter_select[4];
        spin_unlock_irqrestore(&ibp->rvp.lock, flags);

bail:
        return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplescontrol *p =
                (struct ib_pma_portsamplescontrol *)pmp->data;
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned long flags;
        u8 status, xmit_flags;
        int ret;

        if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                ret = reply((struct ib_smp *) pmp);
                goto bail;
        }

        spin_lock_irqsave(&ibp->rvp.lock, flags);

        /* Port Sampling code owns the PS* HW counters */
        xmit_flags = ppd->cong_stats.flags;
        ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
        status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
        if (status == IB_PMA_SAMPLE_STATUS_DONE ||
            (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
             xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
                ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
                ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
                ibp->rvp.pma_tag = be16_to_cpu(p->tag);
                ibp->rvp.pma_counter_select[0] = p->counter_select[0];
                ibp->rvp.pma_counter_select[1] = p->counter_select[1];
                ibp->rvp.pma_counter_select[2] = p->counter_select[2];
                ibp->rvp.pma_counter_select[3] = p->counter_select[3];
                ibp->rvp.pma_counter_select[4] = p->counter_select[4];
                dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
                                      ibp->rvp.pma_sample_start);
        }
        spin_unlock_irqrestore(&ibp->rvp.lock, flags);

        ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
        return ret;
}

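/*
 * Read the current value of one of the PS* hardware sampling counters,
 * selected by a PortSamplesControl CounterSelect code.
 */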
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
                       __be16 sel)
{
        u64 ret;

        switch (sel) {
        case IB_PMA_PORT_XMIT_DATA:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
                break;
        case IB_PMA_PORT_RCV_DATA:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
                break;
        case IB_PMA_PORT_XMIT_PKTS:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
                break;
        case IB_PMA_PORT_RCV_PKTS:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
                break;
        case IB_PMA_PORT_XMIT_WAIT:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
                break;
        default:
                ret = 0;
        }

        return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
        u32 delta;

        delta = get_counter(&ppd->ibport_data, ppd,
                            IB_PMA_PORT_XMIT_WAIT);
        return ppd->cong_stats.counter + delta;
}

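/*
 * Snapshot the PS* sampling counters when a sample interval completes,
 * so later PortSamplesResult queries return the frozen values.
 */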
1307 static void cache_hw_sample_counters(struct qib_pportdata *ppd)
1308 {
1309         struct qib_ibport *ibp = &ppd->ibport_data;
1310
1311         ppd->cong_stats.counter_cache.psxmitdata =
1312                 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
1313         ppd->cong_stats.counter_cache.psrcvdata =
1314                 get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
1315         ppd->cong_stats.counter_cache.psxmitpkts =
1316                 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
1317         ppd->cong_stats.counter_cache.psrcvpkts =
1318                 get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
1319         ppd->cong_stats.counter_cache.psxmitwait =
1320                 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
1321 }
1322
1323 static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
1324                                         __be16 sel)
1325 {
1326         u64 ret;
1327
1328         switch (sel) {
1329         case IB_PMA_PORT_XMIT_DATA:
1330                 ret = ppd->cong_stats.counter_cache.psxmitdata;
1331                 break;
1332         case IB_PMA_PORT_RCV_DATA:
1333                 ret = ppd->cong_stats.counter_cache.psrcvdata;
1334                 break;
1335         case IB_PMA_PORT_XMIT_PKTS:
1336                 ret = ppd->cong_stats.counter_cache.psxmitpkts;
1337                 break;
1338         case IB_PMA_PORT_RCV_PKTS:
1339                 ret = ppd->cong_stats.counter_cache.psrcvpkts;
1340                 break;
1341         case IB_PMA_PORT_XMIT_WAIT:
1342                 ret = ppd->cong_stats.counter_cache.psxmitwait;
1343                 break;
1344         default:
1345                 ret = 0;
1346         }
1347
1348         return ret;
1349 }
1350
1351 static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
1352                                      struct ib_device *ibdev, u8 port)
1353 {
1354         struct ib_pma_portsamplesresult *p =
1355                 (struct ib_pma_portsamplesresult *)pmp->data;
1356         struct qib_ibdev *dev = to_idev(ibdev);
1357         struct qib_devdata *dd = dd_from_dev(dev);
1358         struct qib_ibport *ibp = to_iport(ibdev, port);
1359         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1360         unsigned long flags;
1361         u8 status;
1362         int i;
1363
1364         memset(pmp->data, 0, sizeof(pmp->data));
1365         spin_lock_irqsave(&ibp->rvp.lock, flags);
1366         p->tag = cpu_to_be16(ibp->rvp.pma_tag);
1367         if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1368                 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1369         else {
1370                 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1371                 p->sample_status = cpu_to_be16(status);
1372                 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1373                         cache_hw_sample_counters(ppd);
1374                         ppd->cong_stats.counter =
1375                                 xmit_wait_get_value_delta(ppd);
1376                         dd->f_set_cntr_sample(ppd,
1377                                               QIB_CONG_TIMER_PSINTERVAL, 0);
1378                         ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1379                 }
1380         }
1381         for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
1382                 p->counter[i] = cpu_to_be32(
1383                         get_cache_hw_sample_counters(
1384                                 ppd, ibp->rvp.pma_counter_select[i]));
1385         spin_unlock_irqrestore(&ibp->rvp.lock, flags);
1386
1387         return reply((struct ib_smp *) pmp);
1388 }
1389
1390 static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
1391                                          struct ib_device *ibdev, u8 port)
1392 {
1393         struct ib_pma_portsamplesresult_ext *p =
1394                 (struct ib_pma_portsamplesresult_ext *)pmp->data;
1395         struct qib_ibdev *dev = to_idev(ibdev);
1396         struct qib_devdata *dd = dd_from_dev(dev);
1397         struct qib_ibport *ibp = to_iport(ibdev, port);
1398         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1399         unsigned long flags;
1400         u8 status;
1401         int i;
1402
1403         /* Port Sampling code owns the PS* HW counters */
1404         memset(pmp->data, 0, sizeof(pmp->data));
1405         spin_lock_irqsave(&ibp->rvp.lock, flags);
1406         p->tag = cpu_to_be16(ibp->rvp.pma_tag);
1407         if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1408                 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1409         else {
1410                 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1411                 p->sample_status = cpu_to_be16(status);
1412                 /* 64 bits */
1413                 p->extended_width = cpu_to_be32(0x80000000);
1414                 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1415                         cache_hw_sample_counters(ppd);
1416                         ppd->cong_stats.counter =
1417                                 xmit_wait_get_value_delta(ppd);
1418                         dd->f_set_cntr_sample(ppd,
1419                                               QIB_CONG_TIMER_PSINTERVAL, 0);
1420                         ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1421                 }
1422         }
1423         for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
1424                 p->counter[i] = cpu_to_be64(
1425                         get_cache_hw_sample_counters(
1426                                 ppd, ibp->rvp.pma_counter_select[i]));
1427         spin_unlock_irqrestore(&ibp->rvp.lock, flags);
1428
1429         return reply((struct ib_smp *) pmp);
1430 }
1431
1432 static int pma_get_portcounters(struct ib_pma_mad *pmp,
1433                                 struct ib_device *ibdev, u8 port)
1434 {
1435         struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1436                 pmp->data;
1437         struct qib_ibport *ibp = to_iport(ibdev, port);
1438         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1439         struct qib_verbs_counters cntrs;
1440         u8 port_select = p->port_select;
1441
1442         qib_get_counters(ppd, &cntrs);
1443
1444         /* Adjust counters for any resets done. */
1445         cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1446         cntrs.link_error_recovery_counter -=
1447                 ibp->z_link_error_recovery_counter;
1448         cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1449         cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1450         cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
1451         cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1452         cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1453         cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1454         cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1455         cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1456         cntrs.local_link_integrity_errors -=
1457                 ibp->z_local_link_integrity_errors;
1458         cntrs.excessive_buffer_overrun_errors -=
1459                 ibp->z_excessive_buffer_overrun_errors;
1460         cntrs.vl15_dropped -= ibp->z_vl15_dropped;
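        /* n_vl15_dropped counts VL15 packets dropped in software. */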
1461         cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
1462
1463         memset(pmp->data, 0, sizeof(pmp->data));
1464
1465         p->port_select = port_select;
1466         if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
1467                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
1468
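        /*
         * PMA counters saturate rather than wrap, so clamp each value
         * to the width of its PortCounters field.
         */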
1469         if (cntrs.symbol_error_counter > 0xFFFFUL)
1470                 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1471         else
1472                 p->symbol_error_counter =
1473                         cpu_to_be16((u16)cntrs.symbol_error_counter);
1474         if (cntrs.link_error_recovery_counter > 0xFFUL)
1475                 p->link_error_recovery_counter = 0xFF;
1476         else
1477                 p->link_error_recovery_counter =
1478                         (u8)cntrs.link_error_recovery_counter;
1479         if (cntrs.link_downed_counter > 0xFFUL)
1480                 p->link_downed_counter = 0xFF;
1481         else
1482                 p->link_downed_counter = (u8)cntrs.link_downed_counter;
1483         if (cntrs.port_rcv_errors > 0xFFFFUL)
1484                 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1485         else
1486                 p->port_rcv_errors =
1487                         cpu_to_be16((u16) cntrs.port_rcv_errors);
1488         if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1489                 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1490         else
1491                 p->port_rcv_remphys_errors =
1492                         cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
1493         if (cntrs.port_xmit_discards > 0xFFFFUL)
1494                 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1495         else
1496                 p->port_xmit_discards =
1497                         cpu_to_be16((u16)cntrs.port_xmit_discards);
1498         if (cntrs.local_link_integrity_errors > 0xFUL)
1499                 cntrs.local_link_integrity_errors = 0xFUL;
1500         if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1501                 cntrs.excessive_buffer_overrun_errors = 0xFUL;
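        /*
         * LocalLinkIntegrityErrors and ExcessiveBufferOverrunErrors
         * share one byte: upper and lower nibble, respectively.
         */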
1502         p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
1503                 cntrs.excessive_buffer_overrun_errors;
1504         if (cntrs.vl15_dropped > 0xFFFFUL)
1505                 p->vl15_dropped = cpu_to_be16(0xFFFF);
1506         else
1507                 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1508         if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1509                 p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
1510         else
1511                 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
1512         if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1513                 p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
1514         else
1515                 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
1516         if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1517                 p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
1518         else
1519                 p->port_xmit_packets =
1520                         cpu_to_be32((u32)cntrs.port_xmit_packets);
1521         if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1522                 p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
1523         else
1524                 p->port_rcv_packets =
1525                         cpu_to_be32((u32) cntrs.port_rcv_packets);
1526
1527         return reply((struct ib_smp *) pmp);
1528 }
1529
1530 static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
1531                                      struct ib_device *ibdev, u8 port)
1532 {
1533         /* Congestion PMA packets start at offset 24 not 64 */
1534         struct ib_pma_portcounters_cong *p =
1535                 (struct ib_pma_portcounters_cong *)pmp->reserved;
1536         struct qib_verbs_counters cntrs;
1537         struct qib_ibport *ibp = to_iport(ibdev, port);
1538         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1539         struct qib_devdata *dd = dd_from_ppd(ppd);
1540         u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
1541         u64 xmit_wait_counter;
1542         unsigned long flags;
1543
1544         /*
1545          * These checks are performed only in the GET method because the
1546          * SET method ends up calling this function anyway.
1547          */
1548         if (!dd->psxmitwait_supported)
1549                 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
1550         if (port_select != port)
1551                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
1552
1553         qib_get_counters(ppd, &cntrs);
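        /*
         * rvp.lock serializes the xmit-wait accumulator against
         * xmit_wait_timer_func().
         */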
1554         spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
1555         xmit_wait_counter = xmit_wait_get_value_delta(ppd);
1556         spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
1557
1558         /* Adjust counters for any resets done. */
1559         cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1560         cntrs.link_error_recovery_counter -=
1561                 ibp->z_link_error_recovery_counter;
1562         cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1563         cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1564         cntrs.port_rcv_remphys_errors -=
1565                 ibp->z_port_rcv_remphys_errors;
1566         cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1567         cntrs.local_link_integrity_errors -=
1568                 ibp->z_local_link_integrity_errors;
1569         cntrs.excessive_buffer_overrun_errors -=
1570                 ibp->z_excessive_buffer_overrun_errors;
1571         cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1572         cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
1573         cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1574         cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1575         cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1576         cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1577
1578         memset(pmp->reserved, 0, sizeof(pmp->reserved) +
1579                sizeof(pmp->data));
1580
1581         /*
1582          * Set top 3 bits to indicate interval in picoseconds in
1583          * remaining bits.
1584          */
1585         p->port_check_rate =
1586                 cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
1587                             (dd->psxmitwait_check_rate &
1588                              ~(QIB_XMIT_RATE_PICO << 13)));
1589         p->port_adr_events = cpu_to_be64(0);
1590         p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
1591         p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
1592         p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
1593         p->port_xmit_packets =
1594                 cpu_to_be64(cntrs.port_xmit_packets);
1595         p->port_rcv_packets =
1596                 cpu_to_be64(cntrs.port_rcv_packets);
1597         if (cntrs.symbol_error_counter > 0xFFFFUL)
1598                 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1599         else
1600                 p->symbol_error_counter =
1601                         cpu_to_be16(
1602                                 (u16)cntrs.symbol_error_counter);
1603         if (cntrs.link_error_recovery_counter > 0xFFUL)
1604                 p->link_error_recovery_counter = 0xFF;
1605         else
1606                 p->link_error_recovery_counter =
1607                         (u8)cntrs.link_error_recovery_counter;
1608         if (cntrs.link_downed_counter > 0xFFUL)
1609                 p->link_downed_counter = 0xFF;
1610         else
1611                 p->link_downed_counter =
1612                         (u8)cntrs.link_downed_counter;
1613         if (cntrs.port_rcv_errors > 0xFFFFUL)
1614                 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1615         else
1616                 p->port_rcv_errors =
1617                         cpu_to_be16((u16) cntrs.port_rcv_errors);
1618         if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1619                 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1620         else
1621                 p->port_rcv_remphys_errors =
1622                         cpu_to_be16(
1623                                 (u16)cntrs.port_rcv_remphys_errors);
1624         if (cntrs.port_xmit_discards > 0xFFFFUL)
1625                 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1626         else
1627                 p->port_xmit_discards =
1628                         cpu_to_be16((u16)cntrs.port_xmit_discards);
1629         if (cntrs.local_link_integrity_errors > 0xFUL)
1630                 cntrs.local_link_integrity_errors = 0xFUL;
1631         if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1632                 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1633         p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
1634                 cntrs.excessive_buffer_overrun_errors;
1635         if (cntrs.vl15_dropped > 0xFFFFUL)
1636                 p->vl15_dropped = cpu_to_be16(0xFFFF);
1637         else
1638                 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1639
1640         return reply((struct ib_smp *)pmp);
1641 }
1642
1643 static void qib_snapshot_pmacounters(
1644         struct qib_ibport *ibp,
1645         struct qib_pma_counters *pmacounters)
1646 {
1647         struct qib_pma_counters *p;
1648         int cpu;
1649
1650         memset(pmacounters, 0, sizeof(*pmacounters));
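        /*
         * Sum the per-CPU counters.  No lock is taken, so the totals
         * are a best-effort snapshot of concurrently updated values.
         */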
1651         for_each_possible_cpu(cpu) {
1652                 p = per_cpu_ptr(ibp->pmastats, cpu);
1653                 pmacounters->n_unicast_xmit += p->n_unicast_xmit;
1654                 pmacounters->n_unicast_rcv += p->n_unicast_rcv;
1655                 pmacounters->n_multicast_xmit += p->n_multicast_xmit;
1656                 pmacounters->n_multicast_rcv += p->n_multicast_rcv;
1657         }
1658 }
1659
1660 static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
1661                                     struct ib_device *ibdev, u8 port)
1662 {
1663         struct ib_pma_portcounters_ext *p =
1664                 (struct ib_pma_portcounters_ext *)pmp->data;
1665         struct qib_ibport *ibp = to_iport(ibdev, port);
1666         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1667         u64 swords, rwords, spkts, rpkts, xwait;
1668         struct qib_pma_counters pma;
1669         u8 port_select = p->port_select;
1670
1671         memset(pmp->data, 0, sizeof(pmp->data));
1672
1673         p->port_select = port_select;
1674         if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
1675                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
1676                 goto bail;
1677         }
1678
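        /* xwait is filled in by the snapshot but not reported here. */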
1679         qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1680
1681         /* Adjust counters for any resets done. */
1682         swords -= ibp->z_port_xmit_data;
1683         rwords -= ibp->z_port_rcv_data;
1684         spkts -= ibp->z_port_xmit_packets;
1685         rpkts -= ibp->z_port_rcv_packets;
1686
1687         p->port_xmit_data = cpu_to_be64(swords);
1688         p->port_rcv_data = cpu_to_be64(rwords);
1689         p->port_xmit_packets = cpu_to_be64(spkts);
1690         p->port_rcv_packets = cpu_to_be64(rpkts);
1691
1692         qib_snapshot_pmacounters(ibp, &pma);
1693
1694         p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
1695                 - ibp->z_unicast_xmit);
1696         p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
1697                 - ibp->z_unicast_rcv);
1698         p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
1699                 - ibp->z_multicast_xmit);
1700         p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
1701                 - ibp->z_multicast_rcv);
1702
1703 bail:
1704         return reply((struct ib_smp *) pmp);
1705 }
1706
1707 static int pma_set_portcounters(struct ib_pma_mad *pmp,
1708                                 struct ib_device *ibdev, u8 port)
1709 {
1710         struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1711                 pmp->data;
1712         struct qib_ibport *ibp = to_iport(ibdev, port);
1713         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1714         struct qib_verbs_counters cntrs;
1715
1716         /*
1717          * Since the HW doesn't support clearing counters, we save the
1718          * current count and subtract it from future responses.
1719          */
1720         qib_get_counters(ppd, &cntrs);
1721
1722         if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
1723                 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1724
1725         if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
1726                 ibp->z_link_error_recovery_counter =
1727                         cntrs.link_error_recovery_counter;
1728
1729         if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
1730                 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1731
1732         if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
1733                 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1734
1735         if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
1736                 ibp->z_port_rcv_remphys_errors =
1737                         cntrs.port_rcv_remphys_errors;
1738
1739         if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
1740                 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1741
1742         if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
1743                 ibp->z_local_link_integrity_errors =
1744                         cntrs.local_link_integrity_errors;
1745
1746         if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
1747                 ibp->z_excessive_buffer_overrun_errors =
1748                         cntrs.excessive_buffer_overrun_errors;
1749
1750         if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
1751                 ibp->rvp.n_vl15_dropped = 0;
1752                 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1753         }
1754
1755         if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
1756                 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1757
1758         if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
1759                 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1760
1761         if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
1762                 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1763
1764         if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
1765                 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1766
1767         return pma_get_portcounters(pmp, ibdev, port);
1768 }
1769
1770 static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
1771                                      struct ib_device *ibdev, u8 port)
1772 {
1773         struct qib_ibport *ibp = to_iport(ibdev, port);
1774         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1775         struct qib_devdata *dd = dd_from_ppd(ppd);
1776         struct qib_verbs_counters cntrs;
1777         u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
1778         int ret = 0;
1779         unsigned long flags;
1780
1781         qib_get_counters(ppd, &cntrs);
1782         /* Get counter values before we save them */
1783         ret = pma_get_portcounters_cong(pmp, ibdev, port);
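        /*
         * The reply built by the call above reflects the values from
         * before the selected counters are cleared below.
         */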
1784
1785         if (counter_select & IB_PMA_SEL_CONG_XMIT) {
1786                 spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
1787                 ppd->cong_stats.counter = 0;
1788                 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
1789                                       0x0);
1790                 spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
1791         }
1792         if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
1793                 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1794                 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1795                 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1796                 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1797         }
1798         if (counter_select & IB_PMA_SEL_CONG_ALL) {
1799                 ibp->z_symbol_error_counter =
1800                         cntrs.symbol_error_counter;
1801                 ibp->z_link_error_recovery_counter =
1802                         cntrs.link_error_recovery_counter;
1803                 ibp->z_link_downed_counter =
1804                         cntrs.link_downed_counter;
1805                 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1806                 ibp->z_port_rcv_remphys_errors =
1807                         cntrs.port_rcv_remphys_errors;
1808                 ibp->z_port_xmit_discards =
1809                         cntrs.port_xmit_discards;
1810                 ibp->z_local_link_integrity_errors =
1811                         cntrs.local_link_integrity_errors;
1812                 ibp->z_excessive_buffer_overrun_errors =
1813                         cntrs.excessive_buffer_overrun_errors;
1814                 ibp->rvp.n_vl15_dropped = 0;
1815                 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1816         }
1817
1818         return ret;
1819 }
1820
1821 static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
1822                                     struct ib_device *ibdev, u8 port)
1823 {
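        /*
         * Note: only counter_select is read from the request; it sits
         * at the same offset in the base and extended PortCounters
         * layouts, so the base structure is used for both.
         */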
1824         struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1825                 pmp->data;
1826         struct qib_ibport *ibp = to_iport(ibdev, port);
1827         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1828         u64 swords, rwords, spkts, rpkts, xwait;
1829         struct qib_pma_counters pma;
1830
1831         qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1832
1833         if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1834                 ibp->z_port_xmit_data = swords;
1835
1836         if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
1837                 ibp->z_port_rcv_data = rwords;
1838
1839         if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
1840                 ibp->z_port_xmit_packets = spkts;
1841
1842         if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
1843                 ibp->z_port_rcv_packets = rpkts;
1844
1845         qib_snapshot_pmacounters(ibp, &pma);
1846
1847         if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
1848                 ibp->z_unicast_xmit = pma.n_unicast_xmit;
1849
1850         if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
1851                 ibp->z_unicast_rcv = pma.n_unicast_rcv;
1852
1853         if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
1854                 ibp->z_multicast_xmit = pma.n_multicast_xmit;
1855
1856         if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
1857                 ibp->z_multicast_rcv = pma.n_multicast_rcv;
1858
1859         return pma_get_portcounters_ext(pmp, ibdev, port);
1860 }
1861
1862 static int process_subn(struct ib_device *ibdev, int mad_flags,
1863                         u8 port, const struct ib_mad *in_mad,
1864                         struct ib_mad *out_mad)
1865 {
1866         struct ib_smp *smp = (struct ib_smp *)out_mad;
1867         struct qib_ibport *ibp = to_iport(ibdev, port);
1868         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1869         int ret;
1870
1871         *out_mad = *in_mad;
1872         if (smp->class_version != 1) {
1873                 smp->status |= IB_SMP_UNSUP_VERSION;
1874                 ret = reply(smp);
1875                 goto bail;
1876         }
1877
1878         ret = check_mkey(ibp, smp, mad_flags);
1879         if (ret) {
1880                 u32 port_num = be32_to_cpu(smp->attr_mod);
1881
1882                 /*
1883                  * The M_Key check failed on the receiving port.  For a
1884                  * get/set of PortInfo aimed at another port, also run
1885                  * the check against that port, so that the error
1886                  * counters are incremented when the M_Key fails to
1887                  * match on *both* ports.
1888                  */
1889                 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
1890                     (smp->method == IB_MGMT_METHOD_GET ||
1891                      smp->method == IB_MGMT_METHOD_SET) &&
1892                     port_num && port_num <= ibdev->phys_port_cnt &&
1893                     port != port_num)
1894                         (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
1895                 ret = IB_MAD_RESULT_FAILURE;
1896                 goto bail;
1897         }
1898
1899         switch (smp->method) {
1900         case IB_MGMT_METHOD_GET:
1901                 switch (smp->attr_id) {
1902                 case IB_SMP_ATTR_NODE_DESC:
1903                         ret = subn_get_nodedescription(smp, ibdev);
1904                         goto bail;
1905                 case IB_SMP_ATTR_NODE_INFO:
1906                         ret = subn_get_nodeinfo(smp, ibdev, port);
1907                         goto bail;
1908                 case IB_SMP_ATTR_GUID_INFO:
1909                         ret = subn_get_guidinfo(smp, ibdev, port);
1910                         goto bail;
1911                 case IB_SMP_ATTR_PORT_INFO:
1912                         ret = subn_get_portinfo(smp, ibdev, port);
1913                         goto bail;
1914                 case IB_SMP_ATTR_PKEY_TABLE:
1915                         ret = subn_get_pkeytable(smp, ibdev, port);
1916                         goto bail;
1917                 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1918                         ret = subn_get_sl_to_vl(smp, ibdev, port);
1919                         goto bail;
1920                 case IB_SMP_ATTR_VL_ARB_TABLE:
1921                         ret = subn_get_vl_arb(smp, ibdev, port);
1922                         goto bail;
1923                 case IB_SMP_ATTR_SM_INFO:
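                        /*
                         * While the SM is disabled, consume the MAD so
                         * that no reply is generated; when this port is
                         * the SM, report success so the MAD is passed
                         * up to it unmodified.
                         */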
1924                         if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
1925                                 ret = IB_MAD_RESULT_SUCCESS |
1926                                         IB_MAD_RESULT_CONSUMED;
1927                                 goto bail;
1928                         }
1929                         if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
1930                                 ret = IB_MAD_RESULT_SUCCESS;
1931                                 goto bail;
1932                         }
1933                         /* FALLTHROUGH */
1934                 default:
1935                         smp->status |= IB_SMP_UNSUP_METH_ATTR;
1936                         ret = reply(smp);
1937                         goto bail;
1938                 }
1939
1940         case IB_MGMT_METHOD_SET:
1941                 switch (smp->attr_id) {
1942                 case IB_SMP_ATTR_GUID_INFO:
1943                         ret = subn_set_guidinfo(smp, ibdev, port);
1944                         goto bail;
1945                 case IB_SMP_ATTR_PORT_INFO:
1946                         ret = subn_set_portinfo(smp, ibdev, port);
1947                         goto bail;
1948                 case IB_SMP_ATTR_PKEY_TABLE:
1949                         ret = subn_set_pkeytable(smp, ibdev, port);
1950                         goto bail;
1951                 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1952                         ret = subn_set_sl_to_vl(smp, ibdev, port);
1953                         goto bail;
1954                 case IB_SMP_ATTR_VL_ARB_TABLE:
1955                         ret = subn_set_vl_arb(smp, ibdev, port);
1956                         goto bail;
1957                 case IB_SMP_ATTR_SM_INFO:
1958                         if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
1959                                 ret = IB_MAD_RESULT_SUCCESS |
1960                                         IB_MAD_RESULT_CONSUMED;
1961                                 goto bail;
1962                         }
1963                         if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
1964                                 ret = IB_MAD_RESULT_SUCCESS;
1965                                 goto bail;
1966                         }
1967                         /* FALLTHROUGH */
1968                 default:
1969                         smp->status |= IB_SMP_UNSUP_METH_ATTR;
1970                         ret = reply(smp);
1971                         goto bail;
1972                 }
1973
1974         case IB_MGMT_METHOD_TRAP_REPRESS:
1975                 if (smp->attr_id == IB_SMP_ATTR_NOTICE) {
1976                         ret = subn_trap_repress(smp, ibdev, port);
1977                 } else {
1978                         smp->status |= IB_SMP_UNSUP_METH_ATTR;
1979                         ret = reply(smp);
1980                 }
1981                 goto bail;
1982
1983         case IB_MGMT_METHOD_TRAP:
1984         case IB_MGMT_METHOD_REPORT:
1985         case IB_MGMT_METHOD_REPORT_RESP:
1986         case IB_MGMT_METHOD_GET_RESP:
1987                 /*
1988                  * The ib_mad module will call us to process responses
1989                  * before checking for other consumers.
1990                  * Just tell the caller to process it normally.
1991                  */
1992                 ret = IB_MAD_RESULT_SUCCESS;
1993                 goto bail;
1994
1995         case IB_MGMT_METHOD_SEND:
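                /*
                 * A directed-route SEND carrying the QIB vendor IPG
                 * attribute is consumed here: its first data byte is
                 * handed to the chip-specific port-config hook.
                 */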
1996                 if (ib_get_smp_direction(smp) &&
1997                     smp->attr_id == QIB_VENDOR_IPG) {
1998                         ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
1999                                               smp->data[0]);
2000                         ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
2001                 } else {
2002                         ret = IB_MAD_RESULT_SUCCESS;
                }
2003                 goto bail;
2004
2005         default:
2006                 smp->status |= IB_SMP_UNSUP_METHOD;
2007                 ret = reply(smp);
2008         }
2009
2010 bail:
2011         return ret;
2012 }
2013
2014 static int process_perf(struct ib_device *ibdev, u8 port,
2015                         const struct ib_mad *in_mad,
2016                         struct ib_mad *out_mad)
2017 {
2018         struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
2019         int ret;
2020
2021         *out_mad = *in_mad;
2022         if (pmp->mad_hdr.class_version != 1) {
2023                 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
2024                 ret = reply((struct ib_smp *) pmp);
2025                 goto bail;
2026         }
2027
2028         switch (pmp->mad_hdr.method) {
2029         case IB_MGMT_METHOD_GET:
2030                 switch (pmp->mad_hdr.attr_id) {
2031                 case IB_PMA_CLASS_PORT_INFO:
2032                         ret = pma_get_classportinfo(pmp, ibdev);
2033                         goto bail;
2034                 case IB_PMA_PORT_SAMPLES_CONTROL:
2035                         ret = pma_get_portsamplescontrol(pmp, ibdev, port);
2036                         goto bail;
2037                 case IB_PMA_PORT_SAMPLES_RESULT:
2038                         ret = pma_get_portsamplesresult(pmp, ibdev, port);
2039                         goto bail;
2040                 case IB_PMA_PORT_SAMPLES_RESULT_EXT:
2041                         ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
2042                         goto bail;
2043                 case IB_PMA_PORT_COUNTERS:
2044                         ret = pma_get_portcounters(pmp, ibdev, port);
2045                         goto bail;
2046                 case IB_PMA_PORT_COUNTERS_EXT:
2047                         ret = pma_get_portcounters_ext(pmp, ibdev, port);
2048                         goto bail;
2049                 case IB_PMA_PORT_COUNTERS_CONG:
2050                         ret = pma_get_portcounters_cong(pmp, ibdev, port);
2051                         goto bail;
2052                 default:
2053                         pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
2054                         ret = reply((struct ib_smp *) pmp);
2055                         goto bail;
2056                 }
2057
2058         case IB_MGMT_METHOD_SET:
2059                 switch (pmp->mad_hdr.attr_id) {
2060                 case IB_PMA_PORT_SAMPLES_CONTROL:
2061                         ret = pma_set_portsamplescontrol(pmp, ibdev, port);
2062                         goto bail;
2063                 case IB_PMA_PORT_COUNTERS:
2064                         ret = pma_set_portcounters(pmp, ibdev, port);
2065                         goto bail;
2066                 case IB_PMA_PORT_COUNTERS_EXT:
2067                         ret = pma_set_portcounters_ext(pmp, ibdev, port);
2068                         goto bail;
2069                 case IB_PMA_PORT_COUNTERS_CONG:
2070                         ret = pma_set_portcounters_cong(pmp, ibdev, port);
2071                         goto bail;
2072                 default:
2073                         pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
2074                         ret = reply((struct ib_smp *) pmp);
2075                         goto bail;
2076                 }
2077
2078         case IB_MGMT_METHOD_TRAP:
2079         case IB_MGMT_METHOD_GET_RESP:
2080                 /*
2081                  * The ib_mad module will call us to process responses
2082                  * before checking for other consumers.
2083                  * Just tell the caller to process it normally.
2084                  */
2085                 ret = IB_MAD_RESULT_SUCCESS;
2086                 goto bail;
2087
2088         default:
2089                 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
2090                 ret = reply((struct ib_smp *) pmp);
2091         }
2092
2093 bail:
2094         return ret;
2095 }
2096
2097 static int cc_get_classportinfo(struct ib_cc_mad *ccp,
2098                                 struct ib_device *ibdev)
2099 {
2100         struct ib_cc_classportinfo_attr *p =
2101                 (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
2102
2103         memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2104
2105         p->base_version = 1;
2106         p->class_version = 2;   /* must match the check in process_cc() */
2107         p->cap_mask = 0;
2108
2109         /*
2110          * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
2111          */
2112         p->resp_time_value = 18;
2113
2114         return reply((struct ib_smp *) ccp);
2115 }
2116
2117 static int cc_get_congestion_info(struct ib_cc_mad *ccp,
2118                                 struct ib_device *ibdev, u8 port)
2119 {
2120         struct ib_cc_info_attr *p =
2121                 (struct ib_cc_info_attr *)ccp->mgmt_data;
2122         struct qib_ibport *ibp = to_iport(ibdev, port);
2123         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2124
2125         memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2126
2127         p->congestion_info = 0;
2128         p->control_table_cap = ppd->cc_max_table_entries;
2129
2130         return reply((struct ib_smp *) ccp);
2131 }
2132
2133 static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
2134                                 struct ib_device *ibdev, u8 port)
2135 {
2136         int i;
2137         struct ib_cc_congestion_setting_attr *p =
2138                 (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
2139         struct qib_ibport *ibp = to_iport(ibdev, port);
2140         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2141         struct ib_cc_congestion_entry_shadow *entries;
2142
2143         memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2144
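        /* Return the last committed settings from the shadow copy. */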
2145         spin_lock(&ppd->cc_shadow_lock);
2146
2147         entries = ppd->congestion_entries_shadow->entries;
2148         p->port_control = cpu_to_be16(
2149                 ppd->congestion_entries_shadow->port_control);
2150         p->control_map = cpu_to_be16(
2151                 ppd->congestion_entries_shadow->control_map);
2152         for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
2153                 p->entries[i].ccti_increase = entries[i].ccti_increase;
2154                 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
2155                 p->entries[i].trigger_threshold = entries[i].trigger_threshold;
2156                 p->entries[i].ccti_min = entries[i].ccti_min;
2157         }
2158
2159         spin_unlock(&ppd->cc_shadow_lock);
2160
2161         return reply((struct ib_smp *) ccp);
2162 }
2163
2164 static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
2165                                 struct ib_device *ibdev, u8 port)
2166 {
2167         struct ib_cc_table_attr *p =
2168                 (struct ib_cc_table_attr *)ccp->mgmt_data;
2169         struct qib_ibport *ibp = to_iport(ibdev, port);
2170         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2171         u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
2172         u32 max_cct_block;
2173         u32 cct_entry;
2174         struct ib_cc_table_entry_shadow *entries;
2175         int i;
2176
2177         /* Is the table index more than what is supported? */
2178         if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
2179                 goto bail;
2180
2181         memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2182
2183         spin_lock(&ppd->cc_shadow_lock);
2184
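        /*
         * The table is returned one block of IB_CCT_ENTRIES entries at
         * a time; reject block indexes beyond the last block held in
         * the shadow copy.
         */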
2185         max_cct_block =
2186                 (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
2187         max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
2188
2189         if (cct_block_index > max_cct_block) {
2190                 spin_unlock(&ppd->cc_shadow_lock);
2191                 goto bail;
2192         }
2193
2194         ccp->attr_mod = cpu_to_be32(cct_block_index);
2195
2196         cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
2197
2198         cct_entry--;
2199
2200         p->ccti_limit = cpu_to_be16(cct_entry);
2201
2202         entries = &ppd->ccti_entries_shadow->
2203                         entries[IB_CCT_ENTRIES * cct_block_index];
2204         cct_entry %= IB_CCT_ENTRIES;
2205
2206         for (i = 0; i <= cct_entry; i++)
2207                 p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
2208
2209         spin_unlock(&ppd->cc_shadow_lock);
2210
2211         return reply((struct ib_smp *) ccp);
2212
2213 bail:
2214         return reply_failure((struct ib_smp *) ccp);
2215 }
2216
2217 static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
2218                                 struct ib_device *ibdev, u8 port)
2219 {
2220         struct ib_cc_congestion_setting_attr *p =
2221                 (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
2222         struct qib_ibport *ibp = to_iport(ibdev, port);
2223         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2224         int i;
2225
2226         ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
2227
2228         for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
2229                 ppd->congestion_entries[i].ccti_increase =
2230                         p->entries[i].ccti_increase;
2231
2232                 ppd->congestion_entries[i].ccti_timer =
2233                         be16_to_cpu(p->entries[i].ccti_timer);
2234
2235                 ppd->congestion_entries[i].trigger_threshold =
2236                         p->entries[i].trigger_threshold;
2237
2238                 ppd->congestion_entries[i].ccti_min =
2239                         p->entries[i].ccti_min;
2240         }
2241
2242         return reply((struct ib_smp *) ccp);
2243 }
2244
2245 static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
2246                                 struct ib_device *ibdev, u8 port)
2247 {
2248         struct ib_cc_table_attr *p =
2249                 (struct ib_cc_table_attr *)ccp->mgmt_data;
2250         struct qib_ibport *ibp = to_iport(ibdev, port);
2251         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2252         u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
2253         u32 cct_entry;
2254         struct ib_cc_table_entry_shadow *entries;
2255         int i;
2256
2257         /* Is the table index more than what is supported? */
2258         if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
2259                 goto bail;
2260
2261         /*
2262          * If this packet is the first in the sequence then zero
2263          * the total table entry count.
         */
2264         if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
2265                 ppd->total_cct_entry = 0;
2266
2267         cct_entry = be16_to_cpu(p->ccti_limit) % IB_CCT_ENTRIES;
2268
2269         /* cct_entry is 0 to IB_CCT_ENTRIES - 1 within this block */
2270         ppd->total_cct_entry += (cct_entry + 1);
2271
2272         if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
2273                 goto bail;
2274
2275         ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
2276
2277         entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
2278
2279         for (i = 0; i <= cct_entry; i++)
2280                 entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
2281
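        /*
         * Publish the staged entries to the shadow copies that the GET
         * side reads under cc_shadow_lock.
         */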
2282         spin_lock(&ppd->cc_shadow_lock);
2283
2284         ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
2285         memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
2286                 (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
2287
2288         ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
2289         ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
2290         memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
2291                 IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
2292
2293         spin_unlock(&ppd->cc_shadow_lock);
2294
2295         return reply((struct ib_smp *) ccp);
2296
2297 bail:
2298         return reply_failure((struct ib_smp *) ccp);
2299 }
2300
2301 static int check_cc_key(struct qib_ibport *ibp,
2302                         struct ib_cc_mad *ccp, int mad_flags)
2303 {
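        /* CC_Key enforcement is not implemented; accept all CC MADs. */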
2304         return 0;
2305 }
2306
2307 static int process_cc(struct ib_device *ibdev, int mad_flags,
2308                         u8 port, const struct ib_mad *in_mad,
2309                         struct ib_mad *out_mad)
2310 {
2311         struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
2312         struct qib_ibport *ibp = to_iport(ibdev, port);
2313         int ret;
2314
2315         *out_mad = *in_mad;
2316
2317         if (ccp->class_version != 2) {
2318                 ccp->status |= IB_SMP_UNSUP_VERSION;
2319                 ret = reply((struct ib_smp *)ccp);
2320                 goto bail;
2321         }
2322
2323         ret = check_cc_key(ibp, ccp, mad_flags);
2324         if (ret)
2325                 goto bail;
2326
2327         switch (ccp->method) {
2328         case IB_MGMT_METHOD_GET:
2329                 switch (ccp->attr_id) {
2330                 case IB_CC_ATTR_CLASSPORTINFO:
2331                         ret = cc_get_classportinfo(ccp, ibdev);
2332                         goto bail;
2333
2334                 case IB_CC_ATTR_CONGESTION_INFO:
2335                         ret = cc_get_congestion_info(ccp, ibdev, port);
2336                         goto bail;
2337
2338                 case IB_CC_ATTR_CA_CONGESTION_SETTING:
2339                         ret = cc_get_congestion_setting(ccp, ibdev, port);
2340                         goto bail;
2341
2342                 case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
2343                         ret = cc_get_congestion_control_table(ccp, ibdev, port);
2344                         goto bail;
2345
2347                 default:
2348                         ccp->status |= IB_SMP_UNSUP_METH_ATTR;
2349                         ret = reply((struct ib_smp *) ccp);
2350                         goto bail;
2351                 }
2352
2353         case IB_MGMT_METHOD_SET:
2354                 switch (ccp->attr_id) {
2355                 case IB_CC_ATTR_CA_CONGESTION_SETTING:
2356                         ret = cc_set_congestion_setting(ccp, ibdev, port);
2357                         goto bail;
2358
2359                 case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
2360                         ret = cc_set_congestion_control_table(ccp, ibdev, port);
2361                         goto bail;
2362
2364                 default:
2365                         ccp->status |= IB_SMP_UNSUP_METH_ATTR;
2366                         ret = reply((struct ib_smp *) ccp);
2367                         goto bail;
2368                 }
2369
2370         case IB_MGMT_METHOD_GET_RESP:
2371                 /*
2372                  * The ib_mad module will call us to process responses
2373                  * before checking for other consumers.
2374                  * Just tell the caller to process it normally.
2375                  */
2376                 ret = IB_MAD_RESULT_SUCCESS;
2377                 goto bail;
2378
2379         case IB_MGMT_METHOD_TRAP:
2380         default:
2381                 ccp->status |= IB_SMP_UNSUP_METHOD;
2382                 ret = reply((struct ib_smp *) ccp);
2383         }
2384
2385 bail:
2386         return ret;
2387 }
2388
2389 /**
2390  * qib_process_mad - process an incoming MAD packet
2391  * @ibdev: the infiniband device this packet came in on
2392  * @mad_flags: MAD flags
2393  * @port: the port number this packet came in on
2394  * @in_wc: the work completion entry for this packet
2395  * @in_grh: the global route header for this packet
2396  * @in: the incoming MAD
 * @in_mad_size: size of the incoming MAD buffer
2397  * @out: any outgoing MAD reply
 * @out_mad_size: size of the outgoing MAD reply buffer
 * @out_mad_pkey_index: unused
2398  *
2399  * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
2400  * interested in processing.
2401  *
2402  * Note that the verbs framework has already done the MAD sanity checks,
2403  * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
2404  * MADs.
2405  *
2406  * This is called by the ib_mad module.
2407  */
2408 int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
2409                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
2410                     const struct ib_mad_hdr *in, size_t in_mad_size,
2411                     struct ib_mad_hdr *out, size_t *out_mad_size,
2412                     u16 *out_mad_pkey_index)
2413 {
2414         int ret;
2415         struct qib_ibport *ibp = to_iport(ibdev, port);
2416         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2417         const struct ib_mad *in_mad = (const struct ib_mad *)in;
2418         struct ib_mad *out_mad = (struct ib_mad *)out;
2419
2420         if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
2421                          *out_mad_size != sizeof(*out_mad)))
2422                 return IB_MAD_RESULT_FAILURE;
2423
2424         switch (in_mad->mad_hdr.mgmt_class) {
2425         case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
2426         case IB_MGMT_CLASS_SUBN_LID_ROUTED:
2427                 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
2428                 goto bail;
2429
2430         case IB_MGMT_CLASS_PERF_MGMT:
2431                 ret = process_perf(ibdev, port, in_mad, out_mad);
2432                 goto bail;
2433
2434         case IB_MGMT_CLASS_CONG_MGMT:
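                /*
                 * When congestion control is not configured, report
                 * success so the MAD is not treated as an error, but
                 * build no reply.
                 */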
2435                 if (!ppd->congestion_entries_shadow ||
2436                          !qib_cc_table_size) {
2437                         ret = IB_MAD_RESULT_SUCCESS;
2438                         goto bail;
2439                 }
2440                 ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
2441                 goto bail;
2442
2443         default:
2444                 ret = IB_MAD_RESULT_SUCCESS;
2445         }
2446
2447 bail:
2448         return ret;
2449 }
2450
2451 static void xmit_wait_timer_func(unsigned long opaque)
2452 {
2453         struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
2454         struct qib_devdata *dd = dd_from_ppd(ppd);
2455         unsigned long flags;
2456         u8 status;
2457
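        /*
         * If a hardware sample just completed, cache the counters and
         * reclaim ownership of the PS* registers; then accumulate the
         * xmit-wait delta and start a new sample interval.  The timer
         * re-arms itself once a second.
         */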
2458         spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
2459         if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
2460                 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
2461                 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
2462                         /* save counter cache */
2463                         cache_hw_sample_counters(ppd);
2464                         ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
2465                 } else {
2466                         goto done;
                }
2467         }
2468         ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
2469         dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
2470 done:
2471         spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
2472         mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
2473 }
2474
2475 void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
2476 {
2477         struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
2478         struct qib_devdata *dd = container_of(ibdev,
2479                                               struct qib_devdata, verbs_dev);
2480
2481         /* Initialize xmit_wait structure */
2482         dd->pport[port_idx].cong_stats.counter = 0;
2483         init_timer(&dd->pport[port_idx].cong_stats.timer);
2484         dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func;
2485         dd->pport[port_idx].cong_stats.timer.data =
2486                 (unsigned long)(&dd->pport[port_idx]);
2487         dd->pport[port_idx].cong_stats.timer.expires = 0;
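        /*
         * An expires value of 0 is already in the past, so the timer
         * fires on the next tick; timer.data also serves as the "timer
         * initialized" flag tested in qib_notify_free_mad_agent().
         */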
2488         add_timer(&dd->pport[port_idx].cong_stats.timer);
2489 }
2490
2491 void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
2492 {
2493         struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
2494         struct qib_devdata *dd = container_of(ibdev,
2495                                               struct qib_devdata, verbs_dev);
2496
2497         if (dd->pport[port_idx].cong_stats.timer.data)
2498                 del_timer_sync(&dd->pport[port_idx].cong_stats.timer);
2499
2500         if (dd->pport[port_idx].ibport_data.smi_ah)
2501                 ib_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah);
2502 }