/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)
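
/*
 * These status codes are pre-swapped with cpu_to_be16() so that they can
 * be OR'd directly into the big-endian MAD status word, e.g.
 *	smp->status |= IB_SMP_INVALID_FIELD;
 */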

static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int recv_subn_get_nodedescription(struct ib_smp *smp,
					 struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

struct nodeinfo {
	u8 base_version;
	u8 class_version;
	u8 node_type;
	u8 num_ports;
	__be64 sys_guid;
	__be64 node_guid;
	__be64 port_guid;
	__be16 partition_cap;
	__be16 device_id;
	__be32 revision;
	u8 local_port_num;
	u8 vendor_id[3];
} __attribute__ ((packed));

static int recv_subn_get_nodeinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
	struct ipath_devdata *dd = to_idev(ibdev)->dd;
	u32 vendor, majrev, minrev;

	/* GUID 0 is illegal */
	if (smp->attr_mod || (dd->ipath_guid == 0))
		smp->status |= IB_SMP_INVALID_FIELD;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;	/* channel adapter */
	/*
	 * XXX The num_ports value will need a layer function to get
	 * the value if we ever have more than one IB port on a chip.
	 * We will also need to get the GUID for the port.
	 */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
	nip->node_guid = dd->ipath_guid;
	nip->port_guid = dd->ipath_guid;
	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
	majrev = dd->ipath_majrev;
	minrev = dd->ipath_minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->ipath_vendorid;
	nip->vendor_id[0] = IPATH_SRC_OUI_1;
	nip->vendor_id[1] = IPATH_SRC_OUI_2;
	nip->vendor_id[2] = IPATH_SRC_OUI_3;

	return reply(smp);
}
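
/*
 * For illustration: revision above packs the major revision in the upper
 * 16 bits and the minor revision in the lower 16, so majrev 2 / minrev 1
 * is reported as 0x00020001 (before the cpu_to_be32 swap).
 */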

static int recv_subn_get_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	/*
	 * We only support one GUID for now.  If this changes, the
	 * portinfo.guid_cap field needs to be updated too.
	 */
	if (startgx == 0) {
		__be64 g = to_idev(ibdev)->dd->ipath_guid;

		if (g == 0)
			/* GUID 0 is illegal */
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			/* The first is a copy of the read-only HW GUID. */
			*p = g;
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

static int get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

static int recv_subn_get_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 ibcstat;
	u8 mtu;
	int ret;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
		smp->status |= IB_SMP_INVALID_FIELD;
		ret = reply(smp);
		goto bail;
	}

	dev = to_idev(ibdev);
	dd = dev->dd;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
	    dev->mkeyprot == 0)
		pip->mkey = dev->mkey;
	pip->gid_prefix = dev->gid_prefix;
	lid = dd->ipath_lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(dev->sm_lid);
	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = dd->ipath_link_width_enabled;
	pip->link_width_supported = dd->ipath_link_width_supported;
	pip->link_width_active = dd->ipath_link_width_active;
	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
	ibcstat = dd->ipath_lastibcstat;
	/* map LinkState to IB portinfo values. */
	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;

	pip->portphysstate_linkdown =
		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
		(get_linkdowndefaultstate(dd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
		dd->ipath_link_speed_enabled;
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:		/* oops, something is wrong */
		mtu = IB_MTU_2048;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
	pip->vlcap_inittype = 0x10;	/* VLCap = VL0, InitType = 0 */
	pip->vl_high_limit = dev->vl_high_limit;
	/* pip->vl_arb_high_cap; // only one VL */
	/* pip->vl_arb_low_cap; // only one VL */
	/* InitTypeReply = 0 */
	/* our mtu cap depends on whether 4K MTU enabled or not */
	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations =
		cpu_to_be16((ipath_get_cr_errpkey(dd) -
			     dev->z_pkey_violations) & 0xFFFF);
	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = 1;
	pip->clientrereg_resv_subnetto = dev->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(dd) << 4) |
		get_overrunthreshold(dd);
	/* pip->max_credit_hint; */
	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}
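
/*
 * A small reading aid for the PortInfo encoding above: linkspeed_portstate
 * keeps the supported link speed in its high nibble and the IB port state
 * (ipath_ib_linkstate() + 1) in its low nibble; portphysstate_linkdown is
 * packed the same way, physical state high, linkdown default low.
 */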

/**
 * get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	/* always a kernel port, no locking needed */
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

static int recv_subn_get_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct ipath_ibdev *dev = to_idev(ibdev);
		unsigned i, n = ipath_get_npkeys(dev->dd);

		get_pkeys(dev->dd, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int recv_subn_set_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	/* The only GUID we support is the first read-only entry. */
	return recv_subn_get_guidinfo(smp, ibdev);
}

/**
 * set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}

/**
 * recv_subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int recv_subn_set_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	char clientrereg = 0;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u16 lstate;
	u32 mtu;
	int ret, ore;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
		goto err;

	dev = to_idev(ibdev);
	dd = dev->dd;
	event.device = ibdev;
	event.element.port_num = port;

	dev->mkey = pip->mkey;
	dev->gid_prefix = pip->gid_prefix;
	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	if (dd->ipath_lid != lid ||
	    dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
		/* Must be a valid unicast LID address. */
		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	if (smlid != dev->sm_lid) {
		/* Must be a valid unicast LID address. */
		if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		dev->sm_lid = smlid;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			lwe = dd->ipath_link_width_supported;
		else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
			goto err;
		set_link_width_enabled(dd, lwe);
	}

	/* Allow 2.5 or 5.0 Gbs. */
	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		if (lse == 15)
			lse = dd->ipath_link_speed_supported;
		else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
			goto err;
		set_link_speed_enabled(dd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		if (set_linkdowndefaultstate(dd, 1))
			goto err;
		break;
	case 2: /* POLL */
		if (set_linkdowndefaultstate(dd, 0))
			goto err;
		break;
	default:
		goto err;
	}

	dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	dev->vl_high_limit = pip->vl_high_limit;

	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
	case IB_MTU_256:
		mtu = 256;
		break;
	case IB_MTU_512:
		mtu = 512;
		break;
	case IB_MTU_1024:
		mtu = 1024;
		break;
	case IB_MTU_2048:
		mtu = 2048;
		break;
	case IB_MTU_4096:
		if (!ipath_mtu4096)
			goto err;
		mtu = 4096;
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}
	ipath_set_mtu(dd, mtu);

	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;

	/* We only support VL0 */
	if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
		goto err;

	if (pip->mkey_violations == 0)
		dev->mkey_violations = 0;

	/*
	 * Hardware counter can't be reset so snapshot and subtract
	 * later.
	 */
	if (pip->pkey_violations == 0)
		dev->z_pkey_violations = ipath_get_cr_errpkey(dd);

	if (pip->qkey_violations == 0)
		dev->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
		goto err;

	if (set_overrunthreshold(dd, (ore & 0xF)))
		goto err;

	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		goto err;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = IPATH_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = IPATH_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = IPATH_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = IPATH_IB_LINKDOWN_DISABLE;
		else
			goto err;
		ipath_set_linkstate(dd, lstate);
		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
				     IPATH_LINKACTIVE, 1000);
		break;
	case IB_PORT_ARMED:
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}

	ret = recv_subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
	ret = recv_subn_get_portinfo(smp, ibdev, port);

done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
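
/*
 * Reminder on the masking above: bit 15 of a P_Key is the full-membership
 * bit, so lkey = key & 0x7FFF strips it and 0x8001 and 0x0001 refer to the
 * same partition; only one of the pair is allowed in the table at a time.
 */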

/**
 * set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	/* always a kernel port, no locking needed */
	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;
		struct ib_event event;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev->ibdev;
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}
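
/*
 * For illustration, the packing above places ipath_pkeys[0..3] in one
 * 64-bit partition key register, lowest index in the least significant
 * 16 bits: e.g. { 0xFFFF, 0x8001, 0, 0 } is written as 0x8001FFFF.
 */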

static int recv_subn_set_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev, u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned i, n = ipath_get_npkeys(dev->dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return recv_subn_get_pkeytable(smp, ibdev);
}

static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Indicate AllPortSelect is valid (only one port anyway) */
	p->capability_mask = cpu_to_be16(1 << 8);
	p->base_version = 1;
	p->class_version = 1;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824
	 * sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}

/*
 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
 * We support 5 counters which only count the mandatory quantities.
 */
#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
				    COUNTER_MASK(1, 1) | \
				    COUNTER_MASK(1, 2) | \
				    COUNTER_MASK(1, 3) | \
				    COUNTER_MASK(1, 4))
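
/*
 * Worked example of the macro arithmetic: COUNTER_MASK(1, 0) == 1 << 27
 * and COUNTER_MASK(1, 4) == 1 << 15, so COUNTER_MASK0_9 sets the low bit
 * of each of the first five 3-bit CounterMask fields, i.e. capability 1
 * for counters 0 through 4 and 0 for the rest.
 */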

static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
	/*
	 * Ticks are 10x the link transfer period which for 2.5Gbs is 4
	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
	 * intervals are counted in ticks.  Since we use Linux timers, that
	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
	 * == 1000 (4000 ticks if HZ is 250).  link_speed_active returns 2 for
	 * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
	 * have hardware support for delaying packets.
	 */
	if (crp->cr_psstat)
		p->tick = dev->dd->ipath_link_speed_active - 1;
	else
		p->tick = 250;		/* 1 usec. */
	p->counter_width = 4;	/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		p->sample_status = dev->pma_sample_status;
	p->sample_start = cpu_to_be32(dev->pma_sample_start);
	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
	p->tag = cpu_to_be16(dev->pma_tag);
	p->counter_select[0] = dev->pma_counter_select[0];
	p->counter_select[1] = dev->pma_counter_select[1];
	p->counter_select[2] = dev->pma_counter_select[2];
	p->counter_select[3] = dev->pma_counter_select[3];
	p->counter_select[4] = dev->pma_counter_select[4];
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	return reply((struct ib_smp *) pmp);
}
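
/*
 * Sanity check on the tick value chosen above: with the 4 ns tick
 * described in the comment (value t is (t + 1) * 4 ns), p->tick = 250
 * works out to roughly 1 usec, matching the inline comment.
 */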

static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 status;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 ||
	    (p->port_select != port && p->port_select != 0xFF)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
		dev->pma_sample_start = be32_to_cpu(p->sample_start);
		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
		dev->pma_tag = be16_to_cpu(p->tag);
		dev->pma_counter_select[0] = p->counter_select[0];
		dev->pma_counter_select[1] = p->counter_select[1];
		dev->pma_counter_select[2] = p->counter_select[2];
		dev->pma_counter_select[3] = p->counter_select[3];
		dev->pma_counter_select[4] = p->counter_select[4];
		if (crp->cr_psstat) {
			ipath_write_creg(dev->dd, crp->cr_psinterval,
					 dev->pma_sample_interval);
			ipath_write_creg(dev->dd, crp->cr_psstart,
					 dev->pma_sample_start);
		} else
			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

static u64 get_counter(struct ipath_ibdev *dev,
		       struct ipath_cregs const *crp,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = (crp->cr_psxmitdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
			dev->ipath_sword;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = (crp->cr_psrcvdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
			dev->ipath_rword;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = (crp->cr_psxmitpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
			dev->ipath_spkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = (crp->cr_psrcvpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
			dev->ipath_rpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = (crp->cr_psxmitwaitcount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
			dev->ipath_xmit_wait;
		break;
	default:
		ret = 0;
	}

	return ret;
}
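
/*
 * Each case above prefers the dedicated hardware sample counter when the
 * chip provides one (the corresponding crp->cr_ps* register is non-zero)
 * and otherwise falls back to the software counts kept by the verbs layer.
 */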

static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
					  struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
		    cpu_to_be32(
			get_counter(dev, crp, dev->pma_counter_select[i]));

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					      struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	p->extended_width = cpu_to_be32(0x80000000);
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
		    cpu_to_be64(
			get_counter(dev, crp, dev->pma_counter_select[i]));

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;
	u8 port_select = p->port_select;

	ipath_get_counters(dev->dd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		dev->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= dev->z_link_downed_counter;
	cntrs.port_rcv_errors += dev->rcv_errors;
	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
	cntrs.port_xmit_data -= dev->z_port_xmit_data;
	cntrs.port_rcv_data -= dev->z_port_rcv_data;
	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		dev->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		dev->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= dev->z_vl15_dropped;
	cntrs.vl15_dropped += dev->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= dev->z_port_xmit_data;
	rwords -= dev->z_port_rcv_data;
	spkts -= dev->z_port_xmit_packets;
	rpkts -= dev->z_port_rcv_packets;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
	p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	ipath_get_counters(dev->dd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		dev->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		dev->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		dev->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		dev->z_port_rcv_errors =
			cntrs.port_rcv_errors + dev->rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		dev->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		dev->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		dev->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		dev->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		dev->n_vl15_dropped = 0;
		dev->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		dev->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		dev->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = cntrs.port_rcv_packets;

	return recv_pma_get_portcounters(pmp, ibdev, port);
}

static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		dev->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		dev->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		dev->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		dev->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		dev->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		dev->n_multicast_rcv = 0;

	return recv_pma_get_portcounters_ext(pmp, ibdev, port);
}

static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port_num, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	/* Is the mkey in the process of expiring? */
	if (dev->mkey_lease_timeout &&
	    time_after_eq(jiffies, dev->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		dev->mkey_lease_timeout = 0;
		dev->mkeyprot = 0;
	}

	/*
	 * M_Key checking depends on
	 * Portinfo:M_Key_protect_bits
	 */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
	    dev->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     (smp->method == IB_MGMT_METHOD_GET &&
	      dev->mkeyprot >= 2))) {
		if (dev->mkey_violations != 0xFFFF)
			++dev->mkey_violations;
		if (dev->mkey_lease_timeout ||
		    dev->mkey_lease_period == 0) {
			ret = IB_MAD_RESULT_SUCCESS |
				IB_MAD_RESULT_CONSUMED;
			goto bail;
		}
		dev->mkey_lease_timeout = jiffies +
			dev->mkey_lease_period * HZ;
		/* Future: Generate a trap notice. */
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		goto bail;
	} else if (dev->mkey_lease_timeout)
		dev->mkey_lease_timeout = 0;

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = recv_subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_get_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_get_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_get_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_set_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_set_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_TRAP_REPRESS:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
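
/*
 * Note on the lease handling above: the M_Key lease period is treated as
 * a value in seconds, so multiplying by HZ converts it to jiffies; e.g.
 * a 15 second lease with HZ == 250 arms the timeout 3750 jiffies after
 * the violating request.
 */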

static int process_perf(struct ib_device *ibdev, u8 port_num,
			const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = recv_pma_get_classportinfo(pmp);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = recv_pma_get_portsamplesresult(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = recv_pma_get_portsamplesresult_ext(pmp,
								 ibdev);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_get_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_get_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_set_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_set_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}

/**
 * ipath_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port_num: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		      const struct ib_mad_hdr *in, size_t in_mad_size,
		      struct ib_mad_hdr *out, size_t *out_mad_size,
		      u16 *out_mad_pkey_index)
{
	int ret;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port_num,
				   in_mad, out_mad);
		goto bail;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port_num, in_mad, out_mad);
		goto bail;
	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}
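
/*
 * Return-value convention, for reference: reply() tags a response with
 * IB_MAD_RESULT_REPLY so the ib_mad layer sends out_mad back, while the
 * plain IB_MAD_RESULT_SUCCESS used for unhandled classes above tells the
 * caller to keep processing the MAD normally.
 */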