/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_SPINLOCK(hfi1_devs_lock);
LIST_HEAD(hfi1_dev_list);
DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is 8192");

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *, const struct kernel_param *);
static int hfi1_caps_get(char *, const struct kernel_param *);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
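/*
 * Usage sketch (illustrative, not part of the original sources): because
 * cap_mask is registered through module_param_cb() with S_IWUSR | S_IRUGO,
 * it can be given at load time ("modprobe hfi1 cap_mask=<value>") or read
 * and, by root, written at runtime via
 * /sys/module/hfi1/parameters/cap_mask.  Writes are validated by
 * hfi1_caps_set() and reads are formatted by hfi1_caps_get() below; the
 * individual bit meanings come from the HFI1_CAP_* definitions.
 */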

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
MODULE_VERSION(HFI1_DRIVER_VERSION);

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}
	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}

const char *get_unit_name(int unit)
{
	static char iname[16];

	snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
	return iname;
}

const char *get_card_name(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata, verbs_dev);
	return get_unit_name(dd->unit);
}

struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata, verbs_dev);
	return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx, nunits_active = 0;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	list_for_each_entry(dd, &hfi1_dev_list, list) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int hfi1_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int pidx;
	struct hfi1_pportdata *ppd;

	spin_lock_irqsave(&hfi1_devs_lock, flags);

	list_for_each_entry(dd, &hfi1_dev_list, list) {
		nunits++;
		if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup)
				nup++;
		}
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}

/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
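/*
 * For example (assuming PAGE_SIZE is 4 KiB and the size lies in the allowed
 * range for its type), a 64 KiB eager buffer would be encoded as
 * ilog2(64 KiB / 4 KiB) + 1 = 5.
 */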
inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!IS_ALIGNED(size, PAGE_SIZE)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}

static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct hfi1_message_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
	struct hfi1_ibport *ibp = &ppd->ibport_data;

	if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
		return;

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct hfi1_ib_header *hdr = (struct hfi1_ib_header *)rhdr;
		struct hfi1_other_headers *ohdr = NULL;
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		u32 qp_num;
		u32 rcv_flags = 0;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (lnh == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
			rcv_flags |= HFI1_HAS_GRH;
		} else
			goto drop;

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
			struct rvt_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = hfi1_lookup_qpn(ibp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(
					rcd,
					hdr,
					rcv_flags,
					qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		u32 opcode;
		void *ebuf = NULL;
		__be32 *bth = NULL;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (ebuf == NULL)
			goto drop; /* this should never happen */

		if (lnh == HFI1_LRH_BTH)
			bth = (__be32 *)ebuf;
		else if (lnh == HFI1_LRH_GRH)
			bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
		else
			goto drop;

		opcode = be32_to_cpu(bth[0]) >> 24;
		opcode &= 0xff;

		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
			if (rhf_dc_info(packet->rhf))
				sc5 |= 0x10;
			sl = ibp->sc_to_sl[sc5];

			lqpn = be32_to_cpu(bth[1]) & HFI1_QPN_MASK;
			rcu_read_lock();
			qp = hfi1_lookup_qpn(ibp, lqpn);
			if (qp == NULL) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = be16_to_cpu(rhdr->lrh[3]);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{

	packet->rsize = rcd->rcvhdrqentsize; /* words */
	packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = rcd->head;
	packet->numpkt = 0;
	packet->rcv_flags = 0;
}

#ifndef CONFIG_PRESCAN_RXQ
static void prescan_rxq(struct hfi1_packet *packet) {}
#else /* !CONFIG_PRESCAN_RXQ */
static int prescan_receive_queue;

static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
			struct hfi1_other_headers *ohdr,
			u64 rhf, u32 bth1, struct ib_grh *grh)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u32 rqpn = 0;
	u16 rlid;
	u8 sc5, svc_type;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		rlid = be16_to_cpu(hdr->lrh[3]);
		rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	default:
		return;
	}

	sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	if (rhf_dc_info(rhf))
		sc5 |= 0x10;

	if (bth1 & HFI1_FECN_SMASK) {
		u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
		u16 dlid = be16_to_cpu(hdr->lrh[1]);

		return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc5, grh);
	}

	if (bth1 & HFI1_BECN_SMASK) {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
		u32 lqpn = bth1 & HFI1_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc5];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
}

struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = rcd->seq_cnt;
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL*/
		mdata->ps_seq = rcd->seq_cnt;
	}
}

static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
	    (rcd->ctxt == HFI1_CTRL_CTXT)) {
		if (++mdata->ps_seq > 13)
			mdata->ps_seq = 1;
	}
}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 */
static void prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	if (!prescan_receive_queue)
		return;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_devdata *dd = rcd->dd;
		struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
		__le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head +
					 dd->rhf_offset;
		struct rvt_qp *qp;
		struct hfi1_ib_header *hdr;
		struct hfi1_other_headers *ohdr;
		struct ib_grh *grh = NULL;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
		int is_ecn = 0;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		hdr = (struct hfi1_ib_header *)
			hfi1_get_msgheader(dd, rhf_addr);
		lnh = be16_to_cpu(hdr->lrh[0]) & 3;

		if (lnh == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH) {
			ohdr = &hdr->u.l.oth;
			grh = &hdr->u.l.grh;
		} else
			goto next; /* just in case */

		bth1 = be32_to_cpu(ohdr->bth[1]);
		is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));

		if (!is_ecn)
			goto next;

		qpn = bth1 & HFI1_QPN_MASK;
		rcu_read_lock();
		qp = hfi1_lookup_qpn(ibp, qpn);

		if (qp == NULL) {
			rcu_read_unlock();
			goto next;
		}

		process_ecn(qp, hdr, ohdr, rhf, bth1, grh);
		rcu_read_unlock();

		/* turn off BECN, FECN */
		bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
		ohdr->bth[1] = cpu_to_be32(bth1);
next:
		update_ps_mdata(&mdata, rcd);
	}
}
#endif /* CONFIG_PRESCAN_RXQ */

static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
		if (thread) {
			cond_resched();
		} else {
			ret = RCV_PKT_LIMIT;
			this_cpu_inc(*packet->rcd->dd->rcv_limit);
		}
	}

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				     packet->rcd->dd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
					 packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
	packet->etype = rhf_rcv_type(packet->rhf);
	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			       packet->tlen - ((packet->rcd->rcvhdrqentsize -
					       (rhf_hdrq_offset(packet->rhf)+2)) * 4));
	}

	/*
	 * Call a type specific handler for the packet. We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes. If so something is really
	 * wrong and we can probably just let things come
	 * crashing down. There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
		if (thread) {
			cond_resched();
		} else {
			ret = RCV_PKT_LIMIT;
			this_cpu_inc(*packet->rcd->dd->rcv_limit);
		}
	}

	packet->rhf_addr = (__le32 *) packet->rcd->rcvhdrq + packet->rhqoff +
				      packet->rcd->dd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->rcv_flags = 0;
}

static inline void finish_packet(struct hfi1_packet *packet)
{

	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt
	 */
	update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);

}

static inline void process_rcv_qp_work(struct hfi1_packet *packet)
{

	struct hfi1_ctxtdata *rcd;
	struct rvt_qp *qp, *nqp;

	rcd = packet->rcd;
	rcd->head = packet->rhqoff;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			hfi1_send_rc_ack(rcd, qp, 0);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
					RVT_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 seq;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	seq = rhf_rcv_seq(packet.rhf);
	if (seq != rcd->seq_cnt) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		seq = rhf_rcv_seq(packet.rhf);
		if (++rcd->seq_cnt > 13)
			rcd->seq_cnt = 1;
		if (seq != rcd->seq_cnt)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
bail:
	finish_packet(&packet);
	return last;
}

int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
bail:
	finish_packet(&packet);
	return last;
}

static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
{
	int i;

	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt =
			&handle_receive_interrupt_nodma_rtail;
}

static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
{
	int i;

	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt =
			&handle_receive_interrupt_dma_rtail;
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
	int i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt = &handle_receive_interrupt;
}

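/*
 * Note (summarizing the comment above receive_interrupt_work() below):
 * SC15 (0xf) is reserved for management traffic, so receiving a packet on
 * any other SC while the logical link state is still ARMED implies the link
 * has really gone ACTIVE.  set_armed_to_active() then queues
 * linkstate_active_work so the state update runs outside interrupt context.
 */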
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
				      struct hfi1_packet packet,
				      struct hfi1_devdata *dd)
{
	struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
	struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
							     packet.rhf_addr);

	if (hdr2sc(hdr, packet.rhf) != 0xf) {
		int hwstate = read_logical_state(dd);

		if (hwstate != LSTATE_ACTIVE) {
			dd_dev_info(dd, "Unexpected link state %d\n", hwstate);
			return 0;
		}

		queue_work(rcd->ppd->hfi1_wq, lsaw);
		return 1;
	}
	return 0;
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int needset, last = RCV_PKT_OK;
	struct hfi1_packet packet;
	int skip_pkt = 0;

	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

	init_packet(rcd, &packet);

	if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
		u32 seq = rhf_rcv_seq(packet.rhf);

		if (seq != rcd->seq_cnt) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

		/*
		 * Control context can potentially receive an invalid
		 * rhf. Drop such packets.
		 */
		if (rcd->ctxt == HFI1_CTRL_CTXT) {
			u32 seq = rhf_rcv_seq(packet.rhf);

			if (seq != rcd->seq_cnt)
				skip_pkt = 1;
		}
	}

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {

		if (unlikely(dd->do_drop && atomic_xchg(&dd->drop_packet,
			DROP_PACKET_OFF) == DROP_PACKET_ON)) {
			dd->do_drop = 0;

			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *) rcd->rcvhdrq +
					  packet.rhqoff +
					  dd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else if (skip_pkt) {
			last = skip_rcv_packet(&packet, thread);
			skip_pkt = 0;
		} else {
			/* Auto activate link on non-SC15 packet receive */
			if (unlikely(rcd->ppd->host_link_state ==
				     HLS_UP_ARMED) &&
			    set_armed_to_active(rcd, packet, dd))
				goto bail;
			last = process_rcv_packet(&packet, thread);
		}

		if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			u32 seq = rhf_rcv_seq(packet.rhf);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = RCV_PKT_DONE;
			if (needset) {
				dd_dev_info(dd,
					"Switching to NO_DMA_RTAIL\n");
				set_all_nodma_rtail(dd);
				needset = 0;
			}
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			/*
			 * Control context can potentially receive an invalid
			 * rhf. Drop such packets.
			 */
			if (rcd->ctxt == HFI1_CTRL_CTXT) {
				u32 seq = rhf_rcv_seq(packet.rhf);

				if (++rcd->seq_cnt > 13)
					rcd->seq_cnt = 1;
				if (!last && (seq != rcd->seq_cnt))
					skip_pkt = 1;
			}

			if (needset) {
				dd_dev_info(dd,
					    "Switching to DMA_RTAIL\n");
				set_all_dma_rtail(dd);
				needset = 0;
			}
		}

		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state.  We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	int i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all kernel contexts that could have had an
	 * interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_user_ctxt; i++)
		force_recv_intr(dd->rcd[i]);
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return default_if_bad if the size is invalid.
 */
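/*
 * For example, mtu_to_enum(8192, OPA_MTU_2048) yields OPA_MTU_8192, while an
 * unsupported size such as 3000 falls through to the supplied default
 * (OPA_MTU_2048 in that call).
 */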
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case     0: return OPA_MTU_0;
	case   256: return OPA_MTU_256;
	case   512: return OPA_MTU_512;
	case  1024: return OPA_MTU_1024;
	case  2048: return OPA_MTU_2048;
	case  4096: return OPA_MTU_4096;
	case  8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}

u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT
			|| ppd->host_link_state == HLS_UP_ARMED
			|| ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);

	return 0;
}

/*
 * The following routines deal with the "obviously simple" task of overriding
 * the state of the LEDs, which normally indicate link physical and logical
 * status.  The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

static void run_led_override(unsigned long opaque)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
	struct hfi1_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val)
{
	struct hfi1_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & HFI1_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		setup_timer(&ppd->led_override_timer, run_led_override,
			    (unsigned long)ppd);

		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip resources
 */
int hfi1_reset_device(int unit)
{
	int ret, i;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			"Invalid unit number %u or not initialized or not present\n",
			unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			"Reinitialize unit %u after reset failed with %d\n",
			unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			unit);

bail:
	return ret;
}

void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	rcv_hdrerr(rcd, rcd->ppd, packet);
	if (rhf_err_flags(packet->rhf))
		dd_dev_err(rcd->dd,
			   "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
			   rcd->ctxt, packet->rhf,
			   packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
			   packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
			   packet->rhf & RHF_DC_ERR ? "dc " : "",
			   packet->rhf & RHF_TID_ERR ? "tid " : "",
			   packet->rhf & RHF_LEN_ERR ? "len " : "",
			   packet->rhf & RHF_ECC_ERR ? "ecc " : "",
			   packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
			   packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
			   rte);
}

/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
int process_receive_ib(struct hfi1_packet *packet)
{
	trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
			  packet->rcd->ctxt,
			  rhf_err_flags(packet->rhf),
			  RHF_RCV_TYPE_IB,
			  packet->hlen,
			  packet->tlen,
			  packet->updegr,
			  rhf_egr_index(packet->rhf));

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return RHF_RCV_CONTINUE;
	}

	hfi1_ib_rcv(packet);
	return RHF_RCV_CONTINUE;
}

int process_receive_bypass(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Bypass packets are not supported in normal operation. Dropping\n");
	return RHF_RCV_CONTINUE;
}

int process_receive_error(struct hfi1_packet *packet)
{
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");

	return RHF_RCV_CONTINUE;
}

int kdeth_process_expected(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Unhandled expected packet received. Dropping.\n");
	return RHF_RCV_CONTINUE;
}

int kdeth_process_eager(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Unhandled eager packet received. Dropping.\n");
	return RHF_RCV_CONTINUE;
}

int process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
	return RHF_RCV_CONTINUE;
}